[qemu.git] / migration / ram.c
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <[email protected]>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 #include <stdint.h>
29 #include <zlib.h>
30 #include "qemu/bitops.h"
31 #include "qemu/bitmap.h"
32 #include "qemu/timer.h"
33 #include "qemu/main-loop.h"
34 #include "migration/migration.h"
35 #include "migration/postcopy-ram.h"
36 #include "exec/address-spaces.h"
37 #include "migration/page_cache.h"
38 #include "qemu/error-report.h"
39 #include "trace.h"
40 #include "exec/ram_addr.h"
41 #include "qemu/rcu_queue.h"
42
43 #ifdef DEBUG_MIGRATION_RAM
44 #define DPRINTF(fmt, ...) \
45     do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
46 #else
47 #define DPRINTF(fmt, ...) \
48     do { } while (0)
49 #endif
50
51 static int dirty_rate_high_cnt;
52
53 static uint64_t bitmap_sync_count;
54
55 /***********************************************************/
56 /* ram save/restore */
57
58 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
59 #define RAM_SAVE_FLAG_COMPRESS 0x02
60 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
61 #define RAM_SAVE_FLAG_PAGE     0x08
62 #define RAM_SAVE_FLAG_EOS      0x10
63 #define RAM_SAVE_FLAG_CONTINUE 0x20
64 #define RAM_SAVE_FLAG_XBZRLE   0x40
65 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
66 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
67
68 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
69
70 static inline bool is_zero_range(uint8_t *p, uint64_t size)
71 {
72     return buffer_find_nonzero_offset(p, size) == size;
73 }
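
/*
 * Editor's note: a minimal, hypothetical sketch of the zero-detection
 * contract relied on above.  buffer_find_nonzero_offset() returns the
 * offset of the first non-zero byte, or `size` when the whole buffer is
 * zero, which is exactly what is_zero_range() tests for.  The byte-wise
 * scan below only illustrates those semantics; the real helper is
 * vectorised.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static size_t find_nonzero_offset_sketch(const uint8_t *p, size_t size)
{
    size_t i;

    for (i = 0; i < size; i++) {
        if (p[i]) {
            return i;        /* offset of the first non-zero byte */
        }
    }
    return size;             /* the buffer is entirely zero */
}

static bool is_zero_range_sketch(const uint8_t *p, size_t size)
{
    return find_nonzero_offset_sketch(p, size) == size;
}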
74
75 /* This struct contains the XBZRLE cache and a static page
76    used by the compression */
77 static struct {
78     /* buffer used for XBZRLE encoding */
79     uint8_t *encoded_buf;
80     /* buffer for storing page content */
81     uint8_t *current_buf;
82     /* Cache for XBZRLE, Protected by lock. */
83     PageCache *cache;
84     QemuMutex lock;
85 } XBZRLE;
86
87 /* buffer used for XBZRLE decoding */
88 static uint8_t *xbzrle_decoded_buf;
89
90 static void XBZRLE_cache_lock(void)
91 {
92     if (migrate_use_xbzrle())
93         qemu_mutex_lock(&XBZRLE.lock);
94 }
95
96 static void XBZRLE_cache_unlock(void)
97 {
98     if (migrate_use_xbzrle())
99         qemu_mutex_unlock(&XBZRLE.lock);
100 }
101
102 /*
103  * called from qmp_migrate_set_cache_size in main thread, possibly while
104  * a migration is in progress.
105  * A running migration may be using the cache and might finish during this
106  * call, hence changes to the cache are protected by XBZRLE.lock.
107  */
108 int64_t xbzrle_cache_resize(int64_t new_size)
109 {
110     PageCache *new_cache;
111     int64_t ret;
112
113     if (new_size < TARGET_PAGE_SIZE) {
114         return -1;
115     }
116
117     XBZRLE_cache_lock();
118
119     if (XBZRLE.cache != NULL) {
120         if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
121             goto out_new_size;
122         }
123         new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
124                                         TARGET_PAGE_SIZE);
125         if (!new_cache) {
126             error_report("Error creating cache");
127             ret = -1;
128             goto out;
129         }
130
131         cache_fini(XBZRLE.cache);
132         XBZRLE.cache = new_cache;
133     }
134
135 out_new_size:
136     ret = pow2floor(new_size);
137 out:
138     XBZRLE_cache_unlock();
139     return ret;
140 }
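
/*
 * Editor's note: an illustrative sketch (not QEMU code) of the rounding
 * performed by xbzrle_cache_resize() above.  The requested size is
 * rounded down to a power of two by pow2floor(); if that equals the
 * current cache size the resize is a no-op, and the rounded value is
 * what gets returned to the caller.  The loop below assumes value >= 1
 * and simply restates the rounding rule.
 */
#include <stdint.h>

static int64_t pow2floor_sketch(int64_t value)
{
    int64_t result = 1;

    while (result <= value / 2) {
        result *= 2;
    }
    return result;
}

/* e.g. pow2floor_sketch(5 * 1024 * 1024) == 4 * 1024 * 1024 */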
141
142 /* accounting for migration statistics */
143 typedef struct AccountingInfo {
144     uint64_t dup_pages;
145     uint64_t skipped_pages;
146     uint64_t norm_pages;
147     uint64_t iterations;
148     uint64_t xbzrle_bytes;
149     uint64_t xbzrle_pages;
150     uint64_t xbzrle_cache_miss;
151     double xbzrle_cache_miss_rate;
152     uint64_t xbzrle_overflows;
153 } AccountingInfo;
154
155 static AccountingInfo acct_info;
156
157 static void acct_clear(void)
158 {
159     memset(&acct_info, 0, sizeof(acct_info));
160 }
161
162 uint64_t dup_mig_bytes_transferred(void)
163 {
164     return acct_info.dup_pages * TARGET_PAGE_SIZE;
165 }
166
167 uint64_t dup_mig_pages_transferred(void)
168 {
169     return acct_info.dup_pages;
170 }
171
172 uint64_t skipped_mig_bytes_transferred(void)
173 {
174     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
175 }
176
177 uint64_t skipped_mig_pages_transferred(void)
178 {
179     return acct_info.skipped_pages;
180 }
181
182 uint64_t norm_mig_bytes_transferred(void)
183 {
184     return acct_info.norm_pages * TARGET_PAGE_SIZE;
185 }
186
187 uint64_t norm_mig_pages_transferred(void)
188 {
189     return acct_info.norm_pages;
190 }
191
192 uint64_t xbzrle_mig_bytes_transferred(void)
193 {
194     return acct_info.xbzrle_bytes;
195 }
196
197 uint64_t xbzrle_mig_pages_transferred(void)
198 {
199     return acct_info.xbzrle_pages;
200 }
201
202 uint64_t xbzrle_mig_pages_cache_miss(void)
203 {
204     return acct_info.xbzrle_cache_miss;
205 }
206
207 double xbzrle_mig_cache_miss_rate(void)
208 {
209     return acct_info.xbzrle_cache_miss_rate;
210 }
211
212 uint64_t xbzrle_mig_pages_overflow(void)
213 {
214     return acct_info.xbzrle_overflows;
215 }
216
217 /* This is the last block that we have visited searching for dirty pages
218  */
219 static RAMBlock *last_seen_block;
220 /* This is the last block from where we have sent data */
221 static RAMBlock *last_sent_block;
222 static ram_addr_t last_offset;
223 static QemuMutex migration_bitmap_mutex;
224 static uint64_t migration_dirty_pages;
225 static uint32_t last_version;
226 static bool ram_bulk_stage;
227
228 /* used by the search for pages to send */
229 struct PageSearchStatus {
230     /* Current block being searched */
231     RAMBlock    *block;
232     /* Current offset to search from */
233     ram_addr_t   offset;
234     /* Set once we wrap around */
235     bool         complete_round;
236 };
237 typedef struct PageSearchStatus PageSearchStatus;
238
239 static struct BitmapRcu {
240     struct rcu_head rcu;
241     /* Main migration bitmap */
242     unsigned long *bmap;
243     /* bitmap of pages that haven't been sent even once
244      * only maintained and used in postcopy at the moment
245      * where it's used to send the dirtymap at the start
246      * of the postcopy phase
247      */
248     unsigned long *unsentmap;
249 } *migration_bitmap_rcu;
250
251 struct CompressParam {
252     bool start;
253     bool done;
254     QEMUFile *file;
255     QemuMutex mutex;
256     QemuCond cond;
257     RAMBlock *block;
258     ram_addr_t offset;
259 };
260 typedef struct CompressParam CompressParam;
261
262 struct DecompressParam {
263     bool start;
264     QemuMutex mutex;
265     QemuCond cond;
266     void *des;
267     uint8_t *compbuf;
268     int len;
269 };
270 typedef struct DecompressParam DecompressParam;
271
272 static CompressParam *comp_param;
273 static QemuThread *compress_threads;
274 /* comp_done_cond is used to wake up the migration thread when
275  * one of the compression threads has finished the compression.
276  * comp_done_lock is used to co-work with comp_done_cond.
277  */
278 static QemuMutex *comp_done_lock;
279 static QemuCond *comp_done_cond;
280 /* The empty QEMUFileOps will be used by the file in CompressParam */
281 static const QEMUFileOps empty_ops = { };
282
283 static bool compression_switch;
284 static bool quit_comp_thread;
285 static bool quit_decomp_thread;
286 static DecompressParam *decomp_param;
287 static QemuThread *decompress_threads;
288 static uint8_t *compressed_data_buf;
289
290 static int do_compress_ram_page(CompressParam *param);
291
292 static void *do_data_compress(void *opaque)
293 {
294     CompressParam *param = opaque;
295
296     while (!quit_comp_thread) {
297         qemu_mutex_lock(&param->mutex);
298         /* Re-check quit_comp_thread in case
299          * terminate_compression_threads() is called just before
300          * qemu_mutex_lock(&param->mutex) and after
301          * while (!quit_comp_thread); re-checking it here makes
302          * sure the compression thread terminates as expected.
303          */
304         while (!param->start && !quit_comp_thread) {
305             qemu_cond_wait(&param->cond, &param->mutex);
306         }
307         if (!quit_comp_thread) {
308             do_compress_ram_page(param);
309         }
310         param->start = false;
311         qemu_mutex_unlock(&param->mutex);
312
313         qemu_mutex_lock(comp_done_lock);
314         param->done = true;
315         qemu_cond_signal(comp_done_cond);
316         qemu_mutex_unlock(comp_done_lock);
317     }
318
319     return NULL;
320 }
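
/*
 * Editor's note: a self-contained pthread sketch (hypothetical names,
 * not QEMU API) of the start/quit handshake used by do_data_compress()
 * above.  The worker waits on a condition variable until work is posted
 * or shutdown is requested; the quit flag is re-checked under the mutex
 * so that a wakeup sent by the terminator just before the wait cannot
 * be lost.  As in the code above, the outer loop reads the quit flag
 * without the lock, which is tolerable here because it is only ever set
 * once.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_cond = PTHREAD_COND_INITIALIZER;
static bool work_start;
static bool work_quit;

static void *worker_sketch(void *opaque)
{
    (void)opaque;

    while (!work_quit) {
        pthread_mutex_lock(&work_lock);
        /* re-check quit under the lock, as explained above */
        while (!work_start && !work_quit) {
            pthread_cond_wait(&work_cond, &work_lock);
        }
        if (!work_quit) {
            /* ... compress one page here ... */
        }
        work_start = false;
        pthread_mutex_unlock(&work_lock);
    }
    return NULL;
}

static void post_work_sketch(void)          /* cf. start_compression() */
{
    pthread_mutex_lock(&work_lock);
    work_start = true;
    pthread_cond_signal(&work_cond);
    pthread_mutex_unlock(&work_lock);
}

static void terminate_worker_sketch(void)   /* cf. terminate_compression_threads() */
{
    pthread_mutex_lock(&work_lock);
    work_quit = true;
    pthread_cond_signal(&work_cond);
    pthread_mutex_unlock(&work_lock);
}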
321
322 static inline void terminate_compression_threads(void)
323 {
324     int idx, thread_count;
325
326     thread_count = migrate_compress_threads();
327     quit_comp_thread = true;
328     for (idx = 0; idx < thread_count; idx++) {
329         qemu_mutex_lock(&comp_param[idx].mutex);
330         qemu_cond_signal(&comp_param[idx].cond);
331         qemu_mutex_unlock(&comp_param[idx].mutex);
332     }
333 }
334
335 void migrate_compress_threads_join(void)
336 {
337     int i, thread_count;
338
339     if (!migrate_use_compression()) {
340         return;
341     }
342     terminate_compression_threads();
343     thread_count = migrate_compress_threads();
344     for (i = 0; i < thread_count; i++) {
345         qemu_thread_join(compress_threads + i);
346         qemu_fclose(comp_param[i].file);
347         qemu_mutex_destroy(&comp_param[i].mutex);
348         qemu_cond_destroy(&comp_param[i].cond);
349     }
350     qemu_mutex_destroy(comp_done_lock);
351     qemu_cond_destroy(comp_done_cond);
352     g_free(compress_threads);
353     g_free(comp_param);
354     g_free(comp_done_cond);
355     g_free(comp_done_lock);
356     compress_threads = NULL;
357     comp_param = NULL;
358     comp_done_cond = NULL;
359     comp_done_lock = NULL;
360 }
361
362 void migrate_compress_threads_create(void)
363 {
364     int i, thread_count;
365
366     if (!migrate_use_compression()) {
367         return;
368     }
369     quit_comp_thread = false;
370     compression_switch = true;
371     thread_count = migrate_compress_threads();
372     compress_threads = g_new0(QemuThread, thread_count);
373     comp_param = g_new0(CompressParam, thread_count);
374     comp_done_cond = g_new0(QemuCond, 1);
375     comp_done_lock = g_new0(QemuMutex, 1);
376     qemu_cond_init(comp_done_cond);
377     qemu_mutex_init(comp_done_lock);
378     for (i = 0; i < thread_count; i++) {
379         /* comp_param[i].file is just used as a dummy buffer to save data;
380          * set its ops to empty.
381          */
382         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
383         comp_param[i].done = true;
384         qemu_mutex_init(&comp_param[i].mutex);
385         qemu_cond_init(&comp_param[i].cond);
386         qemu_thread_create(compress_threads + i, "compress",
387                            do_data_compress, comp_param + i,
388                            QEMU_THREAD_JOINABLE);
389     }
390 }
391
392 /**
393  * save_page_header: Write page header to wire
394  *
395  * If this is the 1st block, it also writes the block identification
396  *
397  * Returns: Number of bytes written
398  *
399  * @f: QEMUFile where to send the data
400  * @block: block that contains the page we want to send
401  * @offset: offset inside the block for the page
402  *          in the lower bits, it contains flags
403  */
404 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
405 {
406     size_t size, len;
407
408     qemu_put_be64(f, offset);
409     size = 8;
410
411     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
412         len = strlen(block->idstr);
413         qemu_put_byte(f, len);
414         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
415         size += 1 + len;
416     }
417     return size;
418 }
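
/*
 * Editor's note: a hypothetical sketch of the wire layout produced by
 * save_page_header() above, serialised into a plain byte buffer instead
 * of a QEMUFile.  The 64-bit big-endian value carries the page offset
 * with the RAM_SAVE_FLAG_* bits OR-ed into its low bits; the block id
 * (1-byte length plus the unterminated string) is only written when
 * RAM_SAVE_FLAG_CONTINUE is clear, i.e. when a new block starts.  The
 * caller is assumed to provide a buffer of at least 8 + 1 + strlen(id)
 * bytes.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define SKETCH_FLAG_CONTINUE 0x20   /* mirrors RAM_SAVE_FLAG_CONTINUE */

static size_t encode_page_header_sketch(uint8_t *buf, uint64_t offset,
                                        const char *idstr)
{
    size_t size = 0;
    int i;

    /* 8-byte big-endian "offset | flags" word */
    for (i = 0; i < 8; i++) {
        buf[size++] = (uint8_t)(offset >> (56 - 8 * i));
    }

    if (!(offset & SKETCH_FLAG_CONTINUE)) {
        size_t len = strlen(idstr);

        buf[size++] = (uint8_t)len;        /* 1-byte id length */
        memcpy(buf + size, idstr, len);    /* block id, no NUL */
        size += len;
    }
    return size;                           /* bytes written */
}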
419
420 /* Reduce the amount of guest CPU execution to hopefully slow down memory writes.
421  * If guest dirty memory rate is reduced below the rate at which we can
422  * transfer pages to the destination then we should be able to complete
423  * migration. Some workloads dirty memory way too fast and will not effectively
424  * converge, even with auto-converge.
425  */
426 static void mig_throttle_guest_down(void)
427 {
428     MigrationState *s = migrate_get_current();
429     uint64_t pct_initial =
430             s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
431     uint64_t pct_icrement =
432             s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
433
434     /* We have not started throttling yet. Let's start it. */
435     if (!cpu_throttle_active()) {
436         cpu_throttle_set(pct_initial);
437     } else {
438         /* Throttling already on, just increase the rate */
439         cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
440     }
441 }
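
/*
 * Editor's note: a toy restatement (hypothetical names, not the QEMU
 * cpu_throttle_* API) of how the throttle percentage evolves in
 * mig_throttle_guest_down() above: the first call applies the initial
 * percentage, every later call adds the increment, so the guest is
 * slowed progressively until its dirty rate drops below the transfer
 * rate.  The 99% clamp is an assumption for the sketch.
 */
static int throttle_pct_sketch;          /* 0 means throttling is off */

static void throttle_guest_down_sketch(int pct_initial, int pct_increment)
{
    if (throttle_pct_sketch == 0) {
        throttle_pct_sketch = pct_initial;          /* start throttling */
    } else {
        throttle_pct_sketch += pct_increment;       /* tighten further */
    }
    if (throttle_pct_sketch > 99) {
        throttle_pct_sketch = 99;                   /* assumed clamp */
    }
}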
442
443 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
444  * The important thing is that a stale (not-yet-0'd) page be replaced
445  * by the new data.
446  * As a bonus, if the page wasn't in the cache it gets added so that
447  * when a small write is made into the 0'd page it gets XBZRLE sent
448  */
449 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
450 {
451     if (ram_bulk_stage || !migrate_use_xbzrle()) {
452         return;
453     }
454
455     /* We don't care if this fails to allocate a new cache page
456      * as long as it updates an old one */
457     cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
458                  bitmap_sync_count);
459 }
460
461 #define ENCODING_FLAG_XBZRLE 0x1
462
463 /**
464  * save_xbzrle_page: compress and send current page
465  *
466  * Returns: 1 means that we wrote the page
467  *          0 means that page is identical to the one already sent
468  *          -1 means that xbzrle would be longer than normal
469  *
470  * @f: QEMUFile where to send the data
471  * @current_data: holds the address of the page data; may be updated to point at the cached copy
472  * @current_addr: RAM address of the page
473  * @block: block that contains the page we want to send
474  * @offset: offset inside the block for the page
475  * @last_stage: if we are at the completion stage
476  * @bytes_transferred: increase it with the number of transferred bytes
477  */
478 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
479                             ram_addr_t current_addr, RAMBlock *block,
480                             ram_addr_t offset, bool last_stage,
481                             uint64_t *bytes_transferred)
482 {
483     int encoded_len = 0, bytes_xbzrle;
484     uint8_t *prev_cached_page;
485
486     if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
487         acct_info.xbzrle_cache_miss++;
488         if (!last_stage) {
489             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
490                              bitmap_sync_count) == -1) {
491                 return -1;
492             } else {
493                 /* update *current_data when the page has been
494                    inserted into cache */
495                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
496             }
497         }
498         return -1;
499     }
500
501     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
502
503     /* save current buffer into memory */
504     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
505
506     /* XBZRLE encoding (if there is no overflow) */
507     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
508                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
509                                        TARGET_PAGE_SIZE);
510     if (encoded_len == 0) {
511         DPRINTF("Skipping unmodified page\n");
512         return 0;
513     } else if (encoded_len == -1) {
514         DPRINTF("Overflow\n");
515         acct_info.xbzrle_overflows++;
516         /* update data in the cache */
517         if (!last_stage) {
518             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
519             *current_data = prev_cached_page;
520         }
521         return -1;
522     }
523
524     /* we need to update the data in the cache, in order to get the same data */
525     if (!last_stage) {
526         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
527     }
528
529     /* Send XBZRLE based compressed page */
530     bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
531     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
532     qemu_put_be16(f, encoded_len);
533     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
534     bytes_xbzrle += encoded_len + 1 + 2;
535     acct_info.xbzrle_pages++;
536     acct_info.xbzrle_bytes += bytes_xbzrle;
537     *bytes_transferred += bytes_xbzrle;
538
539     return 1;
540 }
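
/*
 * Editor's note: a simplified, hypothetical delta encoder that
 * illustrates the idea behind xbzrle_encode_buffer() as used above:
 * compare the cached old copy of the page with the new contents and
 * emit (skip-length, copy-length, changed bytes) records, so a page
 * with only a few modified bytes encodes into far less than
 * TARGET_PAGE_SIZE.  The real XBZRLE wire format is different (it uses
 * variable-length integers); this sketch only shows the principle.  It
 * returns 0 if the pages are identical and -1 if the encoding would
 * overflow the output buffer, mirroring the return convention above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int delta_encode_sketch(const uint8_t *old, const uint8_t *cur,
                               size_t page_size,
                               uint8_t *out, size_t out_size)
{
    size_t i = 0, olen = 0;

    while (i < page_size) {
        size_t skip = 0, copy = 0;

        /* run of unchanged bytes */
        while (i + skip < page_size && old[i + skip] == cur[i + skip]) {
            skip++;
        }
        i += skip;
        /* run of changed bytes */
        while (i + copy < page_size && old[i + copy] != cur[i + copy]) {
            copy++;
        }
        if (copy == 0) {
            break;                        /* trailing unchanged run */
        }
        /* record: 2-byte skip, 2-byte copy, then the changed bytes */
        if (olen + 4 + copy > out_size ||
            skip > UINT16_MAX || copy > UINT16_MAX) {
            return -1;                    /* longer than sending raw */
        }
        out[olen++] = (uint8_t)(skip >> 8);
        out[olen++] = (uint8_t)skip;
        out[olen++] = (uint8_t)(copy >> 8);
        out[olen++] = (uint8_t)copy;
        memcpy(out + olen, cur + i, copy);
        olen += copy;
        i += copy;
    }
    return (int)olen;                     /* 0: pages are identical */
}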
541
542 /* Called with rcu_read_lock() to protect migration_bitmap
543  * rb: The RAMBlock  to search for dirty pages in
544  * start: Start address (typically so we can continue from previous page)
545  * ram_addr_abs: Pointer into which to store the address of the dirty page
546  *               within the global ram_addr space
547  *
548  * Returns: byte offset within memory region of the start of a dirty page
549  */
550 static inline
551 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
552                                        ram_addr_t start,
553                                        ram_addr_t *ram_addr_abs)
554 {
555     unsigned long base = rb->offset >> TARGET_PAGE_BITS;
556     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
557     uint64_t rb_size = rb->used_length;
558     unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
559     unsigned long *bitmap;
560
561     unsigned long next;
562
563     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
564     if (ram_bulk_stage && nr > base) {
565         next = nr + 1;
566     } else {
567         next = find_next_bit(bitmap, size, nr);
568     }
569
570     *ram_addr_abs = next << TARGET_PAGE_BITS;
571     return (next - base) << TARGET_PAGE_BITS;
572 }
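
/*
 * Editor's note: a minimal sketch of the bitmap scan that
 * migration_bitmap_find_dirty() above delegates to find_next_bit().
 * Bits are indexed by (ram_addr >> TARGET_PAGE_BITS); the scan returns
 * the index of the next set (dirty) bit at or after `start`, or `size`
 * if there is none.  A bit-by-bit loop replaces the word-at-a-time
 * bitops helper, and the caller is assumed to pass a bitmap with at
 * least `size` bits.
 */
#include <limits.h>
#include <stddef.h>

#define SKETCH_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static size_t find_next_bit_sketch(const unsigned long *bitmap,
                                   size_t size, size_t start)
{
    size_t i;

    for (i = start; i < size; i++) {
        unsigned long word = bitmap[i / SKETCH_BITS_PER_LONG];

        if (word & (1UL << (i % SKETCH_BITS_PER_LONG))) {
            return i;        /* index of the next dirty page */
        }
    }
    return size;             /* no dirty page found */
}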
573
574 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
575 {
576     bool ret;
577     int nr = addr >> TARGET_PAGE_BITS;
578     unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
579
580     ret = test_and_clear_bit(nr, bitmap);
581
582     if (ret) {
583         migration_dirty_pages--;
584     }
585     return ret;
586 }
587
588 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
589 {
590     unsigned long *bitmap;
591     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
592     migration_dirty_pages +=
593         cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
594 }
595
596 /* FIXME: there are too many global variables used in the migration process. */
597 static int64_t start_time;
598 static int64_t bytes_xfer_prev;
599 static int64_t num_dirty_pages_period;
600 static uint64_t xbzrle_cache_miss_prev;
601 static uint64_t iterations_prev;
602
603 static void migration_bitmap_sync_init(void)
604 {
605     start_time = 0;
606     bytes_xfer_prev = 0;
607     num_dirty_pages_period = 0;
608     xbzrle_cache_miss_prev = 0;
609     iterations_prev = 0;
610 }
611
612 /* Called with iothread lock held, to protect ram_list.dirty_memory[] */
613 static void migration_bitmap_sync(void)
614 {
615     RAMBlock *block;
616     uint64_t num_dirty_pages_init = migration_dirty_pages;
617     MigrationState *s = migrate_get_current();
618     int64_t end_time;
619     int64_t bytes_xfer_now;
620
621     bitmap_sync_count++;
622
623     if (!bytes_xfer_prev) {
624         bytes_xfer_prev = ram_bytes_transferred();
625     }
626
627     if (!start_time) {
628         start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
629     }
630
631     trace_migration_bitmap_sync_start();
632     address_space_sync_dirty_bitmap(&address_space_memory);
633
634     qemu_mutex_lock(&migration_bitmap_mutex);
635     rcu_read_lock();
636     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
637         migration_bitmap_sync_range(block->offset, block->used_length);
638     }
639     rcu_read_unlock();
640     qemu_mutex_unlock(&migration_bitmap_mutex);
641
642     trace_migration_bitmap_sync_end(migration_dirty_pages
643                                     - num_dirty_pages_init);
644     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
645     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
646
647     /* more than 1 second = 1000 milliseconds */
648     if (end_time > start_time + 1000) {
649         if (migrate_auto_converge()) {
650             /* The following detection logic can be refined later. For now:
651                Check to see if the dirtied bytes exceed 50% of the approx.
652                amount of bytes that just got transferred since the last time we
653                were in this routine. If that happens twice, start or increase
654                throttling */
655             bytes_xfer_now = ram_bytes_transferred();
656
657             if (s->dirty_pages_rate &&
658                (num_dirty_pages_period * TARGET_PAGE_SIZE >
659                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
660                (dirty_rate_high_cnt++ >= 2)) {
661                     trace_migration_throttle();
662                     dirty_rate_high_cnt = 0;
663                     mig_throttle_guest_down();
664              }
665              bytes_xfer_prev = bytes_xfer_now;
666         }
667
668         if (migrate_use_xbzrle()) {
669             if (iterations_prev != acct_info.iterations) {
670                 acct_info.xbzrle_cache_miss_rate =
671                    (double)(acct_info.xbzrle_cache_miss -
672                             xbzrle_cache_miss_prev) /
673                    (acct_info.iterations - iterations_prev);
674             }
675             iterations_prev = acct_info.iterations;
676             xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
677         }
678         s->dirty_pages_rate = num_dirty_pages_period * 1000
679             / (end_time - start_time);
680         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
681         start_time = end_time;
682         num_dirty_pages_period = 0;
683     }
684     s->dirty_sync_count = bitmap_sync_count;
685 }
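
/*
 * Editor's note: the rate bookkeeping at the end of
 * migration_bitmap_sync() above reduces to the arithmetic below; the
 * variable names here are hypothetical and this is only a restatement,
 * not additional QEMU code.  Rates are recomputed once at least a
 * second has elapsed since the last sync window started.
 */
#include <stdint.h>

static void update_dirty_rates_sketch(uint64_t dirty_pages_in_period,
                                      int64_t start_ms, int64_t end_ms,
                                      uint64_t page_size,
                                      uint64_t *pages_per_sec,
                                      uint64_t *bytes_per_sec)
{
    if (end_ms > start_ms + 1000) {
        *pages_per_sec = dirty_pages_in_period * 1000 /
                         (uint64_t)(end_ms - start_ms);
        *bytes_per_sec = *pages_per_sec * page_size;
    }
}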
686
687 /**
688  * save_zero_page: Send the zero page to the stream
689  *
690  * Returns: Number of pages written.
691  *
692  * @f: QEMUFile where to send the data
693  * @block: block that contains the page we want to send
694  * @offset: offset inside the block for the page
695  * @p: pointer to the page
696  * @bytes_transferred: increase it with the number of transferred bytes
697  */
698 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
699                           uint8_t *p, uint64_t *bytes_transferred)
700 {
701     int pages = -1;
702
703     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
704         acct_info.dup_pages++;
705         *bytes_transferred += save_page_header(f, block,
706                                                offset | RAM_SAVE_FLAG_COMPRESS);
707         qemu_put_byte(f, 0);
708         *bytes_transferred += 1;
709         pages = 1;
710     }
711
712     return pages;
713 }
714
715 /**
716  * ram_save_page: Send the given page to the stream
717  *
718  * Returns: Number of pages written.
719  *
720  * @f: QEMUFile where to send the data
721  * @block: block that contains the page we want to send
722  * @offset: offset inside the block for the page
723  * @last_stage: if we are at the completion stage
724  * @bytes_transferred: increase it with the number of transferred bytes
725  */
726 static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
727                          bool last_stage, uint64_t *bytes_transferred)
728 {
729     int pages = -1;
730     uint64_t bytes_xmit;
731     ram_addr_t current_addr;
732     uint8_t *p;
733     int ret;
734     bool send_async = true;
735
736     p = block->host + offset;
737
738     /* When in doubt, send the page as normal */
739     bytes_xmit = 0;
740     ret = ram_control_save_page(f, block->offset,
741                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
742     if (bytes_xmit) {
743         *bytes_transferred += bytes_xmit;
744         pages = 1;
745     }
746
747     XBZRLE_cache_lock();
748
749     current_addr = block->offset + offset;
750
751     if (block == last_sent_block) {
752         offset |= RAM_SAVE_FLAG_CONTINUE;
753     }
754     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
755         if (ret != RAM_SAVE_CONTROL_DELAYED) {
756             if (bytes_xmit > 0) {
757                 acct_info.norm_pages++;
758             } else if (bytes_xmit == 0) {
759                 acct_info.dup_pages++;
760             }
761         }
762     } else {
763         pages = save_zero_page(f, block, offset, p, bytes_transferred);
764         if (pages > 0) {
765             /* Must let xbzrle know, otherwise a previous (now 0'd) cached
766              * page would be stale
767              */
768             xbzrle_cache_zero_page(current_addr);
769         } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
770             pages = save_xbzrle_page(f, &p, current_addr, block,
771                                      offset, last_stage, bytes_transferred);
772             if (!last_stage) {
773                 /* Can't send this cached data async, since the cache page
774                  * might get updated before it gets to the wire
775                  */
776                 send_async = false;
777             }
778         }
779     }
780
781     /* XBZRLE overflow or normal page */
782     if (pages == -1) {
783         *bytes_transferred += save_page_header(f, block,
784                                                offset | RAM_SAVE_FLAG_PAGE);
785         if (send_async) {
786             qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
787         } else {
788             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
789         }
790         *bytes_transferred += TARGET_PAGE_SIZE;
791         pages = 1;
792         acct_info.norm_pages++;
793     }
794
795     XBZRLE_cache_unlock();
796
797     return pages;
798 }
799
800 static int do_compress_ram_page(CompressParam *param)
801 {
802     int bytes_sent, blen;
803     uint8_t *p;
804     RAMBlock *block = param->block;
805     ram_addr_t offset = param->offset;
806
807     p = block->host + (offset & TARGET_PAGE_MASK);
808
809     bytes_sent = save_page_header(param->file, block, offset |
810                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
811     blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
812                                      migrate_compress_level());
813     bytes_sent += blen;
814
815     return bytes_sent;
816 }
817
818 static inline void start_compression(CompressParam *param)
819 {
820     param->done = false;
821     qemu_mutex_lock(&param->mutex);
822     param->start = true;
823     qemu_cond_signal(&param->cond);
824     qemu_mutex_unlock(&param->mutex);
825 }
826
827 static inline void start_decompression(DecompressParam *param)
828 {
829     qemu_mutex_lock(&param->mutex);
830     param->start = true;
831     qemu_cond_signal(&param->cond);
832     qemu_mutex_unlock(&param->mutex);
833 }
834
835 static uint64_t bytes_transferred;
836
837 static void flush_compressed_data(QEMUFile *f)
838 {
839     int idx, len, thread_count;
840
841     if (!migrate_use_compression()) {
842         return;
843     }
844     thread_count = migrate_compress_threads();
845     for (idx = 0; idx < thread_count; idx++) {
846         if (!comp_param[idx].done) {
847             qemu_mutex_lock(comp_done_lock);
848             while (!comp_param[idx].done && !quit_comp_thread) {
849                 qemu_cond_wait(comp_done_cond, comp_done_lock);
850             }
851             qemu_mutex_unlock(comp_done_lock);
852         }
853         if (!quit_comp_thread) {
854             len = qemu_put_qemu_file(f, comp_param[idx].file);
855             bytes_transferred += len;
856         }
857     }
858 }
859
860 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
861                                        ram_addr_t offset)
862 {
863     param->block = block;
864     param->offset = offset;
865 }
866
867 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
868                                            ram_addr_t offset,
869                                            uint64_t *bytes_transferred)
870 {
871     int idx, thread_count, bytes_xmit = -1, pages = -1;
872
873     thread_count = migrate_compress_threads();
874     qemu_mutex_lock(comp_done_lock);
875     while (true) {
876         for (idx = 0; idx < thread_count; idx++) {
877             if (comp_param[idx].done) {
878                 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
879                 set_compress_params(&comp_param[idx], block, offset);
880                 start_compression(&comp_param[idx]);
881                 pages = 1;
882                 acct_info.norm_pages++;
883                 *bytes_transferred += bytes_xmit;
884                 break;
885             }
886         }
887         if (pages > 0) {
888             break;
889         } else {
890             qemu_cond_wait(comp_done_cond, comp_done_lock);
891         }
892     }
893     qemu_mutex_unlock(comp_done_lock);
894
895     return pages;
896 }
897
898 /**
899  * ram_save_compressed_page: compress the given page and send it to the stream
900  *
901  * Returns: Number of pages written.
902  *
903  * @f: QEMUFile where to send the data
904  * @block: block that contains the page we want to send
905  * @offset: offset inside the block for the page
906  * @last_stage: if we are at the completion stage
907  * @bytes_transferred: increase it with the number of transferred bytes
908  */
909 static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
910                                     ram_addr_t offset, bool last_stage,
911                                     uint64_t *bytes_transferred)
912 {
913     int pages = -1;
914     uint64_t bytes_xmit;
915     uint8_t *p;
916     int ret;
917
918     p = block->host + offset;
919
920     bytes_xmit = 0;
921     ret = ram_control_save_page(f, block->offset,
922                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
923     if (bytes_xmit) {
924         *bytes_transferred += bytes_xmit;
925         pages = 1;
926     }
927     if (block == last_sent_block) {
928         offset |= RAM_SAVE_FLAG_CONTINUE;
929     }
930     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
931         if (ret != RAM_SAVE_CONTROL_DELAYED) {
932             if (bytes_xmit > 0) {
933                 acct_info.norm_pages++;
934             } else if (bytes_xmit == 0) {
935                 acct_info.dup_pages++;
936             }
937         }
938     } else {
939         /* When starting the process of a new block, the first page of
940          * the block should be sent out before other pages in the same
941          * block, and all the pages in the last block should have been sent
942          * out already. Keeping this order is important, because the 'cont' flag
943          * is used to avoid resending the block name.
944          */
945         if (block != last_sent_block) {
946             flush_compressed_data(f);
947             pages = save_zero_page(f, block, offset, p, bytes_transferred);
948             if (pages == -1) {
949                 set_compress_params(&comp_param[0], block, offset);
950                 /* Use the qemu thread to compress the data to make sure the
951                  * first page is sent out before other pages
952                  */
953                 bytes_xmit = do_compress_ram_page(&comp_param[0]);
954                 acct_info.norm_pages++;
955                 qemu_put_qemu_file(f, comp_param[0].file);
956                 *bytes_transferred += bytes_xmit;
957                 pages = 1;
958             }
959         } else {
960             pages = save_zero_page(f, block, offset, p, bytes_transferred);
961             if (pages == -1) {
962                 pages = compress_page_with_multi_thread(f, block, offset,
963                                                         bytes_transferred);
964             }
965         }
966     }
967
968     return pages;
969 }
970
971 /*
972  * Find the next dirty page and update any state associated with
973  * the search process.
974  *
975  * Returns: True if a page is found
976  *
977  * @f: Current migration stream.
978  * @pss: Data about the state of the current dirty page scan.
979  * @again: Set to false if the search has scanned the whole of RAM
980  * @ram_addr_abs: Pointer into which to store the address of the dirty page
981  *                within the global ram_addr space
982  */
983 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
984                              bool *again, ram_addr_t *ram_addr_abs)
985 {
986     pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
987                                               ram_addr_abs);
988     if (pss->complete_round && pss->block == last_seen_block &&
989         pss->offset >= last_offset) {
990         /*
991          * We've been once around the RAM and haven't found anything.
992          * Give up.
993          */
994         *again = false;
995         return false;
996     }
997     if (pss->offset >= pss->block->used_length) {
998         /* Didn't find anything in this RAM Block */
999         pss->offset = 0;
1000         pss->block = QLIST_NEXT_RCU(pss->block, next);
1001         if (!pss->block) {
1002             /* Hit the end of the list */
1003             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1004             /* Flag that we've looped */
1005             pss->complete_round = true;
1006             ram_bulk_stage = false;
1007             if (migrate_use_xbzrle()) {
1008                 /* If xbzrle is on, stop using the data compression at this
1009                  * point. In theory, xbzrle can do better than compression.
1010                  */
1011                 flush_compressed_data(f);
1012                 compression_switch = false;
1013             }
1014         }
1015         /* Didn't find anything this time, but try again on the new block */
1016         *again = true;
1017         return false;
1018     } else {
1019         /* Can go around again, but... */
1020         *again = true;
1021         /* We've found something so probably don't need to */
1022         return true;
1023     }
1024 }
1025
1026 /*
1027  * Helper for 'get_queued_page' - gets a page off the queue
1028  *      ms:      MigrationState in
1029  * *offset:      Used to return the offset within the RAMBlock
1030  * ram_addr_abs: global offset in the dirty/sent bitmaps
1031  *
1032  * Returns:      block (or NULL if none available)
1033  */
1034 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
1035                               ram_addr_t *ram_addr_abs)
1036 {
1037     RAMBlock *block = NULL;
1038
1039     qemu_mutex_lock(&ms->src_page_req_mutex);
1040     if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
1041         struct MigrationSrcPageRequest *entry =
1042                                 QSIMPLEQ_FIRST(&ms->src_page_requests);
1043         block = entry->rb;
1044         *offset = entry->offset;
1045         *ram_addr_abs = (entry->offset + entry->rb->offset) &
1046                         TARGET_PAGE_MASK;
1047
1048         if (entry->len > TARGET_PAGE_SIZE) {
1049             entry->len -= TARGET_PAGE_SIZE;
1050             entry->offset += TARGET_PAGE_SIZE;
1051         } else {
1052             memory_region_unref(block->mr);
1053             QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1054             g_free(entry);
1055         }
1056     }
1057     qemu_mutex_unlock(&ms->src_page_req_mutex);
1058
1059     return block;
1060 }
1061
1062 /*
1063  * Unqueue a page from the queue fed by postcopy page requests; skips pages
1064  * that are already sent (!dirty)
1065  *
1066  *      ms:      MigrationState in
1067  *     pss:      PageSearchStatus structure updated with found block/offset
1068  * ram_addr_abs: global offset in the dirty/sent bitmaps
1069  *
1070  * Returns:      true if a queued page is found
1071  */
1072 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1073                             ram_addr_t *ram_addr_abs)
1074 {
1075     RAMBlock  *block;
1076     ram_addr_t offset;
1077     bool dirty;
1078
1079     do {
1080         block = unqueue_page(ms, &offset, ram_addr_abs);
1081         /*
1082          * We're sending this page, and since it's postcopy nothing else
1083          * will dirty it, and we must make sure it doesn't get sent again
1084          * even if this queue request was received after the background
1085          * search already sent it.
1086          */
1087         if (block) {
1088             unsigned long *bitmap;
1089             bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1090             dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1091             if (!dirty) {
1092                 trace_get_queued_page_not_dirty(
1093                     block->idstr, (uint64_t)offset,
1094                     (uint64_t)*ram_addr_abs,
1095                     test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1096                          atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1097             } else {
1098                 trace_get_queued_page(block->idstr,
1099                                       (uint64_t)offset,
1100                                       (uint64_t)*ram_addr_abs);
1101             }
1102         }
1103
1104     } while (block && !dirty);
1105
1106     if (block) {
1107         /*
1108      * As soon as we start servicing pages out of order, we have
1109      * to kill the bulk stage, since the bulk stage assumes
1110      * (in migration_bitmap_find_and_reset_dirty) that every page is
1111      * dirty, and that's no longer true.
1112          */
1113         ram_bulk_stage = false;
1114
1115         /*
1116          * We want the background search to continue from the queued page
1117          * since the guest is likely to want other pages near to the page
1118          * it just requested.
1119          */
1120         pss->block = block;
1121         pss->offset = offset;
1122     }
1123
1124     return !!block;
1125 }
1126
1127 /**
1128  * flush_page_queue: Flush any remaining pages in the ram request queue.
1129  *    It should be empty at the end anyway, but in error cases there may be
1130  *    some left.
1131  *
1132  * ms: MigrationState
1133  */
1134 void flush_page_queue(MigrationState *ms)
1135 {
1136     struct MigrationSrcPageRequest *mspr, *next_mspr;
1137     /* This queue generally should be empty - but in the case of a failed
1138      * migration it might have some droppings left in it.
1139      */
1140     rcu_read_lock();
1141     QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1142         memory_region_unref(mspr->rb->mr);
1143         QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1144         g_free(mspr);
1145     }
1146     rcu_read_unlock();
1147 }
1148
1149 /**
1150  * Queue the pages for transmission, e.g. a request from postcopy destination
1151  *   ms: MigrationStatus in which the queue is held
1152  *   rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
1153  *   start: Offset from the start of the RAMBlock
1154  *   len: Length (in bytes) to send
1155  *   Return: 0 on success
1156  */
1157 int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1158                          ram_addr_t start, ram_addr_t len)
1159 {
1160     RAMBlock *ramblock;
1161
1162     rcu_read_lock();
1163     if (!rbname) {
1164         /* Reuse last RAMBlock */
1165         ramblock = ms->last_req_rb;
1166
1167         if (!ramblock) {
1168             /*
1169              * Shouldn't happen, we can't reuse the last RAMBlock if
1170              * it's the 1st request.
1171              */
1172             error_report("ram_save_queue_pages no previous block");
1173             goto err;
1174         }
1175     } else {
1176         ramblock = qemu_ram_block_by_name(rbname);
1177
1178         if (!ramblock) {
1179             /* We shouldn't be asked for a non-existent RAMBlock */
1180             error_report("ram_save_queue_pages no block '%s'", rbname);
1181             goto err;
1182         }
1183         ms->last_req_rb = ramblock;
1184     }
1185     trace_ram_save_queue_pages(ramblock->idstr, start, len);
1186     if (start+len > ramblock->used_length) {
1187         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1188                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1189                      __func__, start, len, ramblock->used_length);
1190         goto err;
1191     }
1192
1193     struct MigrationSrcPageRequest *new_entry =
1194         g_malloc0(sizeof(struct MigrationSrcPageRequest));
1195     new_entry->rb = ramblock;
1196     new_entry->offset = start;
1197     new_entry->len = len;
1198
1199     memory_region_ref(ramblock->mr);
1200     qemu_mutex_lock(&ms->src_page_req_mutex);
1201     QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1202     qemu_mutex_unlock(&ms->src_page_req_mutex);
1203     rcu_read_unlock();
1204
1205     return 0;
1206
1207 err:
1208     rcu_read_unlock();
1209     return -1;
1210 }
1211
1212 /**
1213  * ram_save_target_page: Save one target page
1214  *
1215  *
1216  * @f: QEMUFile where to send the data
1217  * @block: pointer to block that contains the page we want to send
1218  * @offset: offset inside the block for the page;
1219  * @last_stage: if we are at the completion stage
1220  * @bytes_transferred: increase it with the number of transferred bytes
1221  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1222  *
1223  * Returns: Number of pages written.
1224  */
1225 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
1226                                 RAMBlock *block, ram_addr_t offset,
1227                                 bool last_stage,
1228                                 uint64_t *bytes_transferred,
1229                                 ram_addr_t dirty_ram_abs)
1230 {
1231     int res = 0;
1232
1233     /* Check if the page is dirty, and if so, send it */
1234     if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1235         unsigned long *unsentmap;
1236         if (compression_switch && migrate_use_compression()) {
1237             res = ram_save_compressed_page(f, block, offset,
1238                                            last_stage,
1239                                            bytes_transferred);
1240         } else {
1241             res = ram_save_page(f, block, offset, last_stage,
1242                                 bytes_transferred);
1243         }
1244
1245         if (res < 0) {
1246             return res;
1247         }
1248         unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1249         if (unsentmap) {
1250             clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1251         }
1252     }
1253
1254     return res;
1255 }
1256
1257 /**
1258  * ram_save_host_page: Starting at *offset, send pages up to the end
1259  *                     of the current host page.  It's valid for the initial
1260  *                     offset to point into the middle of a host page
1261  *                     in which case the remainder of the hostpage is sent.
1262  *                     Only dirty target pages are sent.
1263  *
1264  * Returns: Number of pages written.
1265  *
1266  * @f: QEMUFile where to send the data
1267  * @block: pointer to block that contains the page we want to send
1268  * @offset: offset inside the block for the page; updated to last target page
1269  *          sent
1270  * @last_stage: if we are at the completion stage
1271  * @bytes_transferred: increase it with the number of transferred bytes
1272  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1273  */
1274 static int ram_save_host_page(MigrationState *ms, QEMUFile *f, RAMBlock *block,
1275                               ram_addr_t *offset, bool last_stage,
1276                               uint64_t *bytes_transferred,
1277                               ram_addr_t dirty_ram_abs)
1278 {
1279     int tmppages, pages = 0;
1280     do {
1281         tmppages = ram_save_target_page(ms, f, block, *offset, last_stage,
1282                                         bytes_transferred, dirty_ram_abs);
1283         if (tmppages < 0) {
1284             return tmppages;
1285         }
1286
1287         pages += tmppages;
1288         *offset += TARGET_PAGE_SIZE;
1289         dirty_ram_abs += TARGET_PAGE_SIZE;
1290     } while (*offset & (qemu_host_page_size - 1));
1291
1292     /* The offset we leave with is the last one we looked at */
1293     *offset -= TARGET_PAGE_SIZE;
1294     return pages;
1295 }
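
/*
 * Editor's note: a hypothetical sketch of the loop structure used by
 * ram_save_host_page() above.  Target pages are sent one at a time
 * until the offset crosses a host-page boundary, so on hosts where the
 * host page is larger than the target page (e.g. 64K vs 4K) one call
 * covers the whole host page.  save_one_target_page_sketch() stands in
 * for ram_save_target_page(), and host_page_size is assumed to be a
 * power of two.
 */
#include <stdint.h>

static int save_one_target_page_sketch(uint64_t offset)
{
    (void)offset;
    return 1;                        /* pretend one page was written */
}

static int save_host_page_sketch(uint64_t *offset,
                                 uint64_t target_page_size,
                                 uint64_t host_page_size)
{
    int pages = 0;

    do {
        pages += save_one_target_page_sketch(*offset);
        *offset += target_page_size;
    } while (*offset & (host_page_size - 1));

    *offset -= target_page_size;     /* leave offset at the last page sent */
    return pages;
}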
1296
1297 /**
1298  * ram_find_and_save_block: Finds a dirty page and sends it to f
1299  *
1300  * Called within an RCU critical section.
1301  *
1302  * Returns:  The number of pages written
1303  *           0 means no dirty pages
1304  *
1305  * @f: QEMUFile where to send the data
1306  * @last_stage: if we are at the completion stage
1307  * @bytes_transferred: increase it with the number of transferred bytes
1308  *
1309  * On systems where host-page-size > target-page-size it will send all the
1310  * pages in a host page that are dirty.
1311  */
1312
1313 static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1314                                    uint64_t *bytes_transferred)
1315 {
1316     PageSearchStatus pss;
1317     MigrationState *ms = migrate_get_current();
1318     int pages = 0;
1319     bool again, found;
1320     ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1321                                  ram_addr_t space */
1322
1323     pss.block = last_seen_block;
1324     pss.offset = last_offset;
1325     pss.complete_round = false;
1326
1327     if (!pss.block) {
1328         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1329     }
1330
1331     do {
1332         again = true;
1333         found = get_queued_page(ms, &pss, &dirty_ram_abs);
1334
1335         if (!found) {
1336             /* priority queue empty, so just search for something dirty */
1337             found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
1338         }
1339
1340         if (found) {
1341             pages = ram_save_host_page(ms, f, pss.block, &pss.offset,
1342                                        last_stage, bytes_transferred,
1343                                        dirty_ram_abs);
1344         }
1345     } while (!pages && again);
1346
1347     last_seen_block = pss.block;
1348     last_offset = pss.offset;
1349
1350     return pages;
1351 }
1352
1353 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1354 {
1355     uint64_t pages = size / TARGET_PAGE_SIZE;
1356     if (zero) {
1357         acct_info.dup_pages += pages;
1358     } else {
1359         acct_info.norm_pages += pages;
1360         bytes_transferred += size;
1361         qemu_update_position(f, size);
1362     }
1363 }
1364
1365 static ram_addr_t ram_save_remaining(void)
1366 {
1367     return migration_dirty_pages;
1368 }
1369
1370 uint64_t ram_bytes_remaining(void)
1371 {
1372     return ram_save_remaining() * TARGET_PAGE_SIZE;
1373 }
1374
1375 uint64_t ram_bytes_transferred(void)
1376 {
1377     return bytes_transferred;
1378 }
1379
1380 uint64_t ram_bytes_total(void)
1381 {
1382     RAMBlock *block;
1383     uint64_t total = 0;
1384
1385     rcu_read_lock();
1386     QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1387         total += block->used_length;
1388     rcu_read_unlock();
1389     return total;
1390 }
1391
1392 void free_xbzrle_decoded_buf(void)
1393 {
1394     g_free(xbzrle_decoded_buf);
1395     xbzrle_decoded_buf = NULL;
1396 }
1397
1398 static void migration_bitmap_free(struct BitmapRcu *bmap)
1399 {
1400     g_free(bmap->bmap);
1401     g_free(bmap->unsentmap);
1402     g_free(bmap);
1403 }
1404
1405 static void ram_migration_cleanup(void *opaque)
1406 {
1407     /* The caller holds the iothread lock or is in a bottom half, so there
1408      * is no writing race against this migration_bitmap
1409      */
1410     struct BitmapRcu *bitmap = migration_bitmap_rcu;
1411     atomic_rcu_set(&migration_bitmap_rcu, NULL);
1412     if (bitmap) {
1413         memory_global_dirty_log_stop();
1414         call_rcu(bitmap, migration_bitmap_free, rcu);
1415     }
1416
1417     XBZRLE_cache_lock();
1418     if (XBZRLE.cache) {
1419         cache_fini(XBZRLE.cache);
1420         g_free(XBZRLE.encoded_buf);
1421         g_free(XBZRLE.current_buf);
1422         XBZRLE.cache = NULL;
1423         XBZRLE.encoded_buf = NULL;
1424         XBZRLE.current_buf = NULL;
1425     }
1426     XBZRLE_cache_unlock();
1427 }
1428
1429 static void reset_ram_globals(void)
1430 {
1431     last_seen_block = NULL;
1432     last_sent_block = NULL;
1433     last_offset = 0;
1434     last_version = ram_list.version;
1435     ram_bulk_stage = true;
1436 }
1437
1438 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1439
1440 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1441 {
1442     /* called in qemu main thread, so there is
1443      * no writing race against this migration_bitmap
1444      */
1445     if (migration_bitmap_rcu) {
1446         struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1447         bitmap = g_new(struct BitmapRcu, 1);
1448         bitmap->bmap = bitmap_new(new);
1449
1450         /* Prevent bits in the migration_bitmap from being set
1451          * by migration_bitmap_sync_range() at the same time.
1452          * It is safe for migration if bits in migration_bitmap are cleared
1453          * at the same time.
1454          */
1455         qemu_mutex_lock(&migration_bitmap_mutex);
1456         bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1457         bitmap_set(bitmap->bmap, old, new - old);
1458
1459         /* We don't have a way to safely extend the unsentmap
1460          * with RCU; so mark it as missing, and entry to postcopy
1461          * will fail.
1462          */
1463         bitmap->unsentmap = NULL;
1464
1465         atomic_rcu_set(&migration_bitmap_rcu, bitmap);
1466         qemu_mutex_unlock(&migration_bitmap_mutex);
1467         migration_dirty_pages += new - old;
1468         call_rcu(old_bitmap, migration_bitmap_free, rcu);
1469     }
1470 }
1471
1472 /*
1473  * 'expected' is the value you expect the bitmap mostly to be full
1474  * of; it won't bother printing lines that are all this value.
1475  * If 'todump' is null the migration bitmap is dumped.
1476  */
1477 void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1478 {
1479     int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1480
1481     int64_t cur;
1482     int64_t linelen = 128;
1483     char linebuf[129];
1484
1485     if (!todump) {
1486         todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1487     }
1488
1489     for (cur = 0; cur < ram_pages; cur += linelen) {
1490         int64_t curb;
1491         bool found = false;
1492         /*
1493          * Last line; catch the case where the line length
1494          * is longer than remaining ram
1495          */
1496         if (cur + linelen > ram_pages) {
1497             linelen = ram_pages - cur;
1498         }
1499         for (curb = 0; curb < linelen; curb++) {
1500             bool thisbit = test_bit(cur + curb, todump);
1501             linebuf[curb] = thisbit ? '1' : '.';
1502             found = found || (thisbit != expected);
1503         }
1504         if (found) {
1505             linebuf[curb] = '\0';
1506             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
1507         }
1508     }
1509 }
1510
1511 /* **** functions for postcopy ***** */
1512
1513 /*
1514  * Callback from postcopy_each_ram_send_discard for each RAMBlock
1515  * Note: At this point the 'unsentmap' is the processed bitmap combined
1516  *       with the dirtymap; so a '1' means it's either dirty or unsent.
1517  * start, length: Index into the bitmap of the first bit representing
1518  *            the named block, and its length in target-pages
1519  */
1520 static int postcopy_send_discard_bm_ram(MigrationState *ms,
1521                                         PostcopyDiscardState *pds,
1522                                         unsigned long start,
1523                                         unsigned long length)
1524 {
1525     unsigned long end = start + length; /* one after the end */
1526     unsigned long current;
1527     unsigned long *unsentmap;
1528
1529     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1530     for (current = start; current < end; ) {
1531         unsigned long one = find_next_bit(unsentmap, end, current);
1532
1533         if (one <= end) {
1534             unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1535             unsigned long discard_length;
1536
1537             if (zero >= end) {
1538                 discard_length = end - one;
1539             } else {
1540                 discard_length = zero - one;
1541             }
1542             postcopy_discard_send_range(ms, pds, one, discard_length);
1543             current = one + discard_length;
1544         } else {
1545             current = one;
1546         }
1547     }
1548
1549     return 0;
1550 }
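
/*
 * Editor's note: a minimal sketch of the run extraction performed by
 * postcopy_send_discard_bm_ram() above: walk the map between start and
 * end and, for every contiguous run of pages still marked unsent/dirty,
 * report (run_start, run_length); the real code sends each range as a
 * discard command via postcopy_discard_send_range().  A plain bool
 * array stands in for the real bitmap, and report_discard_sketch() is a
 * hypothetical callback.
 */
#include <stdbool.h>
#include <stddef.h>

static void report_discard_sketch(size_t start, size_t length)
{
    (void)start;
    (void)length;                    /* would send a discard command */
}

static void send_discard_runs_sketch(const bool *unsent,
                                     size_t start, size_t end)
{
    size_t cur = start;

    while (cur < end) {
        if (!unsent[cur]) {
            cur++;                   /* already sent, keep scanning */
            continue;
        }
        /* found the start of a run of unsent pages */
        size_t run_start = cur;

        while (cur < end && unsent[cur]) {
            cur++;
        }
        report_discard_sketch(run_start, cur - run_start);
    }
}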
1551
1552 /*
1553  * Utility for the outgoing postcopy code.
1554  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
1555  *   passing it bitmap indexes and name.
1556  * Returns: 0 on success
1557  * (qemu_ram_foreach_block ends up passing unscaled lengths
1558  *  which would mean postcopy code would have to deal with target page)
1559  */
1560 static int postcopy_each_ram_send_discard(MigrationState *ms)
1561 {
1562     struct RAMBlock *block;
1563     int ret;
1564
1565     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1566         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1567         PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1568                                                                first,
1569                                                                block->idstr);
1570
1571         /*
1572          * Postcopy sends chunks of bitmap over the wire, but it
1573          * just needs indexes at this point, avoids it having
1574          * target page specific code.
1575          */
1576         ret = postcopy_send_discard_bm_ram(ms, pds, first,
1577                                     block->used_length >> TARGET_PAGE_BITS);
1578         postcopy_discard_send_finish(ms, pds);
1579         if (ret) {
1580             return ret;
1581         }
1582     }
1583
1584     return 0;
1585 }
1586
1587 /*
1588  * Helper for postcopy_chunk_hostpages; it's called twice to clean up
1589  *   the two bitmaps, which are similar but one is inverted.
1590  *
1591  * We search for runs of target-pages that don't start or end on a
1592  * host page boundary;
1593  * unsent_pass=true: Cleans up partially unsent host pages by searching
1594  *                 the unsentmap
1595  * unsent_pass=false: Cleans up partially dirty host pages by searching
1596  *                 the main migration bitmap
1597  *
1598  */
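/*
 * Worked example (hypothetical numbers): with host_ratio == 4, a dirty run
 * covering target pages 5..9 on the !unsent_pass pass starts mid host page 1
 * (TPs 4..7) and ends mid host page 2 (TPs 8..11); assuming both host pages
 * were fully sent during precopy, each is discarded in full and all eight
 * target pages are re-marked dirty and unsent.
 */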
1599 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1600                                           RAMBlock *block,
1601                                           PostcopyDiscardState *pds)
1602 {
1603     unsigned long *bitmap;
1604     unsigned long *unsentmap;
1605     unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
1606     unsigned long first = block->offset >> TARGET_PAGE_BITS;
1607     unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1608     unsigned long last = first + (len - 1);
1609     unsigned long run_start;
1610
1611     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1612     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1613
1614     if (unsent_pass) {
1615         /* Find a sent page */
1616         run_start = find_next_zero_bit(unsentmap, last + 1, first);
1617     } else {
1618         /* Find a dirty page */
1619         run_start = find_next_bit(bitmap, last + 1, first);
1620     }
1621
1622     while (run_start <= last) {
1623         bool do_fixup = false;
1624         unsigned long fixup_start_addr;
1625         unsigned long host_offset;
1626
1627         /*
1628          * If the start of this run of pages is in the middle of a host
1629          * page, then we need to fixup this host page.
1630          */
1631         host_offset = run_start % host_ratio;
1632         if (host_offset) {
1633             do_fixup = true;
1634             run_start -= host_offset;
1635             fixup_start_addr = run_start;
1636             /* For the next pass */
1637             run_start = run_start + host_ratio;
1638         } else {
1639             /* Find the end of this run */
1640             unsigned long run_end;
1641             if (unsent_pass) {
1642                 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1643             } else {
1644                 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1645             }
1646             /*
1647              * If the end isn't at the start of a host page, then the
1648              * run doesn't finish at the end of a host page
1649              * and we need to discard.
1650              */
1651             host_offset = run_end % host_ratio;
1652             if (host_offset) {
1653                 do_fixup = true;
1654                 fixup_start_addr = run_end - host_offset;
1655                 /*
1656                  * This host page has gone, the next loop iteration starts
1657                  * from after the fixup
1658                  */
1659                 run_start = fixup_start_addr + host_ratio;
1660             } else {
1661                 /*
1662                  * No discards on this iteration, next loop starts from
1663                  * next sent/dirty page
1664                  */
1665                 run_start = run_end + 1;
1666             }
1667         }
1668
1669         if (do_fixup) {
1670             unsigned long page;
1671
1672             /* Tell the destination to discard this page */
1673             if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1674                 /* For the unsent_pass we:
1675                  *     discard partially sent pages
1676                  * For the !unsent_pass (dirty) we:
1677                  *     discard partially dirty pages that were sent
1678                  *     (any partially sent pages were already discarded
1679                  *     by the previous unsent_pass)
1680                  */
1681                 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1682                                             host_ratio);
1683             }
1684
1685             /* Clean up the bitmap */
1686             for (page = fixup_start_addr;
1687                  page < fixup_start_addr + host_ratio; page++) {
1688                 /* All pages in this host page are now not sent */
1689                 set_bit(page, unsentmap);
1690
1691                 /*
1692                  * Remark them as dirty, updating the count for any pages
1693                  * that weren't previously dirty.
1694                  */
1695                 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1696             }
1697         }
1698
1699         if (unsent_pass) {
1700             /* Find the next sent page for the next iteration */
1701             run_start = find_next_zero_bit(unsentmap, last + 1,
1702                                            run_start);
1703         } else {
1704             /* Find the next dirty page for the next iteration */
1705             run_start = find_next_bit(bitmap, last + 1, run_start);
1706         }
1707     }
1708 }
1709
1710 /*
1711  * Utility for the outgoing postcopy code.
1712  *
1713  * Discard any partially sent host-page size chunks, mark any partially
1714  * dirty host-page size chunks as all dirty.
1715  *
1716  * Returns: 0 on success
1717  */
1718 static int postcopy_chunk_hostpages(MigrationState *ms)
1719 {
1720     struct RAMBlock *block;
1721
1722     if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1723         /* Easy case - TPS==HPS - nothing to be done */
1724         return 0;
1725     }
1726
1727     /* Easiest way to make sure we don't resume in the middle of a host-page */
1728     last_seen_block = NULL;
1729     last_sent_block = NULL;
1730     last_offset     = 0;
1731
1732     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1733         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1734
1735         PostcopyDiscardState *pds =
1736                          postcopy_discard_send_init(ms, first, block->idstr);
1737
1738         /* First pass: Discard all partially sent host pages */
1739         postcopy_chunk_hostpages_pass(ms, true, block, pds);
1740         /*
1741          * Second pass: Ensure that all partially dirty host pages are made
1742          * fully dirty.
1743          */
1744         postcopy_chunk_hostpages_pass(ms, false, block, pds);
1745
1746         postcopy_discard_send_finish(ms, pds);
1747     } /* ram_list loop */
1748
1749     return 0;
1750 }
1751
1752 /*
1753  * Transmit the set of pages to be discarded after precopy to the target;
1754  * these are pages that:
1755  *     a) have been previously transmitted but are now dirty again
1756  *     b) have never been transmitted; this ensures that any pages on the
1757  *        destination that have been mapped by background tasks get
1758  *        discarded (transparent huge pages are the specific concern)
1759  * Hopefully this set is pretty sparse.
1760  */
1761 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1762 {
1763     int ret;
1764     unsigned long *bitmap, *unsentmap;
1765
1766     rcu_read_lock();
1767
1768     /* This should be our last sync, the src is now paused */
1769     migration_bitmap_sync();
1770
1771     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1772     if (!unsentmap) {
1773         /* We don't have a safe way to resize the unsentmap, so
1774          * if the bitmap was resized it will be NULL at this
1775          * point.
1776          */
1777         error_report("migration ram resized during precopy phase");
1778         rcu_read_unlock();
1779         return -EINVAL;
1780     }
1781
1782     /* Deal with TPS != HPS */
1783     ret = postcopy_chunk_hostpages(ms);
1784     if (ret) {
1785         rcu_read_unlock();
1786         return ret;
1787     }
1788
1789     /*
1790      * Update the unsentmap to be unsentmap = unsentmap | dirty
1791      */
1792     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1793     bitmap_or(unsentmap, unsentmap, bitmap,
1794                last_ram_offset() >> TARGET_PAGE_BITS);
1795
1796
1797     trace_ram_postcopy_send_discard_bitmap();
1798 #ifdef DEBUG_POSTCOPY
1799     ram_debug_dump_bitmap(unsentmap, true);
1800 #endif
1801
1802     ret = postcopy_each_ram_send_discard(ms);
1803     rcu_read_unlock();
1804
1805     return ret;
1806 }
1807
1808 /*
1809  * At the start of the postcopy phase of migration, any now-dirty
1810  * precopied pages are discarded.
1811  *
1812  * start, length describe a byte address range within the RAMBlock
1813  *
1814  * Returns 0 on success.
1815  */
1816 int ram_discard_range(MigrationIncomingState *mis,
1817                       const char *block_name,
1818                       uint64_t start, size_t length)
1819 {
1820     int ret = -1;
1821
1822     rcu_read_lock();
1823     RAMBlock *rb = qemu_ram_block_by_name(block_name);
1824
1825     if (!rb) {
1826         error_report("ram_discard_range: Failed to find block '%s'",
1827                      block_name);
1828         goto err;
1829     }
1830
1831     uint8_t *host_startaddr = rb->host + start;
1832
1833     if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1834         error_report("ram_discard_range: Unaligned start address: %p",
1835                      host_startaddr);
1836         goto err;
1837     }
1838
1839     if ((start + length) <= rb->used_length) {
1840         uint8_t *host_endaddr = host_startaddr + length;
1841         if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1842             error_report("ram_discard_range: Unaligned end address: %p",
1843                          host_endaddr);
1844             goto err;
1845         }
1846         ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1847     } else {
1848         error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1849                      "/%zx/" RAM_ADDR_FMT")",
1850                      block_name, start, length, rb->used_length);
1851     }
1852
1853 err:
1854     rcu_read_unlock();
1855
1856     return ret;
1857 }
1858
1859
1860 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
1861  * a long-running RCU critical section.  When RCU reclaims in the code
1862  * start to become numerous it will be necessary to reduce the
1863  * granularity of these critical sections.
1864  */
1865
1866 static int ram_save_setup(QEMUFile *f, void *opaque)
1867 {
1868     RAMBlock *block;
1869     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1870
1871     dirty_rate_high_cnt = 0;
1872     bitmap_sync_count = 0;
1873     migration_bitmap_sync_init();
1874     qemu_mutex_init(&migration_bitmap_mutex);
1875
1876     if (migrate_use_xbzrle()) {
1877         XBZRLE_cache_lock();
1878         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1879                                   TARGET_PAGE_SIZE,
1880                                   TARGET_PAGE_SIZE);
1881         if (!XBZRLE.cache) {
1882             XBZRLE_cache_unlock();
1883             error_report("Error creating cache");
1884             return -1;
1885         }
1886         XBZRLE_cache_unlock();
1887
1888         /* We prefer not to abort if there is no memory */
1889         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1890         if (!XBZRLE.encoded_buf) {
1891             error_report("Error allocating encoded_buf");
1892             return -1;
1893         }
1894
1895         XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1896         if (!XBZRLE.current_buf) {
1897             error_report("Error allocating current_buf");
1898             g_free(XBZRLE.encoded_buf);
1899             XBZRLE.encoded_buf = NULL;
1900             return -1;
1901         }
1902
1903         acct_clear();
1904     }
1905
1906     /* iothread lock needed for ram_list.dirty_memory[] */
1907     qemu_mutex_lock_iothread();
1908     qemu_mutex_lock_ramlist();
1909     rcu_read_lock();
1910     bytes_transferred = 0;
1911     reset_ram_globals();
1912
1913     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
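    /* Allocate the migration bitmap and start with every page marked dirty
     * so that all of RAM is transmitted at least once */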
1914     migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
1915     migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
1916     bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
1917
1918     if (migrate_postcopy_ram()) {
1919         migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
1920         bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
1921     }
1922
1923     /*
1924      * Count the total number of pages used by ram blocks not including any
1925      * gaps due to alignment or unplugs.
1926      */
1927     migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1928
1929     memory_global_dirty_log_start();
1930     migration_bitmap_sync();
1931     qemu_mutex_unlock_ramlist();
1932     qemu_mutex_unlock_iothread();
1933
1934     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1935
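    /* Send each block's name and used length so the destination can match
     * up (and if necessary resize) its RAMBlocks before any pages arrive */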
1936     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1937         qemu_put_byte(f, strlen(block->idstr));
1938         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1939         qemu_put_be64(f, block->used_length);
1940     }
1941
1942     rcu_read_unlock();
1943
1944     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1945     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1946
1947     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1948
1949     return 0;
1950 }
1951
1952 static int ram_save_iterate(QEMUFile *f, void *opaque)
1953 {
1954     int ret;
1955     int i;
1956     int64_t t0;
1957     int pages_sent = 0;
1958
1959     rcu_read_lock();
1960     if (ram_list.version != last_version) {
1961         reset_ram_globals();
1962     }
1963
1964     /* Read version before ram_list.blocks */
1965     smp_rmb();
1966
1967     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1968
1969     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1970     i = 0;
1971     while ((ret = qemu_file_rate_limit(f)) == 0) {
1972         int pages;
1973
1974         pages = ram_find_and_save_block(f, false, &bytes_transferred);
1975         /* no more pages to send */
1976         if (pages == 0) {
1977             break;
1978         }
1979         pages_sent += pages;
1980         acct_info.iterations++;
1981
1982         /* we want to check in the 1st loop, just in case it was the 1st time
1983            and we had to sync the dirty bitmap.
1984            qemu_clock_get_ns() is a bit expensive, so we only check once
1985            every few iterations
1986         */
1987         if ((i & 63) == 0) {
1988             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
1989             if (t1 > MAX_WAIT) {
1990                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
1991                         t1, i);
1992                 break;
1993             }
1994         }
1995         i++;
1996     }
1997     flush_compressed_data(f);
1998     rcu_read_unlock();
1999
2000     /*
2001      * Must occur before EOS (or any QEMUFile operation)
2002      * because of RDMA protocol.
2003      */
2004     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2005
2006     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
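    /* account for the 8 bytes of the EOS marker just written */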
2007     bytes_transferred += 8;
2008
2009     ret = qemu_file_get_error(f);
2010     if (ret < 0) {
2011         return ret;
2012     }
2013
2014     return pages_sent;
2015 }
2016
2017 /* Called with iothread lock */
2018 static int ram_save_complete(QEMUFile *f, void *opaque)
2019 {
2020     rcu_read_lock();
2021
2022     if (!migration_in_postcopy(migrate_get_current())) {
2023         migration_bitmap_sync();
2024     }
2025
2026     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2027
2028     /* try transferring iterative blocks of memory */
2029
2030     /* flush all remaining blocks regardless of rate limiting */
2031     while (true) {
2032         int pages;
2033
2034         pages = ram_find_and_save_block(f, true, &bytes_transferred);
2035         /* no more blocks to send */
2036         if (pages == 0) {
2037             break;
2038         }
2039     }
2040
2041     flush_compressed_data(f);
2042     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2043
2044     rcu_read_unlock();
2045
2046     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2047
2048     return 0;
2049 }
2050
2051 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2052                              uint64_t *non_postcopiable_pending,
2053                              uint64_t *postcopiable_pending)
2054 {
2055     uint64_t remaining_size;
2056
2057     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2058
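    /* If what's left looks small enough to finish, and we're still in
     * precopy, do one more bitmap sync under the iothread lock to refresh
     * the estimate before deciding whether to complete */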
2059     if (!migration_in_postcopy(migrate_get_current()) &&
2060         remaining_size < max_size) {
2061         qemu_mutex_lock_iothread();
2062         rcu_read_lock();
2063         migration_bitmap_sync();
2064         rcu_read_unlock();
2065         qemu_mutex_unlock_iothread();
2066         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2067     }
2068
2069     /* We can do postcopy, and all the data is postcopiable */
2070     *postcopiable_pending += remaining_size;
2071 }
2072
2073 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2074 {
2075     unsigned int xh_len;
2076     int xh_flags;
2077
2078     if (!xbzrle_decoded_buf) {
2079         xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2080     }
2081
2082     /* extract RLE header */
2083     xh_flags = qemu_get_byte(f);
2084     xh_len = qemu_get_be16(f);
2085
2086     if (xh_flags != ENCODING_FLAG_XBZRLE) {
2087         error_report("Failed to load XBZRLE page - wrong compression!");
2088         return -1;
2089     }
2090
2091     if (xh_len > TARGET_PAGE_SIZE) {
2092         error_report("Failed to load XBZRLE page - len overflow!");
2093         return -1;
2094     }
2095     /* load data and decode */
2096     qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
2097
2098     /* decode RLE */
2099     if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
2100                              TARGET_PAGE_SIZE) == -1) {
2101         error_report("Failed to load XBZRLE page - decode error!");
2102         return -1;
2103     }
2104
2105     return 0;
2106 }
2107
2108 /* Must be called from within an RCU critical section.
2109  * Returns a pointer from within the RCU-protected ram_list.
2110  *
2112  * Read a RAMBlock ID from the stream f, find the host address of the
2113  * start of that block and add on 'offset'
2114  *
2115  * f: Stream to read from
2116  * offset: Offset within the block
2117  * flags: Page flags (mostly to see if it's a continuation of previous block)
2118  */
2119 static inline void *host_from_stream_offset(QEMUFile *f,
2120                                             ram_addr_t offset,
2121                                             int flags)
2122 {
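    /* 'block' is cached across calls: RAM_SAVE_FLAG_CONTINUE means the page
     * is in the same block as the previous one, so the name lookup is skipped */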
2123     static RAMBlock *block = NULL;
2124     char id[256];
2125     uint8_t len;
2126
2127     if (flags & RAM_SAVE_FLAG_CONTINUE) {
2128         if (!block || block->max_length <= offset) {
2129             error_report("Ack, bad migration stream!");
2130             return NULL;
2131         }
2132
2133         return block->host + offset;
2134     }
2135
2136     len = qemu_get_byte(f);
2137     qemu_get_buffer(f, (uint8_t *)id, len);
2138     id[len] = 0;
2139
2140     block = qemu_ram_block_by_name(id);
2141     if (block && block->max_length > offset) {
2142         return block->host + offset;
2143     }
2144
2145     error_report("Can't find block %s", id);
2146     return NULL;
2147 }
2148
2149 /*
2150  * Fill a page (or a whole RDMA chunk) with the byte 'ch'; the memset is
2151  * skipped when ch is zero and the destination is already all zeroes.
2152  */
2153 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2154 {
2155     if (ch != 0 || !is_zero_range(host, size)) {
2156         memset(host, ch, size);
2157     }
2158 }
2159
2160 static void *do_data_decompress(void *opaque)
2161 {
2162     DecompressParam *param = opaque;
2163     unsigned long pagesize;
2164
2165     while (!quit_decomp_thread) {
2166         qemu_mutex_lock(&param->mutex);
2167         while (!param->start && !quit_decomp_thread) {
2168             qemu_cond_wait(&param->cond, &param->mutex);
2169             pagesize = TARGET_PAGE_SIZE;
2170             if (!quit_decomp_thread) {
2171                 /* uncompress() can fail in some cases, especially when
2172                  * the page was dirtied while it was being compressed; that's
2173                  * not a problem because the dirty page will be retransmitted
2174                  * and uncompress() won't corrupt the data in other pages.
2175                  */
2176                 uncompress((Bytef *)param->des, &pagesize,
2177                            (const Bytef *)param->compbuf, param->len);
2178             }
2179             param->start = false;
2180         }
2181         qemu_mutex_unlock(&param->mutex);
2182     }
2183
2184     return NULL;
2185 }
2186
2187 void migrate_decompress_threads_create(void)
2188 {
2189     int i, thread_count;
2190
2191     thread_count = migrate_decompress_threads();
2192     decompress_threads = g_new0(QemuThread, thread_count);
2193     decomp_param = g_new0(DecompressParam, thread_count);
2194     compressed_data_buf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2195     quit_decomp_thread = false;
2196     for (i = 0; i < thread_count; i++) {
2197         qemu_mutex_init(&decomp_param[i].mutex);
2198         qemu_cond_init(&decomp_param[i].cond);
2199         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2200         qemu_thread_create(decompress_threads + i, "decompress",
2201                            do_data_decompress, decomp_param + i,
2202                            QEMU_THREAD_JOINABLE);
2203     }
2204 }
2205
2206 void migrate_decompress_threads_join(void)
2207 {
2208     int i, thread_count;
2209
2210     quit_decomp_thread = true;
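    /* Wake every worker so it notices quit_decomp_thread and exits */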
2211     thread_count = migrate_decompress_threads();
2212     for (i = 0; i < thread_count; i++) {
2213         qemu_mutex_lock(&decomp_param[i].mutex);
2214         qemu_cond_signal(&decomp_param[i].cond);
2215         qemu_mutex_unlock(&decomp_param[i].mutex);
2216     }
2217     for (i = 0; i < thread_count; i++) {
2218         qemu_thread_join(decompress_threads + i);
2219         qemu_mutex_destroy(&decomp_param[i].mutex);
2220         qemu_cond_destroy(&decomp_param[i].cond);
2221         g_free(decomp_param[i].compbuf);
2222     }
2223     g_free(decompress_threads);
2224     g_free(decomp_param);
2225     g_free(compressed_data_buf);
2226     decompress_threads = NULL;
2227     decomp_param = NULL;
2228     compressed_data_buf = NULL;
2229 }
2230
2231 static void decompress_data_with_multi_threads(uint8_t *compbuf,
2232                                                void *host, int len)
2233 {
2234     int idx, thread_count;
2235
2236     thread_count = migrate_decompress_threads();
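    /* Busy-wait until an idle decompression thread is found; 'start' is
     * cleared by do_data_decompress once a thread has finished its page */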
2237     while (true) {
2238         for (idx = 0; idx < thread_count; idx++) {
2239             if (!decomp_param[idx].start) {
2240                 memcpy(decomp_param[idx].compbuf, compbuf, len);
2241                 decomp_param[idx].des = host;
2242                 decomp_param[idx].len = len;
2243                 start_decompression(&decomp_param[idx]);
2244                 break;
2245             }
2246         }
2247         if (idx < thread_count) {
2248             break;
2249         }
2250     }
2251 }
2252
2253 /*
2254  * Allocate data structures etc needed by incoming migration with postcopy-ram.
2255  * postcopy-ram's similarly named postcopy_ram_incoming_init does the work.
2256  */
2257 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2258 {
2259     size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2260
2261     return postcopy_ram_incoming_init(mis, ram_pages);
2262 }
2263
2264 /*
2265  * Called in postcopy mode by ram_load().
2266  * rcu_read_lock is taken prior to this being called.
2267  */
2268 static int ram_load_postcopy(QEMUFile *f)
2269 {
2270     int flags = 0, ret = 0;
2271     bool place_needed = false;
2272     bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
2273     MigrationIncomingState *mis = migration_incoming_get_current();
2274     /* Temporary page that is later 'placed' */
2275     void *postcopy_host_page = postcopy_get_tmp_page(mis);
2276     void *last_host = NULL;
2277     bool all_zero = false;
2278
2279     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2280         ram_addr_t addr;
2281         void *host = NULL;
2282         void *page_buffer = NULL;
2283         void *place_source = NULL;
2284         uint8_t ch;
2285
2286         addr = qemu_get_be64(f);
2287         flags = addr & ~TARGET_PAGE_MASK;
2288         addr &= TARGET_PAGE_MASK;
2289
2290         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2291         place_needed = false;
2292         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
2293             host = host_from_stream_offset(f, addr, flags);
2294             if (!host) {
2295                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2296                 ret = -EINVAL;
2297                 break;
2298             }
2299             page_buffer = host;
2300             /*
2301              * Postcopy requires that we place whole host pages atomically.
2302              * To make it atomic, the data is read into a temporary page
2303              * that's moved into place later.
2304              * The migration protocol uses (possibly smaller) target pages;
2305              * however, the source ensures it always sends all the components
2306              * of a host page in order.
2307              */
2308             page_buffer = postcopy_host_page +
2309                           ((uintptr_t)host & ~qemu_host_page_mask);
2310             /* 1st TP within the HP: if all its TPs are zero, optimise the place */
2311             if (!((uintptr_t)host & ~qemu_host_page_mask)) {
2312                 all_zero = true;
2313             } else {
2314                 /* not the 1st TP within the HP */
2315                 if (host != (last_host + TARGET_PAGE_SIZE)) {
2316                     error_report("Non-sequential target page %p/%p",
2317                                  host, last_host);
2318                     ret = -EINVAL;
2319                     break;
2320                 }
2321             }
2322
2323
2324             /*
2325              * If it's the last part of a host page then we place the host
2326              * page
2327              */
2328             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
2329                                      ~qemu_host_page_mask) == 0;
2330             place_source = postcopy_host_page;
2331         }
2332         last_host = host;
2333
2334         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2335         case RAM_SAVE_FLAG_COMPRESS:
2336             ch = qemu_get_byte(f);
2337             memset(page_buffer, ch, TARGET_PAGE_SIZE);
2338             if (ch) {
2339                 all_zero = false;
2340             }
2341             break;
2342
2343         case RAM_SAVE_FLAG_PAGE:
2344             all_zero = false;
2345             if (!place_needed || !matching_page_sizes) {
2346                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2347             } else {
2348                 /* Avoids an extra copy out of the QEMUFile buffer: postcopy
2349                  * will copy the page into place later anyway; we can only do
2350                  * this when the read is done in one go (matching page sizes)
2351                  */
2352                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2353                                          TARGET_PAGE_SIZE);
2354             }
2355             break;
2356         case RAM_SAVE_FLAG_EOS:
2357             /* normal exit */
2358             break;
2359         default:
2360             error_report("Unknown combination of migration flags: %#x"
2361                          " (postcopy mode)", flags);
2362             ret = -EINVAL;
2363         }
2364
2365         if (place_needed) {
2366             /* This gets called at the last target page in the host page */
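            /* 'host' points at that last target page, so adding one target
             * page and subtracting the host page size gives the start of the
             * host page being placed */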
2367             if (all_zero) {
2368                 ret = postcopy_place_page_zero(mis,
2369                                                host + TARGET_PAGE_SIZE -
2370                                                qemu_host_page_size);
2371             } else {
2372                 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
2373                                                qemu_host_page_size,
2374                                                place_source);
2375             }
2376         }
2377         if (!ret) {
2378             ret = qemu_file_get_error(f);
2379         }
2380     }
2381
2382     return ret;
2383 }
2384
2385 static int ram_load(QEMUFile *f, void *opaque, int version_id)
2386 {
2387     int flags = 0, ret = 0;
2388     static uint64_t seq_iter;
2389     int len = 0;
2390     /*
2391      * If system is running in postcopy mode, page inserts to host memory must
2392      * be atomic
2393      */
2394     bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
2395
2396     seq_iter++;
2397
2398     if (version_id != 4) {
2399         ret = -EINVAL;
2400     }
2401
2402     /* This RCU critical section can be very long running.
2403      * When RCU reclaims in the code start to become numerous,
2404      * it will be necessary to reduce the granularity of this
2405      * critical section.
2406      */
2407     rcu_read_lock();
2408
2409     if (!ret && postcopy_running) {
2410         ret = ram_load_postcopy(f);
2411     }
2412
2413     while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2414         ram_addr_t addr, total_ram_bytes;
2415         void *host = NULL;
2416         uint8_t ch;
2417
2418         addr = qemu_get_be64(f);
2419         flags = addr & ~TARGET_PAGE_MASK;
2420         addr &= TARGET_PAGE_MASK;
2421
2422         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
2423                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
2424             host = host_from_stream_offset(f, addr, flags);
2425             if (!host) {
2426                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2427                 ret = -EINVAL;
2428                 break;
2429             }
2430         }
2431
2432         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2433         case RAM_SAVE_FLAG_MEM_SIZE:
2434             /* Synchronize RAM block list */
2435             total_ram_bytes = addr;
2436             while (!ret && total_ram_bytes) {
2437                 RAMBlock *block;
2438                 char id[256];
2439                 ram_addr_t length;
2440
2441                 len = qemu_get_byte(f);
2442                 qemu_get_buffer(f, (uint8_t *)id, len);
2443                 id[len] = 0;
2444                 length = qemu_get_be64(f);
2445
2446                 block = qemu_ram_block_by_name(id);
2447                 if (block) {
2448                     if (length != block->used_length) {
2449                         Error *local_err = NULL;
2450
2451                         ret = qemu_ram_resize(block->offset, length,
2452                                               &local_err);
2453                         if (local_err) {
2454                             error_report_err(local_err);
2455                         }
2456                     }
2457                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2458                                           block->idstr);
2459                 } else {
2460                     error_report("Unknown ramblock \"%s\", cannot "
2461                                  "accept migration", id);
2462                     ret = -EINVAL;
2463                 }
2464
2465                 total_ram_bytes -= length;
2466             }
2467             break;
2468
2469         case RAM_SAVE_FLAG_COMPRESS:
2470             ch = qemu_get_byte(f);
2471             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2472             break;
2473
2474         case RAM_SAVE_FLAG_PAGE:
2475             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2476             break;
2477
2478         case RAM_SAVE_FLAG_COMPRESS_PAGE:
2479             len = qemu_get_be32(f);
2480             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2481                 error_report("Invalid compressed data length: %d", len);
2482                 ret = -EINVAL;
2483                 break;
2484             }
2485             qemu_get_buffer(f, compressed_data_buf, len);
2486             decompress_data_with_multi_threads(compressed_data_buf, host, len);
2487             break;
2488
2489         case RAM_SAVE_FLAG_XBZRLE:
2490             if (load_xbzrle(f, addr, host) < 0) {
2491                 error_report("Failed to decompress XBZRLE page at "
2492                              RAM_ADDR_FMT, addr);
2493                 ret = -EINVAL;
2494                 break;
2495             }
2496             break;
2497         case RAM_SAVE_FLAG_EOS:
2498             /* normal exit */
2499             break;
2500         default:
2501             if (flags & RAM_SAVE_FLAG_HOOK) {
2502                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
2503             } else {
2504                 error_report("Unknown combination of migration flags: %#x",
2505                              flags);
2506                 ret = -EINVAL;
2507             }
2508         }
2509         if (!ret) {
2510             ret = qemu_file_get_error(f);
2511         }
2512     }
2513
2514     rcu_read_unlock();
2515     DPRINTF("Completed load of VM with exit code %d seq iteration "
2516             "%" PRIu64 "\n", ret, seq_iter);
2517     return ret;
2518 }
2519
2520 static SaveVMHandlers savevm_ram_handlers = {
2521     .save_live_setup = ram_save_setup,
2522     .save_live_iterate = ram_save_iterate,
2523     .save_live_complete_postcopy = ram_save_complete,
2524     .save_live_complete_precopy = ram_save_complete,
2525     .save_live_pending = ram_save_pending,
2526     .load_state = ram_load,
2527     .cleanup = ram_migration_cleanup,
2528 };
2529
2530 void ram_mig_init(void)
2531 {
2532     qemu_mutex_init(&XBZRLE.lock);
2533     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
2534 }