diff --git a/migration/ram.c b/migration/ram.c
index 14d54fd052536e97aee8af36822f58660c1602ff..e18b3e2d4f457eb6dfdeebc516587674d9da0a06 100644
--- a/migration/ram.c
+++ b/migration/ram.c
  * THE SOFTWARE.
  */
 #include "qemu/osdep.h"
-#include "qemu-common.h"
 #include "cpu.h"
 #include <zlib.h>
 #include "qapi-event.h"
 #include "qemu/cutils.h"
 #include "qemu/bitops.h"
 #include "qemu/bitmap.h"
-#include "qemu/timer.h"
 #include "qemu/main-loop.h"
-#include "migration/migration.h"
-#include "migration/postcopy-ram.h"
-#include "exec/address-spaces.h"
+#include "xbzrle.h"
+#include "ram.h"
+#include "migration.h"
+#include "migration/register.h"
+#include "migration/misc.h"
+#include "qemu-file.h"
+#include "postcopy-ram.h"
 #include "migration/page_cache.h"
 #include "qemu/error-report.h"
 #include "trace.h"
 /***********************************************************/
 /* ram save/restore */
 
+/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
+ * worked for pages that were filled with the same char.  We switched
+ * it to only search for the zero value.  And to avoid confusion with
+ * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
+ */
+
 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
-#define RAM_SAVE_FLAG_COMPRESS 0x02
+#define RAM_SAVE_FLAG_ZERO     0x02
 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
 #define RAM_SAVE_FLAG_PAGE     0x08
 #define RAM_SAVE_FLAG_EOS      0x10
 /* 0x80 is reserved in migration.h start with 0x100 next */
 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
 
-static uint8_t *ZERO_TARGET_PAGE;
-
 static inline bool is_zero_range(uint8_t *p, uint64_t size)
 {
     return buffer_is_zero(p, size);
 }
 
+XBZRLECacheStats xbzrle_counters;
+
 /* struct contains XBZRLE cache and a static page
    used by the compression */
 static struct {
@@ -75,11 +83,12 @@ static struct {
     /* Cache for XBZRLE, Protected by lock. */
     PageCache *cache;
     QemuMutex lock;
+    /* it will store a page full of zeros */
+    uint8_t *zero_target_page;
+    /* buffer used for XBZRLE decoding */
+    uint8_t *decoded_buf;
 } XBZRLE;
 
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
 static void XBZRLE_cache_lock(void)
 {
     if (migrate_use_xbzrle())
@@ -138,19 +147,6 @@ out:
     return ret;
 }
 
-struct RAMBitmap {
-    struct rcu_head rcu;
-    /* Main migration bitmap */
-    unsigned long *bmap;
-    /* bitmap of pages that haven't been sent even once
-     * only maintained and used in postcopy at the moment
-     * where it's used to send the dirtymap at the start
-     * of the postcopy phase
-     */
-    unsigned long *unsentmap;
-};
-typedef struct RAMBitmap RAMBitmap;
-
 /*
  * An outstanding page request, on the source, having been received
  * and queued
@@ -171,16 +167,14 @@ struct RAMState {
     RAMBlock *last_seen_block;
     /* Last block from where we have sent data */
     RAMBlock *last_sent_block;
-    /* Last offset we have sent data from */
-    ram_addr_t last_offset;
+    /* Last dirty target page we have sent */
+    ram_addr_t last_page;
     /* last ram version we have seen */
     uint32_t last_version;
     /* We are in the first round */
     bool ram_bulk_stage;
     /* How many times we have dirty too many pages */
     int dirty_rate_high_cnt;
-    /* How many times we have synchronized the bitmap */
-    uint64_t bitmap_sync_count;
     /* these variables are used for bitmap sync */
     /* last time we did a full bitmap_sync */
     int64_t time_last_bitmap_sync;
@@ -192,36 +186,12 @@ struct RAMState {
     uint64_t xbzrle_cache_miss_prev;
     /* number of iterations at the beginning of period */
     uint64_t iterations_prev;
-    /* Accounting fields */
-    /* number of zero pages.  It used to be pages filled by the same char. */
-    uint64_t zero_pages;
-    /* number of normal transferred pages */
-    uint64_t norm_pages;
     /* Iterations since start */
     uint64_t iterations;
-    /* xbzrle transmitted bytes.  Notice that this is with
-     * compression, they can't be calculated from the pages */
-    uint64_t xbzrle_bytes;
-    /* xbzrle transmmited pages */
-    uint64_t xbzrle_pages;
-    /* xbzrle number of cache miss */
-    uint64_t xbzrle_cache_miss;
-    /* xbzrle miss rate */
-    double xbzrle_cache_miss_rate;
-    /* xbzrle number of overflows */
-    uint64_t xbzrle_overflows;
     /* number of dirty bits in the bitmap */
     uint64_t migration_dirty_pages;
-    /* total number of bytes transferred */
-    uint64_t bytes_transferred;
-    /* number of dirtied pages in the last second */
-    uint64_t dirty_pages_rate;
-    /* Count of requests incoming from destination */
-    uint64_t postcopy_requests;
     /* protects modification of the bitmap */
     QemuMutex bitmap_mutex;
-    /* Ram Bitmap protected by RCU */
-    RAMBitmap *ram_bitmap;
     /* The RAMBlock used in the last src_page_requests */
     RAMBlock *last_req_rb;
     /* Queue of outstanding page requests from the destination */
@@ -230,74 +200,21 @@ struct RAMState {
 };
 typedef struct RAMState RAMState;
 
-static RAMState ram_state;
-
-uint64_t dup_mig_pages_transferred(void)
-{
-    return ram_state.zero_pages;
-}
-
-uint64_t norm_mig_pages_transferred(void)
-{
-    return ram_state.norm_pages;
-}
-
-uint64_t xbzrle_mig_bytes_transferred(void)
-{
-    return ram_state.xbzrle_bytes;
-}
-
-uint64_t xbzrle_mig_pages_transferred(void)
-{
-    return ram_state.xbzrle_pages;
-}
-
-uint64_t xbzrle_mig_pages_cache_miss(void)
-{
-    return ram_state.xbzrle_cache_miss;
-}
-
-double xbzrle_mig_cache_miss_rate(void)
-{
-    return ram_state.xbzrle_cache_miss_rate;
-}
-
-uint64_t xbzrle_mig_pages_overflow(void)
-{
-    return ram_state.xbzrle_overflows;
-}
-
-uint64_t ram_bytes_transferred(void)
-{
-    return ram_state.bytes_transferred;
-}
+static RAMState *ram_state;
 
 uint64_t ram_bytes_remaining(void)
 {
-    return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
-}
-
-uint64_t ram_dirty_sync_count(void)
-{
-    return ram_state.bitmap_sync_count;
-}
-
-uint64_t ram_dirty_pages_rate(void)
-{
-    return ram_state.dirty_pages_rate;
+    return ram_state->migration_dirty_pages * TARGET_PAGE_SIZE;
 }
 
-uint64_t ram_postcopy_requests(void)
-{
-    return ram_state.postcopy_requests;
-}
+MigrationStats ram_counters;
 
 /* used by the search for pages to send */
 struct PageSearchStatus {
     /* Current block being searched */
     RAMBlock    *block;
-    /* Current offset to search from */
-    ram_addr_t   offset;
+    /* Current page to search from */
+    unsigned long page;
     /* Set once we wrap around */
     bool         complete_round;
 };
@@ -336,7 +253,6 @@ static QemuCond comp_done_cond;
 /* The empty QEMUFileOps will be used by file in CompressParam */
 static const QEMUFileOps empty_ops = { };
 
-static bool compression_switch;
 static DecompressParam *decomp_param;
 static QemuThread *decompress_threads;
 static QemuMutex decomp_done_lock;
@@ -390,7 +306,7 @@ static inline void terminate_compression_threads(void)
     }
 }
 
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
 {
     int i, thread_count;
 
@@ -413,14 +329,13 @@ void migrate_compress_threads_join(void)
     comp_param = NULL;
 }
 
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
 {
     int i, thread_count;
 
     if (!migrate_use_compression()) {
         return;
     }
-    compression_switch = true;
     thread_count = migrate_compress_threads();
     compress_threads = g_new0(QemuThread, thread_count);
     comp_param = g_new0(CompressParam, thread_count);
@@ -453,10 +368,14 @@ void migrate_compress_threads_create(void)
  * @offset: offset inside the block for the page
  *          in the lower bits, it contains flags
  */
-static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
+static size_t save_page_header(RAMState *rs, QEMUFile *f,  RAMBlock *block,
+                               ram_addr_t offset)
 {
     size_t size, len;
 
+    if (block == rs->last_sent_block) {
+        offset |= RAM_SAVE_FLAG_CONTINUE;
+    }
     qemu_put_be64(f, offset);
     size = 8;
 
@@ -465,6 +384,7 @@ static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
         qemu_put_byte(f, len);
         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
         size += 1 + len;
+        rs->last_sent_block = block;
     }
     return size;
 }
@@ -513,8 +433,8 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
 
     /* We don't care if this fails to allocate a new cache page
      * as long as it updated an old one */
-    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
-                 rs->bitmap_sync_count);
+    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
+                 ram_counters.dirty_sync_count);
 }
 
 #define ENCODING_FLAG_XBZRLE 0x1
@@ -540,11 +460,12 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
 
-    if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
-        rs->xbzrle_cache_miss++;
+    if (!cache_is_cached(XBZRLE.cache, current_addr,
+                         ram_counters.dirty_sync_count)) {
+        xbzrle_counters.cache_miss++;
         if (!last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
-                             rs->bitmap_sync_count) == -1) {
+                             ram_counters.dirty_sync_count) == -1) {
                 return -1;
             } else {
                 /* update *current_data when the page has been
@@ -569,7 +490,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
         return 0;
     } else if (encoded_len == -1) {
         trace_save_xbzrle_page_overflow();
-        rs->xbzrle_overflows++;
+        xbzrle_counters.overflow++;
         /* update data in the cache */
         if (!last_stage) {
             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
@@ -584,15 +505,15 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     }
 
     /* Send XBZRLE based compressed page */
-    bytes_xbzrle = save_page_header(rs->f, block,
+    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                     offset | RAM_SAVE_FLAG_XBZRLE);
     qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
     qemu_put_be16(rs->f, encoded_len);
     qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
     bytes_xbzrle += encoded_len + 1 + 2;
-    rs->xbzrle_pages++;
-    rs->xbzrle_bytes += bytes_xbzrle;
-    rs->bytes_transferred += bytes_xbzrle;
+    xbzrle_counters.pages++;
+    xbzrle_counters.bytes += bytes_xbzrle;
+    ram_counters.transferred += bytes_xbzrle;
 
     return 1;
 }
@@ -606,41 +527,32 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
  *
  * @rs: current RAM state
  * @rb: RAMBlock where to search for dirty pages
- * @start: starting address (typically so we can continue from previous page)
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
+ * @start: page where we start the search
  */
 static inline
-ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
-                                       ram_addr_t start,
-                                       ram_addr_t *ram_addr_abs)
+unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
+                                          unsigned long start)
 {
-    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
-    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
-    uint64_t rb_size = rb->used_length;
-    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
-    unsigned long *bitmap;
-
+    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
+    unsigned long *bitmap = rb->bmap;
     unsigned long next;
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    if (rs->ram_bulk_stage && nr > base) {
-        next = nr + 1;
+    if (rs->ram_bulk_stage && start > 0) {
+        next = start + 1;
     } else {
-        next = find_next_bit(bitmap, size, nr);
+        next = find_next_bit(bitmap, size, start);
     }
 
-    *ram_addr_abs = next << TARGET_PAGE_BITS;
-    return (next - base) << TARGET_PAGE_BITS;
+    return next;
 }
 
-static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr)
+static inline bool migration_bitmap_clear_dirty(RAMState *rs,
+                                                RAMBlock *rb,
+                                                unsigned long page)
 {
     bool ret;
-    int nr = addr >> TARGET_PAGE_BITS;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
 
-    ret = test_and_clear_bit(nr, bitmap);
+    ret = test_and_clear_bit(page, rb->bmap);
 
     if (ret) {
         rs->migration_dirty_pages--;
@@ -648,13 +560,11 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr)
     return ret;
 }
 
-static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start,
-                                        ram_addr_t length)
+static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
+                                        ram_addr_t start, ram_addr_t length)
 {
-    unsigned long *bitmap;
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
     rs->migration_dirty_pages +=
-        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length,
+        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                               &rs->num_dirty_pages_period);
 }
 
@@ -672,7 +582,7 @@ uint64_t ram_pagesize_summary(void)
     RAMBlock *block;
     uint64_t summary = 0;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH(block) {
         summary |= block->page_size;
     }
 
@@ -685,11 +595,7 @@ static void migration_bitmap_sync(RAMState *rs)
     int64_t end_time;
     uint64_t bytes_xfer_now;
 
-    rs->bitmap_sync_count++;
-
-    if (!rs->bytes_xfer_prev) {
-        rs->bytes_xfer_prev = ram_bytes_transferred();
-    }
+    ram_counters.dirty_sync_count++;
 
     if (!rs->time_last_bitmap_sync) {
         rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
@@ -700,8 +606,8 @@ static void migration_bitmap_sync(RAMState *rs)
 
     qemu_mutex_lock(&rs->bitmap_mutex);
     rcu_read_lock();
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        migration_bitmap_sync_range(rs, block->offset, block->used_length);
+    RAMBLOCK_FOREACH(block) {
+        migration_bitmap_sync_range(rs, block, 0, block->used_length);
     }
     rcu_read_unlock();
     qemu_mutex_unlock(&rs->bitmap_mutex);
@@ -712,42 +618,45 @@ static void migration_bitmap_sync(RAMState *rs)
 
     /* more than 1 second = 1000 millisecons */
     if (end_time > rs->time_last_bitmap_sync + 1000) {
+        /* calculate period counters */
+        ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
+            / (end_time - rs->time_last_bitmap_sync);
+        bytes_xfer_now = ram_counters.transferred;
+
         if (migrate_auto_converge()) {
             /* The following detection logic can be refined later. For now:
                Check to see if the dirtied bytes is 50% more than the approx.
                amount of bytes that just got transferred since the last time we
                were in this routine. If that happens twice, start or increase
                throttling */
-            bytes_xfer_now = ram_bytes_transferred();
 
-            if (rs->dirty_pages_rate &&
-               (rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
+            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                    (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
-               (rs->dirty_rate_high_cnt++ >= 2)) {
+                (++rs->dirty_rate_high_cnt >= 2)) {
                     trace_migration_throttle();
                     rs->dirty_rate_high_cnt = 0;
                     mig_throttle_guest_down();
-             }
-             rs->bytes_xfer_prev = bytes_xfer_now;
+            }
         }
 
         if (migrate_use_xbzrle()) {
             if (rs->iterations_prev != rs->iterations) {
-                rs->xbzrle_cache_miss_rate =
-                   (double)(rs->xbzrle_cache_miss -
+                xbzrle_counters.cache_miss_rate =
+                   (double)(xbzrle_counters.cache_miss -
                             rs->xbzrle_cache_miss_prev) /
                    (rs->iterations - rs->iterations_prev);
             }
             rs->iterations_prev = rs->iterations;
-            rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
+            rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
         }
-        rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
-            / (end_time - rs->time_last_bitmap_sync);
+
+        /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
+        rs->bytes_xfer_prev = bytes_xfer_now;
     }
     if (migrate_use_events()) {
-        qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
+        qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
     }
 }
 
@@ -767,25 +676,24 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
     int pages = -1;
 
     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
-        rs->zero_pages++;
-        rs->bytes_transferred +=
-            save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_COMPRESS);
+        ram_counters.duplicate++;
+        ram_counters.transferred +=
+            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
         qemu_put_byte(rs->f, 0);
-        rs->bytes_transferred += 1;
+        ram_counters.transferred += 1;
         pages = 1;
     }
 
     return pages;
 }
 
-static void ram_release_pages(MigrationState *ms, const char *rbname,
-                              uint64_t offset, int pages)
+static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
 {
-    if (!migrate_release_ram() || !migration_in_postcopy(ms)) {
+    if (!migrate_release_ram() || !migration_in_postcopy()) {
         return;
     }
 
-    ram_discard_range(NULL, rbname, offset, pages << TARGET_PAGE_BITS);
+    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
 }
 
 /**
@@ -797,13 +705,11 @@ static void ram_release_pages(MigrationState *ms, const char *rbname,
  *                if xbzrle noticed the page was the same.
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
  */
-static int ram_save_page(RAMState *rs, MigrationState *ms,
-                         PageSearchStatus *pss, bool last_stage)
+static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
 {
     int pages = -1;
     uint64_t bytes_xmit;
@@ -812,16 +718,17 @@ static int ram_save_page(RAMState *rs, MigrationState *ms,
     int ret;
     bool send_async = true;
     RAMBlock *block = pss->block;
-    ram_addr_t offset = pss->offset;
+    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
 
     p = block->host + offset;
+    trace_ram_save_page(block->idstr, (uint64_t)offset, p);
 
     /* In doubt sent page as normal */
     bytes_xmit = 0;
     ret = ram_control_save_page(rs->f, block->offset,
                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
-        rs->bytes_transferred += bytes_xmit;
+        ram_counters.transferred += bytes_xmit;
         pages = 1;
     }
 
@@ -829,15 +736,12 @@ static int ram_save_page(RAMState *rs, MigrationState *ms,
 
     current_addr = block->offset + offset;
 
-    if (block == rs->last_sent_block) {
-        offset |= RAM_SAVE_FLAG_CONTINUE;
-    }
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
-                rs->norm_pages++;
+                ram_counters.normal++;
             } else if (bytes_xmit == 0) {
-                rs->zero_pages++;
+                ram_counters.duplicate++;
             }
         }
     } else {
@@ -847,9 +751,9 @@ static int ram_save_page(RAMState *rs, MigrationState *ms,
              * page would be stale
              */
             xbzrle_cache_zero_page(rs, current_addr);
-            ram_release_pages(ms, block->idstr, pss->offset, pages);
+            ram_release_pages(block->idstr, offset, pages);
         } else if (!rs->ram_bulk_stage &&
-                   !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
+                   !migration_in_postcopy() && migrate_use_xbzrle()) {
             pages = save_xbzrle_page(rs, &p, current_addr, block,
                                      offset, last_stage);
             if (!last_stage) {
@@ -863,18 +767,18 @@ static int ram_save_page(RAMState *rs, MigrationState *ms,
 
     /* XBZRLE overflow or normal page */
     if (pages == -1) {
-        rs->bytes_transferred += save_page_header(rs->f, block,
-                                               offset | RAM_SAVE_FLAG_PAGE);
+        ram_counters.transferred +=
+            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_PAGE);
         if (send_async) {
             qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
                                   migrate_release_ram() &
-                                  migration_in_postcopy(ms));
+                                  migration_in_postcopy());
         } else {
             qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
         }
-        rs->bytes_transferred += TARGET_PAGE_SIZE;
+        ram_counters.transferred += TARGET_PAGE_SIZE;
         pages = 1;
-        rs->norm_pages++;
+        ram_counters.normal++;
     }
 
     XBZRLE_cache_unlock();
@@ -885,10 +789,11 @@ static int ram_save_page(RAMState *rs, MigrationState *ms,
 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                 ram_addr_t offset)
 {
+    RAMState *rs = ram_state;
     int bytes_sent, blen;
     uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
 
-    bytes_sent = save_page_header(f, block, offset |
+    bytes_sent = save_page_header(rs, f, block, offset |
                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
     blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                      migrate_compress_level());
@@ -898,8 +803,7 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
         error_report("compressed data failed!");
     } else {
         bytes_sent += blen;
-        ram_release_pages(migrate_get_current(), block->idstr,
-                          offset & TARGET_PAGE_MASK, 1);
+        ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
     }
 
     return bytes_sent;
@@ -926,7 +830,7 @@ static void flush_compressed_data(RAMState *rs)
         qemu_mutex_lock(&comp_param[idx].mutex);
         if (!comp_param[idx].quit) {
             len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
-            rs->bytes_transferred += len;
+            ram_counters.transferred += len;
         }
         qemu_mutex_unlock(&comp_param[idx].mutex);
     }
@@ -956,8 +860,8 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                 qemu_cond_signal(&comp_param[idx].cond);
                 qemu_mutex_unlock(&comp_param[idx].mutex);
                 pages = 1;
-                rs->norm_pages++;
-                rs->bytes_transferred += bytes_xmit;
+                ram_counters.normal++;
+                ram_counters.transferred += bytes_xmit;
                 break;
             }
         }
@@ -978,35 +882,34 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
  * Returns the number of pages written.
  *
  * @rs: current RAM state
- * @ms: current migration state
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
  */
-static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
-                                    PageSearchStatus *pss, bool last_stage)
+static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
+                                    bool last_stage)
 {
     int pages = -1;
     uint64_t bytes_xmit = 0;
     uint8_t *p;
     int ret, blen;
     RAMBlock *block = pss->block;
-    ram_addr_t offset = pss->offset;
+    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
 
     p = block->host + offset;
 
     ret = ram_control_save_page(rs->f, block->offset,
                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
     if (bytes_xmit) {
-        rs->bytes_transferred += bytes_xmit;
+        ram_counters.transferred += bytes_xmit;
         pages = 1;
     }
     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
         if (ret != RAM_SAVE_CONTROL_DELAYED) {
             if (bytes_xmit > 0) {
-                rs->norm_pages++;
+                ram_counters.normal++;
             } else if (bytes_xmit == 0) {
-                rs->zero_pages++;
+                ram_counters.duplicate++;
             }
         }
     } else {
@@ -1021,13 +924,13 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
             pages = save_zero_page(rs, block, offset, p);
             if (pages == -1) {
                 /* Make sure the first page is sent out before other pages */
-                bytes_xmit = save_page_header(rs->f, block, offset |
+                bytes_xmit = save_page_header(rs, rs->f, block, offset |
                                               RAM_SAVE_FLAG_COMPRESS_PAGE);
                 blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
                                                  migrate_compress_level());
                 if (blen > 0) {
-                    rs->bytes_transferred += bytes_xmit + blen;
-                    rs->norm_pages++;
+                    ram_counters.transferred += bytes_xmit + blen;
+                    ram_counters.normal++;
                     pages = 1;
                 } else {
                     qemu_file_set_error(rs->f, blen);
@@ -1035,15 +938,14 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
                 }
             }
             if (pages > 0) {
-                ram_release_pages(ms, block->idstr, pss->offset, pages);
+                ram_release_pages(block->idstr, offset, pages);
             }
         } else {
-            offset |= RAM_SAVE_FLAG_CONTINUE;
             pages = save_zero_page(rs, block, offset, p);
             if (pages == -1) {
                 pages = compress_page_with_multi_thread(rs, block, offset);
             } else {
-                ram_release_pages(ms, block->idstr, pss->offset, pages);
+                ram_release_pages(block->idstr, offset, pages);
             }
         }
     }
@@ -1060,16 +962,12 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
  * @rs: current RAM state
  * @pss: data about the state of the current dirty page scan
  * @again: set to false if the search has scanned the whole of RAM
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
  */
-static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
-                             bool *again, ram_addr_t *ram_addr_abs)
+static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
 {
-    pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset,
-                                              ram_addr_abs);
+    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
     if (pss->complete_round && pss->block == rs->last_seen_block &&
-        pss->offset >= rs->last_offset) {
+        pss->page >= rs->last_page) {
         /*
          * We've been once around the RAM and haven't found anything.
          * Give up.
@@ -1077,9 +975,9 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
         *again = false;
         return false;
     }
-    if (pss->offset >= pss->block->used_length) {
+    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
         /* Didn't find anything in this RAM Block */
-        pss->offset = 0;
+        pss->page = 0;
         pss->block = QLIST_NEXT_RCU(pss->block, next);
         if (!pss->block) {
             /* Hit the end of the list */
@@ -1092,7 +990,6 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
                  * point. In theory, xbzrle can do better than compression.
                  */
                 flush_compressed_data(rs);
-                compression_switch = false;
             }
         }
         /* Didn't find anything this time, but try again on the new block */
@@ -1115,11 +1012,8 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss,
  *
  * @rs: current RAM state
  * @offset: used to return the offset within the RAMBlock
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
  */
-static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
-                              ram_addr_t *ram_addr_abs)
+static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
 {
     RAMBlock *block = NULL;
 
@@ -1129,8 +1023,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
                                 QSIMPLEQ_FIRST(&rs->src_page_requests);
         block = entry->rb;
         *offset = entry->offset;
-        *ram_addr_abs = (entry->offset + entry->rb->offset) &
-                        TARGET_PAGE_MASK;
 
         if (entry->len > TARGET_PAGE_SIZE) {
             entry->len -= TARGET_PAGE_SIZE;
@@ -1155,18 +1047,15 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
  *
  * @rs: current RAM state
  * @pss: data about the state of the current dirty page scan
- * @ram_addr_abs: pointer into which to store the address of the dirty page
- *                within the global ram_addr space
  */
-static bool get_queued_page(RAMState *rs, PageSearchStatus *pss,
-                            ram_addr_t *ram_addr_abs)
+static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
 {
     RAMBlock  *block;
     ram_addr_t offset;
     bool dirty;
 
     do {
-        block = unqueue_page(rs, &offset, ram_addr_abs);
+        block = unqueue_page(rs, &offset);
         /*
          * We're sending this page, and since it's postcopy nothing else
          * will dirty it, and we must make sure it doesn't get sent again
@@ -1174,19 +1063,15 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss,
          * search already sent it.
          */
         if (block) {
-            unsigned long *bitmap;
-            bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-            dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
+            unsigned long page;
+
+            page = offset >> TARGET_PAGE_BITS;
+            dirty = test_bit(page, block->bmap);
             if (!dirty) {
-                trace_get_queued_page_not_dirty(
-                    block->idstr, (uint64_t)offset,
-                    (uint64_t)*ram_addr_abs,
-                    test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
-                         atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
+                trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
+                       page, test_bit(page, block->unsentmap));
             } else {
-                trace_get_queued_page(block->idstr,
-                                      (uint64_t)offset,
-                                      (uint64_t)*ram_addr_abs);
+                trace_get_queued_page(block->idstr, (uint64_t)offset, page);
             }
         }
 
@@ -1207,7 +1092,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss,
          * it just requested.
          */
         pss->block = block;
-        pss->offset = offset;
+        pss->page = offset >> TARGET_PAGE_BITS;
     }
 
     return !!block;
@@ -1221,10 +1106,9 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss,
  * be some left.  in case that there is any page left, we drop it.
  *
  */
-void migration_page_queue_free(void)
+static void migration_page_queue_free(RAMState *rs)
 {
     struct RAMSrcPageRequest *mspr, *next_mspr;
-    RAMState *rs = &ram_state;
     /* This queue generally should be empty - but in the case of a failed
      * migration might have some droppings in.
      */
@@ -1252,9 +1136,9 @@ void migration_page_queue_free(void)
 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
 {
     RAMBlock *ramblock;
-    RAMState *rs = &ram_state;
+    RAMState *rs = ram_state;
 
-    rs->postcopy_requests++;
+    ram_counters.postcopy_requests++;
     rcu_read_lock();
     if (!rbname) {
         /* Reuse last RAMBlock */
@@ -1314,37 +1198,31 @@ err:
  * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
- * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_target_page(RAMState *rs, MigrationState *ms,
-                                PageSearchStatus *pss,
-                                bool last_stage,
-                                ram_addr_t dirty_ram_abs)
+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
+                                bool last_stage)
 {
     int res = 0;
 
     /* Check the pages is dirty and if it is send it */
-    if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) {
-        unsigned long *unsentmap;
-        if (compression_switch && migrate_use_compression()) {
-            res = ram_save_compressed_page(rs, ms, pss, last_stage);
+    if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
+        /*
+         * If xbzrle is on, stop using the data compression after first
+         * round of migration even if compression is enabled. In theory,
+         * xbzrle can do better than compression.
+         */
+        if (migrate_use_compression() &&
+            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
+            res = ram_save_compressed_page(rs, pss, last_stage);
         } else {
-            res = ram_save_page(rs, ms, pss, last_stage);
+            res = ram_save_page(rs, pss, last_stage);
         }
 
         if (res < 0) {
             return res;
         }
-        unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-        if (unsentmap) {
-            clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
-        }
-        /* Only update last_sent_block if a block was actually sent; xbzrle
-         * might have decided the page was identical so didn't bother writing
-         * to the stream.
-         */
-        if (res > 0) {
-            rs->last_sent_block = pss->block;
+        if (pss->block->unsentmap) {
+            clear_bit(pss->page, pss->block->unsentmap);
         }
     }
 
@@ -1359,6 +1237,8 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms,
  * a host page in which case the remainder of the hostpage is sent.
  * Only dirty target pages are sent. Note that the host page size may
  * be a huge page for this block.
+ * The saving stops at the boundary of the used_length of the block
+ * if the RAMBlock isn't a multiple of the host page size.
  *
  * Returns the number of pages written or negative on error
  *
@@ -1366,29 +1246,27 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms,
  * @ms: current migration state
  * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
- * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
  */
-static int ram_save_host_page(RAMState *rs, MigrationState *ms,
-                              PageSearchStatus *pss,
-                              bool last_stage,
-                              ram_addr_t dirty_ram_abs)
+static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
+                              bool last_stage)
 {
     int tmppages, pages = 0;
-    size_t pagesize = qemu_ram_pagesize(pss->block);
+    size_t pagesize_bits =
+        qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
 
     do {
-        tmppages = ram_save_target_page(rs, ms, pss, last_stage, dirty_ram_abs);
+        tmppages = ram_save_target_page(rs, pss, last_stage);
         if (tmppages < 0) {
             return tmppages;
         }
 
         pages += tmppages;
-        pss->offset += TARGET_PAGE_SIZE;
-        dirty_ram_abs += TARGET_PAGE_SIZE;
-    } while (pss->offset & (pagesize - 1));
+        pss->page++;
+    } while ((pss->page & (pagesize_bits - 1)) &&
+             offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
 
     /* The offset we leave with is the last one we looked at */
-    pss->offset -= TARGET_PAGE_SIZE;
+    pss->page--;
     return pages;
 }
 
@@ -1409,11 +1287,8 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms,
 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 {
     PageSearchStatus pss;
-    MigrationState *ms = migrate_get_current();
     int pages = 0;
     bool again, found;
-    ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
-                                 ram_addr_t space */
 
     /* No dirty page as there is zero RAM */
     if (!ram_bytes_total()) {
@@ -1421,7 +1296,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
     }
 
     pss.block = rs->last_seen_block;
-    pss.offset = rs->last_offset;
+    pss.page = rs->last_page;
     pss.complete_round = false;
 
     if (!pss.block) {
@@ -1430,20 +1305,20 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 
     do {
         again = true;
-        found = get_queued_page(rs, &pss, &dirty_ram_abs);
+        found = get_queued_page(rs, &pss);
 
         if (!found) {
             /* priority queue empty, so just search for something dirty */
-            found = find_dirty_block(rs, &pss, &again, &dirty_ram_abs);
+            found = find_dirty_block(rs, &pss, &again);
         }
 
         if (found) {
-            pages = ram_save_host_page(rs, ms, &pss, last_stage, dirty_ram_abs);
+            pages = ram_save_host_page(rs, &pss, last_stage);
         }
     } while (!pages && again);
 
     rs->last_seen_block = pss.block;
-    rs->last_offset = pss.offset;
+    rs->last_page = pss.page;
 
     return pages;
 }
@@ -1451,13 +1326,12 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
 void acct_update_position(QEMUFile *f, size_t size, bool zero)
 {
     uint64_t pages = size / TARGET_PAGE_SIZE;
-    RAMState *rs = &ram_state;
 
     if (zero) {
-        rs->zero_pages += pages;
+        ram_counters.duplicate += pages;
     } else {
-        rs->norm_pages += pages;
-        rs->bytes_transferred += size;
+        ram_counters.normal += pages;
+        ram_counters.transferred += size;
         qemu_update_position(f, size);
     }
 }
@@ -1468,37 +1342,39 @@ uint64_t ram_bytes_total(void)
     uint64_t total = 0;
 
     rcu_read_lock();
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
+    RAMBLOCK_FOREACH(block) {
         total += block->used_length;
+    }
     rcu_read_unlock();
     return total;
 }
 
-void free_xbzrle_decoded_buf(void)
+static void xbzrle_load_setup(void)
 {
-    g_free(xbzrle_decoded_buf);
-    xbzrle_decoded_buf = NULL;
+    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
 }
 
-static void migration_bitmap_free(struct RAMBitmap *bmap)
+static void xbzrle_load_cleanup(void)
 {
-    g_free(bmap->bmap);
-    g_free(bmap->unsentmap);
-    g_free(bmap);
+    g_free(XBZRLE.decoded_buf);
+    XBZRLE.decoded_buf = NULL;
 }
 
-static void ram_migration_cleanup(void *opaque)
+static void ram_save_cleanup(void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **rsp = opaque;
+    RAMBlock *block;
 
     /* caller have hold iothread lock or is in a bh, so there is
      * no writing race against this migration_bitmap
      */
-    struct RAMBitmap *bitmap = rs->ram_bitmap;
-    atomic_rcu_set(&rs->ram_bitmap, NULL);
-    if (bitmap) {
-        memory_global_dirty_log_stop();
-        call_rcu(bitmap, migration_bitmap_free, rcu);
+    memory_global_dirty_log_stop();
+
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        g_free(block->bmap);
+        block->bmap = NULL;
+        g_free(block->unsentmap);
+        block->unsentmap = NULL;
     }
 
     XBZRLE_cache_lock();
@@ -1506,85 +1382,51 @@ static void ram_migration_cleanup(void *opaque)
         cache_fini(XBZRLE.cache);
         g_free(XBZRLE.encoded_buf);
         g_free(XBZRLE.current_buf);
-        g_free(ZERO_TARGET_PAGE);
+        g_free(XBZRLE.zero_target_page);
         XBZRLE.cache = NULL;
         XBZRLE.encoded_buf = NULL;
         XBZRLE.current_buf = NULL;
+        XBZRLE.zero_target_page = NULL;
     }
     XBZRLE_cache_unlock();
+    migration_page_queue_free(*rsp);
+    compress_threads_save_cleanup();
+    g_free(*rsp);
+    *rsp = NULL;
 }
 
 static void ram_state_reset(RAMState *rs)
 {
     rs->last_seen_block = NULL;
     rs->last_sent_block = NULL;
-    rs->last_offset = 0;
+    rs->last_page = 0;
     rs->last_version = ram_list.version;
     rs->ram_bulk_stage = true;
 }
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
 
-void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
-{
-    RAMState *rs = &ram_state;
-
-    /* called in qemu main thread, so there is
-     * no writing race against this migration_bitmap
-     */
-    if (rs->ram_bitmap) {
-        struct RAMBitmap *old_bitmap = rs->ram_bitmap, *bitmap;
-        bitmap = g_new(struct RAMBitmap, 1);
-        bitmap->bmap = bitmap_new(new);
-
-        /* prevent migration_bitmap content from being set bit
-         * by migration_bitmap_sync_range() at the same time.
-         * it is safe to migration if migration_bitmap is cleared bit
-         * at the same time.
-         */
-        qemu_mutex_lock(&rs->bitmap_mutex);
-        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
-        bitmap_set(bitmap->bmap, old, new - old);
-
-        /* We don't have a way to safely extend the sentmap
-         * with RCU; so mark it as missing, entry to postcopy
-         * will fail.
-         */
-        bitmap->unsentmap = NULL;
-
-        atomic_rcu_set(&rs->ram_bitmap, bitmap);
-        qemu_mutex_unlock(&rs->bitmap_mutex);
-        rs->migration_dirty_pages += new - old;
-        call_rcu(old_bitmap, migration_bitmap_free, rcu);
-    }
-}
-
 /*
  * 'expected' is the value you expect the bitmap mostly to be full
  * of; it won't bother printing lines that are all this value.
  * If 'todump' is null the migration bitmap is dumped.
  */
-void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
+void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
+                           unsigned long pages)
 {
-    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
-    RAMState *rs = &ram_state;
     int64_t cur;
     int64_t linelen = 128;
     char linebuf[129];
 
-    if (!todump) {
-        todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    }
-
-    for (cur = 0; cur < ram_pages; cur += linelen) {
+    for (cur = 0; cur < pages; cur += linelen) {
         int64_t curb;
         bool found = false;
         /*
          * Last line; catch the case where the line length
          * is longer than remaining ram
          */
-        if (cur + linelen > ram_pages) {
-            linelen = ram_pages - cur;
+        if (cur + linelen > pages) {
+            linelen = pages - cur;
         }
         for (curb = 0; curb < linelen; curb++) {
             bool thisbit = test_bit(cur + curb, todump);
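
With the extra 'pages' argument the caller of ram_debug_dump_bitmap() must now size the dump itself. A usage sketch, assuming a RAMBlock whose unsentmap was allocated as in this patch (it mirrors the DEBUG_POSTCOPY call further down):

/* Usage sketch: dump one block's unsent map, one bit per target page. */
static void dump_block_unsent_sketch(RAMBlock *block)
{
    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;

    ram_debug_dump_bitmap(block->unsentmap, true, pages);
}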
@@ -1602,18 +1444,16 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
 
 void ram_postcopy_migrated_memory_release(MigrationState *ms)
 {
-    RAMState *rs = &ram_state;
     struct RAMBlock *block;
-    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
-        unsigned long run_start = find_next_zero_bit(bitmap, range, first);
+    RAMBLOCK_FOREACH(block) {
+        unsigned long *bitmap = block->bmap;
+        unsigned long range = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
 
         while (run_start < range) {
             unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
-            ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS,
+            ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
                               (run_end - run_start) << TARGET_PAGE_BITS);
             run_start = find_next_zero_bit(bitmap, range, run_end + 1);
         }
@@ -1636,16 +1476,13 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
  */
 static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                         PostcopyDiscardState *pds,
-                                        unsigned long start,
-                                        unsigned long length)
+                                        RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    unsigned long end = start + length; /* one after the end */
+    unsigned long end = block->used_length >> TARGET_PAGE_BITS;
     unsigned long current;
-    unsigned long *unsentmap;
+    unsigned long *unsentmap = block->unsentmap;
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    for (current = start; current < end; ) {
+    for (current = 0; current < end; ) {
         unsigned long one = find_next_bit(unsentmap, end, current);
 
         if (one <= end) {
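
postcopy_send_discard_bm_ram() now scans the block-local unsentmap from bit 0, emitting each run of set bits as a discard range. A standalone sketch of that run-finding idiom, using the same bitops helpers as the code above:

/* Sketch: visit every contiguous run of set bits in 'map' as [start, end). */
static void for_each_set_run_sketch(const unsigned long *map,
                                    unsigned long nbits)
{
    unsigned long start = find_next_bit(map, nbits, 0);

    while (start < nbits) {
        unsigned long end = find_next_zero_bit(map, nbits, start + 1);

        /* [start, end) is one run of still-unsent pages */
        start = find_next_bit(map, nbits, end);
    }
}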
@@ -1687,19 +1524,16 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     struct RAMBlock *block;
     int ret;
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
-                                                               first,
-                                                               block->idstr);
+    RAMBLOCK_FOREACH(block) {
+        PostcopyDiscardState *pds =
+            postcopy_discard_send_init(ms, block->idstr);
 
         /*
          * Postcopy sends chunks of bitmap over the wire, but it
          * just needs indexes at this point, avoids it having
          * target page specific code.
          */
-        ret = postcopy_send_discard_bm_ram(ms, pds, first,
-                                    block->used_length >> TARGET_PAGE_BITS);
+        ret = postcopy_send_discard_bm_ram(ms, pds, block);
         postcopy_discard_send_finish(ms, pds);
         if (ret) {
             return ret;
@@ -1729,13 +1563,11 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           RAMBlock *block,
                                           PostcopyDiscardState *pds)
 {
-    RAMState *rs = &ram_state;
-    unsigned long *bitmap;
-    unsigned long *unsentmap;
+    RAMState *rs = ram_state;
+    unsigned long *bitmap = block->bmap;
+    unsigned long *unsentmap = block->unsentmap;
     unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
-    unsigned long first = block->offset >> TARGET_PAGE_BITS;
-    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
-    unsigned long last = first + (len - 1);
+    unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
     unsigned long run_start;
 
     if (block->page_size == TARGET_PAGE_SIZE) {
@@ -1743,18 +1575,15 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
         return;
     }
 
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-
     if (unsent_pass) {
         /* Find a sent page */
-        run_start = find_next_zero_bit(unsentmap, last + 1, first);
+        run_start = find_next_zero_bit(unsentmap, pages, 0);
     } else {
         /* Find a dirty page */
-        run_start = find_next_bit(bitmap, last + 1, first);
+        run_start = find_next_bit(bitmap, pages, 0);
     }
 
-    while (run_start <= last) {
+    while (run_start < pages) {
         bool do_fixup = false;
         unsigned long fixup_start_addr;
         unsigned long host_offset;
@@ -1774,9 +1603,9 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
             /* Find the end of this run */
             unsigned long run_end;
             if (unsent_pass) {
-                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
+                run_end = find_next_bit(unsentmap, pages, run_start + 1);
             } else {
-                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
+                run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
             }
             /*
              * If the end isn't at the start of a host page, then the
@@ -1833,11 +1662,10 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
 
         if (unsent_pass) {
             /* Find the next sent page for the next iteration */
-            run_start = find_next_zero_bit(unsentmap, last + 1,
-                                           run_start);
+            run_start = find_next_zero_bit(unsentmap, pages, run_start);
         } else {
             /* Find the next dirty page for the next iteration */
-            run_start = find_next_bit(bitmap, last + 1, run_start);
+            run_start = find_next_bit(bitmap, pages, run_start);
         }
     }
 }
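
postcopy_chunk_hostpages_pass() works in target-page units but must keep everything it discards or re-dirties aligned to host pages; host_ratio above is the number of target pages per host page. A simplified sketch of that alignment arithmetic, which only widens a run outwards to host-page boundaries and glosses over the sent/dirty fixup details:

/* Simplified sketch: widen [*run_start, *run_end) so it covers whole host
 * pages.  host_ratio = block->page_size / TARGET_PAGE_SIZE, assumed to be
 * a power of two as in the code above. */
static void round_run_to_host_pages_sketch(unsigned long *run_start,
                                           unsigned long *run_end,
                                           unsigned int host_ratio)
{
    *run_start &= ~((unsigned long)host_ratio - 1);     /* round down */
    *run_end = QEMU_ALIGN_UP(*run_end, host_ratio);     /* round up */
}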
@@ -1854,34 +1682,22 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
  * Returns zero on success
  *
  * @ms: current migration state
+ * @block: block we want to work with
  */
-static int postcopy_chunk_hostpages(MigrationState *ms)
+static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
 {
-    RAMState *rs = &ram_state;
-    struct RAMBlock *block;
+    PostcopyDiscardState *pds =
+        postcopy_discard_send_init(ms, block->idstr);
 
-    /* Easiest way to make sure we don't resume in the middle of a host-page */
-    rs->last_seen_block = NULL;
-    rs->last_sent_block = NULL;
-    rs->last_offset     = 0;
-
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
-        unsigned long first = block->offset >> TARGET_PAGE_BITS;
-
-        PostcopyDiscardState *pds =
-                         postcopy_discard_send_init(ms, first, block->idstr);
-
-        /* First pass: Discard all partially sent host pages */
-        postcopy_chunk_hostpages_pass(ms, true, block, pds);
-        /*
-         * Second pass: Ensure that all partially dirty host pages are made
-         * fully dirty.
-         */
-        postcopy_chunk_hostpages_pass(ms, false, block, pds);
-
-        postcopy_discard_send_finish(ms, pds);
-    } /* ram_list loop */
+    /* First pass: Discard all partially sent host pages */
+    postcopy_chunk_hostpages_pass(ms, true, block, pds);
+    /*
+     * Second pass: Ensure that all partially dirty host pages are made
+     * fully dirty.
+     */
+    postcopy_chunk_hostpages_pass(ms, false, block, pds);
 
+    postcopy_discard_send_finish(ms, pds);
     return 0;
 }
 
@@ -1902,45 +1718,50 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
  */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
-    RAMState *rs = &ram_state;
+    RAMState *rs = ram_state;
+    RAMBlock *block;
     int ret;
-    unsigned long *bitmap, *unsentmap;
 
     rcu_read_lock();
 
     /* This should be our last sync, the src is now paused */
     migration_bitmap_sync(rs);
 
-    unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
-    if (!unsentmap) {
-        /* We don't have a safe way to resize the sentmap, so
-         * if the bitmap was resized it will be NULL at this
-         * point.
-         */
-        error_report("migration ram resized during precopy phase");
-        rcu_read_unlock();
-        return -EINVAL;
-    }
-
-    /* Deal with TPS != HPS and huge pages */
-    ret = postcopy_chunk_hostpages(ms);
-    if (ret) {
-        rcu_read_unlock();
-        return ret;
-    }
-
-    /*
-     * Update the unsentmap to be unsentmap = unsentmap | dirty
-     */
-    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
-    bitmap_or(unsentmap, unsentmap, bitmap,
-               last_ram_offset() >> TARGET_PAGE_BITS);
+    /* Easiest way to make sure we don't resume in the middle of a host-page */
+    rs->last_seen_block = NULL;
+    rs->last_sent_block = NULL;
+    rs->last_page = 0;
 
+    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+        unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
+        unsigned long *bitmap = block->bmap;
+        unsigned long *unsentmap = block->unsentmap;
+
+        if (!unsentmap) {
+            /* We don't have a safe way to resize the sentmap, so
+             * if the bitmap was resized it will be NULL at this
+             * point.
+             */
+            error_report("migration ram resized during precopy phase");
+            rcu_read_unlock();
+            return -EINVAL;
+        }
+        /* Deal with TPS != HPS and huge pages */
+        ret = postcopy_chunk_hostpages(ms, block);
+        if (ret) {
+            rcu_read_unlock();
+            return ret;
+        }
 
-    trace_ram_postcopy_send_discard_bitmap();
+        /*
+         * Update the unsentmap to be unsentmap = unsentmap | dirty
+         */
+        bitmap_or(unsentmap, unsentmap, bitmap, pages);
 #ifdef DEBUG_POSTCOPY
-    ram_debug_dump_bitmap(unsentmap, true);
+        ram_debug_dump_bitmap(unsentmap, true, pages);
 #endif
+    }
+    trace_ram_postcopy_send_discard_bitmap();
 
     ret = postcopy_each_ram_send_discard(ms);
     rcu_read_unlock();
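
Before the destination is told what to discard, every page that is still dirty must also count as unsent, hence the bitmap_or() above. A tiny sketch of that merge with a worked example in the comment:

/* Sketch of the merge done above.  Example with 8 pages:
 *   unsentmap  0b00001111   pages 0-3 were never sent
 *   bmap       0b00110000   pages 4-5 were re-dirtied after being sent
 *   result     0b00111111   all six must be discarded before postcopy
 */
static void merge_dirty_into_unsent_sketch(RAMBlock *block,
                                           unsigned long pages)
{
    bitmap_or(block->unsentmap, block->unsentmap, block->bmap, pages);
}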
@@ -1953,15 +1774,12 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
  *
  * Returns zero on success
  *
- * @mis: current migration incoming state
  * @rbname: name of the RAMBlock of the request. NULL means the
  *          same as the last one.
  * @start: RAMBlock starting page
  * @length: RAMBlock size
  */
-int ram_discard_range(MigrationIncomingState *mis,
-                      const char *rbname,
-                      uint64_t start, size_t length)
+int ram_discard_range(const char *rbname, uint64_t start, size_t length)
 {
     int ret = -1;
 
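With MigrationIncomingState dropped from the signature, callers of ram_discard_range() only need a block name plus a byte offset and length. A usage sketch matching the postcopy release loop earlier in this patch (run_start/run_end are in target-page units):

/* Usage sketch: discard one run of pages from a block. */
static int discard_run_sketch(RAMBlock *block,
                              unsigned long run_start,
                              unsigned long run_end)
{
    return ram_discard_range(block->idstr,
                             (uint64_t)run_start << TARGET_PAGE_BITS,
                             (size_t)(run_end - run_start) << TARGET_PAGE_BITS);
}
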
@@ -1983,24 +1801,25 @@ err:
     return ret;
 }
 
-static int ram_state_init(RAMState *rs)
+static int ram_state_init(RAMState **rsp)
 {
-    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
+    *rsp = g_new0(RAMState, 1);
 
-    memset(rs, 0, sizeof(*rs));
-    qemu_mutex_init(&rs->bitmap_mutex);
-    qemu_mutex_init(&rs->src_page_req_mutex);
-    QSIMPLEQ_INIT(&rs->src_page_requests);
+    qemu_mutex_init(&(*rsp)->bitmap_mutex);
+    qemu_mutex_init(&(*rsp)->src_page_req_mutex);
+    QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
 
     if (migrate_use_xbzrle()) {
         XBZRLE_cache_lock();
-        ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
+        XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE);
         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                   TARGET_PAGE_SIZE,
                                   TARGET_PAGE_SIZE);
         if (!XBZRLE.cache) {
             XBZRLE_cache_unlock();
             error_report("Error creating cache");
+            g_free(*rsp);
+            *rsp = NULL;
             return -1;
         }
         XBZRLE_cache_unlock();
@@ -2009,6 +1828,8 @@ static int ram_state_init(RAMState *rs)
         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
         if (!XBZRLE.encoded_buf) {
             error_report("Error allocating encoded_buf");
+            g_free(*rsp);
+            *rsp = NULL;
             return -1;
         }
 
@@ -2017,6 +1838,8 @@ static int ram_state_init(RAMState *rs)
             error_report("Error allocating current_buf");
             g_free(XBZRLE.encoded_buf);
             XBZRLE.encoded_buf = NULL;
+            g_free(*rsp);
+            *rsp = NULL;
             return -1;
         }
     }
@@ -2026,18 +1849,21 @@ static int ram_state_init(RAMState *rs)
 
     qemu_mutex_lock_ramlist();
     rcu_read_lock();
-    ram_state_reset(rs);
+    ram_state_reset(*rsp);
 
-    rs->ram_bitmap = g_new0(struct RAMBitmap, 1);
     /* Skip setting bitmap if there is no RAM */
     if (ram_bytes_total()) {
-        ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
-        rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
-        bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
+        RAMBlock *block;
+
+        QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
 
-        if (migrate_postcopy_ram()) {
-            rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
-            bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
+            block->bmap = bitmap_new(pages);
+            bitmap_set(block->bmap, 0, pages);
+            if (migrate_postcopy_ram()) {
+                block->unsentmap = bitmap_new(pages);
+                bitmap_set(block->unsentmap, 0, pages);
+            }
         }
     }
 
@@ -2045,10 +1871,10 @@ static int ram_state_init(RAMState *rs)
      * Count the total number of pages used by ram blocks not including any
      * gaps due to alignment or unplugs.
      */
-    rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+    (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
 
     memory_global_dirty_log_start();
-    migration_bitmap_sync(rs);
+    migration_bitmap_sync(*rsp);
     qemu_mutex_unlock_ramlist();
     qemu_mutex_unlock_iothread();
     rcu_read_unlock();
@@ -2073,22 +1899,22 @@ static int ram_state_init(RAMState *rs)
  */
 static int ram_save_setup(QEMUFile *f, void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **rsp = opaque;
     RAMBlock *block;
 
     /* migration has already setup the bitmap, reuse it. */
     if (!migration_in_colo_state()) {
-        if (ram_state_init(rs) < 0) {
+        if (ram_state_init(rsp) != 0) {
             return -1;
-         }
+        }
     }
-    rs->f = f;
+    (*rsp)->f = f;
 
     rcu_read_lock();
 
     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
 
-    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+    RAMBLOCK_FOREACH(block) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
         qemu_put_be64(f, block->used_length);
@@ -2098,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
 
     rcu_read_unlock();
+    compress_threads_save_setup();
 
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -2117,7 +1944,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
  */
 static int ram_save_iterate(QEMUFile *f, void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **temp = opaque;
+    RAMState *rs = *temp;
     int ret;
     int i;
     int64_t t0;
@@ -2170,7 +1998,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
 
     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-    rs->bytes_transferred += 8;
+    ram_counters.transferred += 8;
 
     ret = qemu_file_get_error(f);
     if (ret < 0) {
@@ -2192,11 +2020,12 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
  */
 static int ram_save_complete(QEMUFile *f, void *opaque)
 {
-    RAMState *rs = opaque;
+    RAMState **temp = opaque;
+    RAMState *rs = *temp;
 
     rcu_read_lock();
 
-    if (!migration_in_postcopy(migrate_get_current())) {
+    if (!migration_in_postcopy()) {
         migration_bitmap_sync(rs);
     }
 
@@ -2229,12 +2058,13 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                              uint64_t *non_postcopiable_pending,
                              uint64_t *postcopiable_pending)
 {
-    RAMState *rs = opaque;
+    RAMState **temp = opaque;
+    RAMState *rs = *temp;
     uint64_t remaining_size;
 
     remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
 
-    if (!migration_in_postcopy(migrate_get_current()) &&
+    if (!migration_in_postcopy() &&
         remaining_size < max_size) {
         qemu_mutex_lock_iothread();
         rcu_read_lock();
@@ -2254,11 +2084,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
     int xh_flags;
     uint8_t *loaded_data;
 
-    if (!xbzrle_decoded_buf) {
-        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
-    }
-    loaded_data = xbzrle_decoded_buf;
-
     /* extract RLE header */
     xh_flags = qemu_get_byte(f);
     xh_len = qemu_get_be16(f);
     /* extract RLE header */
     xh_flags = qemu_get_byte(f);
     xh_len = qemu_get_be16(f);
@@ -2272,7 +2097,9 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
         error_report("Failed to load XBZRLE page - len overflow!");
         return -1;
     }
         error_report("Failed to load XBZRLE page - len overflow!");
         return -1;
     }
+    loaded_data = XBZRLE.decoded_buf;
     /* load data and decode */
     /* load data and decode */
+    /* it can change loaded_data to point to an internal buffer */
     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
 
     /* decode RLE */
     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
 
     /* decode RLE */
@@ -2406,10 +2233,13 @@ static void wait_for_decompress_done(void)
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
 {
     int i, thread_count;
 
 {
     int i, thread_count;
 
+    if (!migrate_use_compression()) {
+        return;
+    }
     thread_count = migrate_decompress_threads();
     decompress_threads = g_new0(QemuThread, thread_count);
     decomp_param = g_new0(DecompressParam, thread_count);
     thread_count = migrate_decompress_threads();
     decompress_threads = g_new0(QemuThread, thread_count);
     decomp_param = g_new0(DecompressParam, thread_count);
@@ -2427,10 +2257,13 @@ void migrate_decompress_threads_create(void)
     }
 }
 
     }
+static void compress_threads_load_cleanup(void)
 {
     int i, thread_count;
 
 {
     int i, thread_count;
 
+    if (!migrate_use_compression()) {
+        return;
+    }
     thread_count = migrate_decompress_threads();
     for (i = 0; i < thread_count; i++) {
         qemu_mutex_lock(&decomp_param[i].mutex);
     thread_count = migrate_decompress_threads();
     for (i = 0; i < thread_count; i++) {
         qemu_mutex_lock(&decomp_param[i].mutex);
@@ -2479,6 +2312,28 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
+/**
+ * ram_load_setup: Setup RAM for migration incoming side
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+    xbzrle_load_setup();
+    compress_threads_load_setup();
+    return 0;
+}
+
+static int ram_load_cleanup(void *opaque)
+{
+    xbzrle_load_cleanup();
+    compress_threads_load_cleanup();
+    return 0;
+}
+
 /**
  * ram_postcopy_incoming_init: allocate postcopy data structures
  *
@@ -2492,7 +2347,7 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
  */
 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
 {
-    size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
+    unsigned long ram_pages = last_ram_page();
 
     return postcopy_ram_incoming_init(mis, ram_pages);
 }
@@ -2532,7 +2387,7 @@ static int ram_load_postcopy(QEMUFile *f)
 
         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
         place_needed = false;
-        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
             block = ram_block_from_stream(f, flags);
 
             host = host_from_ram_block_offset(block, addr);
@@ -2579,7 +2434,7 @@ static int ram_load_postcopy(QEMUFile *f)
         last_host = host;
 
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
-        case RAM_SAVE_FLAG_COMPRESS:
+        case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
             memset(page_buffer, ch, TARGET_PAGE_SIZE);
             if (ch) {
@@ -2631,7 +2486,7 @@ static int ram_load_postcopy(QEMUFile *f)
 
 static int ram_load(QEMUFile *f, void *opaque, int version_id)
 {
-    int flags = 0, ret = 0;
+    int flags = 0, ret = 0, invalid_flags = 0;
     static uint64_t seq_iter;
     int len = 0;
     /*
@@ -2648,6 +2503,9 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         ret = -EINVAL;
     }
 
+    if (!migrate_use_compression()) {
+        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
+    }
     /* This RCU critical section can be very long running.
      * When RCU reclaims in the code start to become numerous,
      * it will be necessary to reduce the granularity of this
@@ -2668,7 +2526,16 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
         flags = addr & ~TARGET_PAGE_MASK;
         addr &= TARGET_PAGE_MASK;
 
-        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
+        if (flags & invalid_flags) {
+            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
+                error_report("Received an unexpected compressed page");
+            }
+
+            ret = -EINVAL;
+            break;
+        }
+
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
             RAMBlock *block = ram_block_from_stream(f, flags);
 
@@ -2678,6 +2545,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
                 ret = -EINVAL;
                 break;
             }
+            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
         }
 
         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
@@ -2729,7 +2597,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
             }
             break;
 
-        case RAM_SAVE_FLAG_COMPRESS:
+        case RAM_SAVE_FLAG_ZERO:
             ch = qemu_get_byte(f);
             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
             break;
@@ -2780,13 +2648,15 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 }
 
 static SaveVMHandlers savevm_ram_handlers = {
-    .save_live_setup = ram_save_setup,
+    .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
     .save_live_complete_postcopy = ram_save_complete,
     .save_live_complete_precopy = ram_save_complete,
     .save_live_pending = ram_save_pending,
     .load_state = ram_load,
-    .cleanup = ram_migration_cleanup,
+    .save_cleanup = ram_save_cleanup,
+    .load_setup = ram_load_setup,
+    .load_cleanup = ram_load_cleanup,
 };
 
 void ram_mig_init(void)
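
The handler table above is registered once at startup from ram_mig_init(); because RAMState is now heap-allocated, the opaque handed to the core is the address of the RAMState pointer, which is why every callback above dereferences a RAMState **. A hedged sketch of what that registration presumably looks like; the register_savevm_live() argument list here is an assumption, not taken from this diff:

/* Sketch only: registration of the handlers above.  Passing &ram_state
 * (a RAMState **) is what lets ram_save_cleanup() free the state and
 * NULL the pointer for the next migration attempt. */
static RAMState *ram_state;

void ram_mig_init_sketch(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}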