* THE SOFTWARE.
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
#include "cpu.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
-#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
-#include "migration/migration.h"
-#include "migration/qemu-file.h"
+#include "ram.h"
+#include "migration.h"
+#include "migration/register.h"
+#include "migration/misc.h"
+#include "qemu-file.h"
#include "postcopy-ram.h"
-#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "trace.h"
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
-static uint8_t *ZERO_TARGET_PAGE;
-
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
return buffer_is_zero(p, size);
}
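+/* XBZRLE page-cache statistics, reported to the user via query-migrate */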
+XBZRLECacheStats xbzrle_counters;
+
/* struct containing the XBZRLE cache and the static pages
   used by the compression */
static struct {
/* Cache for XBZRLE, Protected by lock. */
PageCache *cache;
QemuMutex lock;
+ /* it stores a page full of zeros */
+ uint8_t *zero_target_page;
+ /* buffer used for XBZRLE decoding */
+ uint8_t *decoded_buf;
} XBZRLE;
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
static void XBZRLE_cache_lock(void)
{
if (migrate_use_xbzrle())
bool ram_bulk_stage;
/* How many times we have dirtied too many pages */
int dirty_rate_high_cnt;
- /* How many times we have synchronized the bitmap */
- uint64_t bitmap_sync_count;
/* these variables are used for bitmap sync */
/* last time we did a full bitmap_sync */
int64_t time_last_bitmap_sync;
uint64_t xbzrle_cache_miss_prev;
/* number of iterations at the beginning of period */
uint64_t iterations_prev;
- /* Accounting fields */
- /* number of zero pages. It used to be pages filled by the same char. */
- uint64_t zero_pages;
- /* number of normal transferred pages */
- uint64_t norm_pages;
/* Iterations since start */
uint64_t iterations;
- /* xbzrle transmitted bytes. Notice that this is with
- * compression, they can't be calculated from the pages */
- uint64_t xbzrle_bytes;
- /* xbzrle transmmited pages */
- uint64_t xbzrle_pages;
- /* xbzrle number of cache miss */
- uint64_t xbzrle_cache_miss;
- /* xbzrle miss rate */
- double xbzrle_cache_miss_rate;
- /* xbzrle number of overflows */
- uint64_t xbzrle_overflows;
- /* number of dirty bits in the bitmap */
- uint64_t migration_dirty_pages;
- /* total number of bytes transferred */
- uint64_t bytes_transferred;
- /* number of dirtied pages in the last second */
- uint64_t dirty_pages_rate;
- /* Count of requests incoming from destination */
- uint64_t postcopy_requests;
+ /* number of dirty bits in the bitmap */
+ uint64_t migration_dirty_pages;
/* protects modification of the bitmap */
QemuMutex bitmap_mutex;
/* The RAMBlock used in the last src_page_requests */
RAMBlock *last_req_rb;
};
typedef struct RAMState RAMState;
-static RAMState ram_state;
-
-uint64_t dup_mig_pages_transferred(void)
-{
- return ram_state.zero_pages;
-}
-
-uint64_t norm_mig_pages_transferred(void)
-{
- return ram_state.norm_pages;
-}
-
-uint64_t xbzrle_mig_bytes_transferred(void)
-{
- return ram_state.xbzrle_bytes;
-}
-
-uint64_t xbzrle_mig_pages_transferred(void)
-{
- return ram_state.xbzrle_pages;
-}
-
-uint64_t xbzrle_mig_pages_cache_miss(void)
-{
- return ram_state.xbzrle_cache_miss;
-}
-
-double xbzrle_mig_cache_miss_rate(void)
-{
- return ram_state.xbzrle_cache_miss_rate;
-}
-
-uint64_t xbzrle_mig_pages_overflow(void)
-{
- return ram_state.xbzrle_overflows;
-}
-
-uint64_t ram_bytes_transferred(void)
-{
- return ram_state.bytes_transferred;
-}
+static RAMState *ram_state;
uint64_t ram_bytes_remaining(void)
{
- return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
+ return ram_state->migration_dirty_pages * TARGET_PAGE_SIZE;
}
-uint64_t ram_dirty_sync_count(void)
-{
- return ram_state.bitmap_sync_count;
-}
-
-uint64_t ram_dirty_pages_rate(void)
-{
- return ram_state.dirty_pages_rate;
-}
-
-uint64_t ram_postcopy_requests(void)
-{
- return ram_state.postcopy_requests;
-}
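+/* RAM migration accounting, reported to the user via query-migrate */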
+MigrationStats ram_counters;
/* used by the search for pages to send */
struct PageSearchStatus {
}
}
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
{
int i, thread_count;
comp_param = NULL;
}
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
{
int i, thread_count;
/* We don't care if this fails to allocate a new cache page
 * as long as it updates an old one */
- cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
- rs->bitmap_sync_count);
+ cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
+ ram_counters.dirty_sync_count);
}
#define ENCODING_FLAG_XBZRLE 0x1
int encoded_len = 0, bytes_xbzrle;
uint8_t *prev_cached_page;
- if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
- rs->xbzrle_cache_miss++;
+ if (!cache_is_cached(XBZRLE.cache, current_addr,
+ ram_counters.dirty_sync_count)) {
+ xbzrle_counters.cache_miss++;
if (!last_stage) {
if (cache_insert(XBZRLE.cache, current_addr, *current_data,
- rs->bitmap_sync_count) == -1) {
+ ram_counters.dirty_sync_count) == -1) {
return -1;
} else {
/* update *current_data when the page has been
return 0;
} else if (encoded_len == -1) {
trace_save_xbzrle_page_overflow();
- rs->xbzrle_overflows++;
+ xbzrle_counters.overflow++;
/* update data in the cache */
if (!last_stage) {
memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
qemu_put_be16(rs->f, encoded_len);
qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
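+ /* 1 byte for the ENCODING_FLAG_XBZRLE marker, 2 for the be16 length */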
bytes_xbzrle += encoded_len + 1 + 2;
- rs->xbzrle_pages++;
- rs->xbzrle_bytes += bytes_xbzrle;
- rs->bytes_transferred += bytes_xbzrle;
+ xbzrle_counters.pages++;
+ xbzrle_counters.bytes += bytes_xbzrle;
+ ram_counters.transferred += bytes_xbzrle;
return 1;
}
int64_t end_time;
uint64_t bytes_xfer_now;
- rs->bitmap_sync_count++;
-
- if (!rs->bytes_xfer_prev) {
- rs->bytes_xfer_prev = ram_bytes_transferred();
- }
+ ram_counters.dirty_sync_count++;
if (!rs->time_last_bitmap_sync) {
rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
/* more than 1 second = 1000 milliseconds */
if (end_time > rs->time_last_bitmap_sync + 1000) {
+ /* calculate period counters */
+ ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
+ / (end_time - rs->time_last_bitmap_sync);
+ bytes_xfer_now = ram_counters.transferred;
+
if (migrate_auto_converge()) {
/* The following detection logic can be refined later. For now:
   Check to see if the dirtied bytes exceed 50% of the approx.
   amount of bytes that just got transferred since the last time
   we were in this routine. If that happens twice, start or
   increase throttling */
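+ /* Illustrative numbers (not from this patch): with 512MB dirtied
+ * in the period but only 256MB transferred, the dirtied bytes exceed
+ * transferred / 2; a second such period in a row triggers
+ * mig_throttle_guest_down(). */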
- bytes_xfer_now = ram_bytes_transferred();
- if (rs->dirty_pages_rate &&
- (rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
+ if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
(bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
- (rs->dirty_rate_high_cnt++ >= 2)) {
+ (++rs->dirty_rate_high_cnt >= 2)) {
trace_migration_throttle();
rs->dirty_rate_high_cnt = 0;
mig_throttle_guest_down();
- }
- rs->bytes_xfer_prev = bytes_xfer_now;
+ }
}
if (migrate_use_xbzrle()) {
if (rs->iterations_prev != rs->iterations) {
- rs->xbzrle_cache_miss_rate =
- (double)(rs->xbzrle_cache_miss -
+ xbzrle_counters.cache_miss_rate =
+ (double)(xbzrle_counters.cache_miss -
rs->xbzrle_cache_miss_prev) /
(rs->iterations - rs->iterations_prev);
}
rs->iterations_prev = rs->iterations;
- rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
+ rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
}
- rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
- / (end_time - rs->time_last_bitmap_sync);
+
+ /* reset period counters */
rs->time_last_bitmap_sync = end_time;
rs->num_dirty_pages_period = 0;
+ rs->bytes_xfer_prev = bytes_xfer_now;
}
if (migrate_use_events()) {
- qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
+ qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
}
}
int pages = -1;
if (is_zero_range(p, TARGET_PAGE_SIZE)) {
- rs->zero_pages++;
- rs->bytes_transferred +=
+ ram_counters.duplicate++;
+ ram_counters.transferred +=
save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(rs->f, 0);
- rs->bytes_transferred += 1;
+ ram_counters.transferred += 1;
pages = 1;
}
ret = ram_control_save_page(rs->f, block->offset,
offset, TARGET_PAGE_SIZE, &bytes_xmit);
if (bytes_xmit) {
- rs->bytes_transferred += bytes_xmit;
+ ram_counters.transferred += bytes_xmit;
pages = 1;
}
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
if (ret != RAM_SAVE_CONTROL_DELAYED) {
if (bytes_xmit > 0) {
- rs->norm_pages++;
+ ram_counters.normal++;
} else if (bytes_xmit == 0) {
- rs->zero_pages++;
+ ram_counters.duplicate++;
}
}
} else {
/* XBZRLE overflow or normal page */
if (pages == -1) {
- rs->bytes_transferred += save_page_header(rs, rs->f, block,
- offset | RAM_SAVE_FLAG_PAGE);
+ ram_counters.transferred +=
+ save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_PAGE);
if (send_async) {
qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
migrate_release_ram() &
} else {
qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
}
- rs->bytes_transferred += TARGET_PAGE_SIZE;
+ ram_counters.transferred += TARGET_PAGE_SIZE;
pages = 1;
- rs->norm_pages++;
+ ram_counters.normal++;
}
XBZRLE_cache_unlock();
static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
ram_addr_t offset)
{
- RAMState *rs = &ram_state;
+ RAMState *rs = ram_state;
int bytes_sent, blen;
uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
qemu_mutex_lock(&comp_param[idx].mutex);
if (!comp_param[idx].quit) {
len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
- rs->bytes_transferred += len;
+ ram_counters.transferred += len;
}
qemu_mutex_unlock(&comp_param[idx].mutex);
}
qemu_cond_signal(&comp_param[idx].cond);
qemu_mutex_unlock(&comp_param[idx].mutex);
pages = 1;
- rs->norm_pages++;
- rs->bytes_transferred += bytes_xmit;
+ ram_counters.normal++;
+ ram_counters.transferred += bytes_xmit;
break;
}
}
ret = ram_control_save_page(rs->f, block->offset,
offset, TARGET_PAGE_SIZE, &bytes_xmit);
if (bytes_xmit) {
- rs->bytes_transferred += bytes_xmit;
+ ram_counters.transferred += bytes_xmit;
pages = 1;
}
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
if (ret != RAM_SAVE_CONTROL_DELAYED) {
if (bytes_xmit > 0) {
- rs->norm_pages++;
+ ram_counters.normal++;
} else if (bytes_xmit == 0) {
- rs->zero_pages++;
+ ram_counters.duplicate++;
}
}
} else {
blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
migrate_compress_level());
if (blen > 0) {
- rs->bytes_transferred += bytes_xmit + blen;
- rs->norm_pages++;
+ ram_counters.transferred += bytes_xmit + blen;
+ ram_counters.normal++;
pages = 1;
} else {
qemu_file_set_error(rs->f, blen);
* be some left. If any pages are left, we drop them.
*
*/
-void migration_page_queue_free(void)
+static void migration_page_queue_free(RAMState *rs)
{
struct RAMSrcPageRequest *mspr, *next_mspr;
- RAMState *rs = &ram_state;
/* This queue generally should be empty - but in the case of a failed
 * migration it might have some droppings in.
 */
int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
{
RAMBlock *ramblock;
- RAMState *rs = &ram_state;
+ RAMState *rs = ram_state;
- rs->postcopy_requests++;
+ ram_counters.postcopy_requests++;
rcu_read_lock();
if (!rbname) {
/* Reuse last RAMBlock */
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
uint64_t pages = size / TARGET_PAGE_SIZE;
- RAMState *rs = &ram_state;
if (zero) {
- rs->zero_pages += pages;
+ ram_counters.duplicate += pages;
} else {
- rs->norm_pages += pages;
- rs->bytes_transferred += size;
+ ram_counters.normal += pages;
+ ram_counters.transferred += size;
qemu_update_position(f, size);
}
}
return total;
}
-void free_xbzrle_decoded_buf(void)
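+/**
+ * xbzrle_load_setup: allocate the buffer that XBZRLE pages are decoded into
+ */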
+static void xbzrle_load_setup(void)
{
- g_free(xbzrle_decoded_buf);
- xbzrle_decoded_buf = NULL;
+ XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}
-static void ram_migration_cleanup(void *opaque)
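+/**
+ * xbzrle_load_cleanup: free the XBZRLE decoding buffer
+ */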
+static void xbzrle_load_cleanup(void)
{
+ g_free(XBZRLE.decoded_buf);
+ XBZRLE.decoded_buf = NULL;
+}
+
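+/**
+ * ram_save_cleanup: free the resources set up by ram_state_init() and
+ * release the RAMState itself
+ *
+ * @opaque: RAMState pointer
+ */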
+static void ram_save_cleanup(void *opaque)
+{
+ RAMState **rsp = opaque;
RAMBlock *block;
/* the caller holds the iothread lock or is in a bh, so there is
cache_fini(XBZRLE.cache);
g_free(XBZRLE.encoded_buf);
g_free(XBZRLE.current_buf);
- g_free(ZERO_TARGET_PAGE);
+ g_free(XBZRLE.zero_target_page);
XBZRLE.cache = NULL;
XBZRLE.encoded_buf = NULL;
XBZRLE.current_buf = NULL;
+ XBZRLE.zero_target_page = NULL;
}
XBZRLE_cache_unlock();
+ migration_page_queue_free(*rsp);
+ compress_threads_save_cleanup();
+ g_free(*rsp);
+ *rsp = NULL;
}
static void ram_state_reset(RAMState *rs)
RAMBlock *block,
PostcopyDiscardState *pds)
{
- RAMState *rs = &ram_state;
+ RAMState *rs = ram_state;
unsigned long *bitmap = block->bmap;
unsigned long *unsentmap = block->unsentmap;
unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
*/
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
- RAMState *rs = &ram_state;
+ RAMState *rs = ram_state;
RAMBlock *block;
int ret;
return ret;
}
-static int ram_state_init(RAMState *rs)
+static int ram_state_init(RAMState **rsp)
{
- memset(rs, 0, sizeof(*rs));
- qemu_mutex_init(&rs->bitmap_mutex);
- qemu_mutex_init(&rs->src_page_req_mutex);
- QSIMPLEQ_INIT(&rs->src_page_requests);
+ *rsp = g_new0(RAMState, 1);
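+ /* freed again in ram_save_cleanup(), or below on the error paths */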
+
+ qemu_mutex_init(&(*rsp)->bitmap_mutex);
+ qemu_mutex_init(&(*rsp)->src_page_req_mutex);
+ QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
if (migrate_use_xbzrle()) {
XBZRLE_cache_lock();
- ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
+ XBZRLE.zero_target_page = g_malloc0(TARGET_PAGE_SIZE);
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
if (!XBZRLE.cache) {
XBZRLE_cache_unlock();
error_report("Error creating cache");
+ g_free(*rsp);
+ *rsp = NULL;
return -1;
}
XBZRLE_cache_unlock();
XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
if (!XBZRLE.encoded_buf) {
error_report("Error allocating encoded_buf");
+ g_free(*rsp);
+ *rsp = NULL;
return -1;
}
error_report("Error allocating current_buf");
g_free(XBZRLE.encoded_buf);
XBZRLE.encoded_buf = NULL;
+ g_free(*rsp);
+ *rsp = NULL;
return -1;
}
}
qemu_mutex_lock_ramlist();
rcu_read_lock();
- ram_state_reset(rs);
+ ram_state_reset(*rsp);
/* Skip setting bitmap if there is no RAM */
if (ram_bytes_total()) {
* Count the total number of pages used by ram blocks not including any
* gaps due to alignment or unplugs.
*/
- rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
+ (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
memory_global_dirty_log_start();
- migration_bitmap_sync(rs);
+ migration_bitmap_sync(*rsp);
qemu_mutex_unlock_ramlist();
qemu_mutex_unlock_iothread();
rcu_read_unlock();
*/
static int ram_save_setup(QEMUFile *f, void *opaque)
{
- RAMState *rs = opaque;
+ RAMState **rsp = opaque;
RAMBlock *block;
/* migration has already setup the bitmap, reuse it. */
if (!migration_in_colo_state()) {
- if (ram_state_init(rs) < 0) {
+ if (ram_state_init(rsp) != 0) {
return -1;
- }
+ }
}
- rs->f = f;
+ (*rsp)->f = f;
rcu_read_lock();
}
rcu_read_unlock();
+ compress_threads_save_setup();
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
*/
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
- RAMState *rs = opaque;
+ RAMState **temp = opaque;
+ RAMState *rs = *temp;
int ret;
int i;
int64_t t0;
ram_control_after_iterate(f, RAM_CONTROL_ROUND);
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- rs->bytes_transferred += 8;
+ ram_counters.transferred += 8;
ret = qemu_file_get_error(f);
if (ret < 0) {
*/
static int ram_save_complete(QEMUFile *f, void *opaque)
{
- RAMState *rs = opaque;
+ RAMState **temp = opaque;
+ RAMState *rs = *temp;
rcu_read_lock();
uint64_t *non_postcopiable_pending,
uint64_t *postcopiable_pending)
{
- RAMState *rs = opaque;
+ RAMState **temp = opaque;
+ RAMState *rs = *temp;
uint64_t remaining_size;
remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
int xh_flags;
uint8_t *loaded_data;
- if (!xbzrle_decoded_buf) {
- xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
- }
- loaded_data = xbzrle_decoded_buf;
-
/* extract RLE header */
xh_flags = qemu_get_byte(f);
xh_len = qemu_get_be16(f);
error_report("Failed to load XBZRLE page - len overflow!");
return -1;
}
+ loaded_data = XBZRLE.decoded_buf;
/* load data and decode */
+ /* the call below can change loaded_data to point to an internal buffer */
qemu_get_buffer_in_place(f, &loaded_data, xh_len);
/* decode RLE */
qemu_mutex_unlock(&decomp_done_lock);
}
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
{
int i, thread_count;
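+ /* decompression threads are only needed when compression is in use */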
+ if (!migrate_use_compression()) {
+ return;
+ }
thread_count = migrate_decompress_threads();
decompress_threads = g_new0(QemuThread, thread_count);
decomp_param = g_new0(DecompressParam, thread_count);
}
}
-void migrate_decompress_threads_join(void)
+static void compress_threads_load_cleanup(void)
{
int i, thread_count;
+ if (!migrate_use_compression()) {
+ return;
+ }
thread_count = migrate_decompress_threads();
for (i = 0; i < thread_count; i++) {
qemu_mutex_lock(&decomp_param[i].mutex);
qemu_mutex_unlock(&decomp_done_lock);
}
+/**
+ * ram_load_setup: set up RAM for the incoming side of migration
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+ xbzrle_load_setup();
+ compress_threads_load_setup();
+ return 0;
+}
+
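+/**
+ * ram_load_cleanup: free the resources allocated by ram_load_setup()
+ *
+ * Returns zero to indicate success
+ *
+ * @opaque: RAMState pointer
+ */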
+static int ram_load_cleanup(void *opaque)
+{
+ xbzrle_load_cleanup();
+ compress_threads_load_cleanup();
+ return 0;
+}
+
/**
* ram_postcopy_incoming_init: allocate postcopy data structures
*
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
- int flags = 0, ret = 0;
+ int flags = 0, ret = 0, invalid_flags = 0;
static uint64_t seq_iter;
int len = 0;
/*
ret = -EINVAL;
}
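+ /* If compression was not negotiated, compress_threads_load_setup()
+ * started no decompression threads, so a compressed page in the
+ * stream is a hard error. */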
+ if (!migrate_use_compression()) {
+ invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
+ }
/* This RCU critical section can be very long running.
* When RCU reclaims in the code start to become numerous,
* it will be necessary to reduce the granularity of this
flags = addr & ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
+ if (flags & invalid_flags) {
+ if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
+ error_report("Received an unexpected compressed page");
+ }
+
+ ret = -EINVAL;
+ break;
+ }
+
if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
RAMBlock *block = ram_block_from_stream(f, flags);
}
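+/* callbacks wired into the generic savevm code; the save_* hooks run on
+ * the migration source, the load_* hooks on the destination */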
static SaveVMHandlers savevm_ram_handlers = {
- .save_live_setup = ram_save_setup,
+ .save_setup = ram_save_setup,
.save_live_iterate = ram_save_iterate,
.save_live_complete_postcopy = ram_save_complete,
.save_live_complete_precopy = ram_save_complete,
.save_live_pending = ram_save_pending,
.load_state = ram_load,
- .cleanup = ram_migration_cleanup,
+ .save_cleanup = ram_save_cleanup,
+ .load_setup = ram_load_setup,
+ .load_cleanup = ram_load_cleanup,
};
void ram_mig_init(void)