/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "monitor.h"
#include "block-migration.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t completed_sectors;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
} BlkMigDevState;

typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int prev_progress;
} BlkMigState;

static BlkMigState block_mig_state;

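/* Write one chunk to the migration stream: a 64-bit header holding
 * (sector << BDRV_SECTOR_BITS) | flags, then the device name prefixed
 * by its length, then BLOCK_SIZE bytes of data.  block_load() parses
 * the same layout on the destination. */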
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

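/* Byte counters derived from the per-device sector counts; the
 * migration layer uses these to report disk transfer progress. */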
uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

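/* AIO completion callback: record the read status, queue the block
 * for flush_blks() and account it as read_done instead of submitted. */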
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

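/* Read one bulk chunk from a device, asynchronously (queued for
 * flush_blks()) or synchronously (sent immediately).  Returns 1 once
 * the device has been fully transferred, 0 otherwise. */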
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds, int is_async)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        /* skip sectors that are already present in the base image */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;
    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;

    if (is_async) {
        blk->iov.iov_base = blk->buf;
        blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);
        blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                    nr_sectors, blk_mig_read_cb, blk);
        if (!blk->aiocb) {
            goto error;
        }
        block_mig_state.submitted++;
    } else {
        if (bdrv_read(bs, cur_sector, blk->buf, nr_sectors) < 0) {
            goto error;
        }
        blk_send(f, blk);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

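/* Enable or disable dirty-sector tracking on all migrated devices. */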
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

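/* Reset the global transfer counters and register every attached
 * hard disk for migration. */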
static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->completed_sectors = 0;
            bmds->shared_base = block_mig_state.shared_base;

            block_mig_state.total_sector_sum += bmds->total_sectors;

            if (bmds->shared_base) {
                monitor_printf(mon, "Start migration for %s with shared base "
                                    "image\n",
                               bs->device_name);
            } else {
                monitor_printf(mon, "Start full migration for %s\n",
                               bs->device_name);
            }

            QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
        }
    }
}

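/* Make one step of bulk progress: transfer a chunk from the first
 * device whose bulk section is unfinished, and emit a progress marker
 * whenever the overall percentage changes.  Returns 0 when all
 * devices have completed their bulk section. */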
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds, is_async) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    progress = completed_sector_sum * 100 / block_mig_state.total_sector_sum;
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        monitor_printf(mon, "Completed %d %%\r", progress);
        monitor_flush(mon);
    }

    return ret;
}

#define MAX_NUM_BLOCKS 4

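/* Stage 3 pass: synchronously resend every chunk that was dirtied
 * after its bulk transfer, as flagged by the dirty bitmap. */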
static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlkMigBlock blk;
    int64_t sector;

    blk.buf = qemu_malloc(BLOCK_SIZE);

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, blk.buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    monitor_printf(mon, "Error reading sector %" PRId64 "\n",
                                   sector);
                    qemu_file_set_error(f);
                    qemu_free(blk.buf);
                    return;
                }
                blk.bmds = bmds;
                blk.sector = sector;
                blk_send(f, &blk);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(blk.buf);
}

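/* Send completed AIO blocks to the stream until the list is drained
 * or the bandwidth limit is reached. */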
static void flush_blks(QEMUFile* f)
{
    BlkMigBlock *blk;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

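/* Stage 2 is complete once no reads are in flight and every device
 * has finished its bulk section. */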
static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }
    return 1;
}

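/* Free all per-device state and any still-queued blocks, and switch
 * dirty tracking back off. */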
static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        qemu_free(bmds);
    }
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    set_dirty_tracking(0);

    monitor_printf(mon, "\n");
}

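/* Live-save handler: stage 1 initializes and starts dirty tracking,
 * stage 2 iterates while the guest runs, stage 3 runs after the guest
 * is stopped and flushes everything still dirty.  A negative stage
 * means the migration was cancelled. */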
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);
        /* start track dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);
    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(mon, f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);
    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (stage == 3) {
        /* finish any remaining bulk transfer synchronously */
        while (blk_mig_save_bulked_block(mon, f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(mon, f);
        blk_mig_cleanup(mon);

        /* report completion */
        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
        if (qemu_file_has_error(f)) {
            return 0;
        }
        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

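/* Destination handler: replay the stream produced by block_save_live(),
 * writing each chunk back at its original sector offset. */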
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            buf = qemu_malloc(BLOCK_SIZE);
            qemu_get_buffer(f, buf, BLOCK_SIZE);
            bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);
            qemu_free(buf);
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        if (qemu_file_has_error(f)) {
            return -EIO;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

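/* Register the "block" live savevm section at startup. */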
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}