#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
+#include "sysemu/block-backend.h"
#include <assert.h>
#define BLOCK_SIZE (1 << 20)
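/* One migration chunk is 1 MiB; the per-device dirty bitmaps created below
 * use the same granularity, so a single bit tracks one chunk. */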
blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
nr_sectors, blk_mig_read_cb, blk);
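/* The chunk has been queued for reading, so clear its bits in the dirty
 * bitmap; it will only be sent again if the guest rewrites it. */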
- bdrv_reset_dirty(bs, cur_sector, nr_sectors);
+ bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
qemu_mutex_unlock_iothread();
bmds->cur_sector = cur_sector + nr_sectors;
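/* Give every migrated device its own dirty bitmap so writes made by the
 * guest while migration runs can be tracked and resent later. */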
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
- NULL);
+ NULL, NULL);
if (!bmds->dirty_bitmap) {
ret = -errno;
goto fail;
blk_mig_lock();
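/* If an earlier read of this chunk is still in flight, let this device's
 * requests drain before re-reading the dirty sectors. */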
if (bmds_aio_inflight(bmds, sector)) {
blk_mig_unlock();
- bdrv_drain_all();
+ bdrv_drain(bmds->bs);
} else {
blk_mig_unlock();
}
g_free(blk);
}
- bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
+ bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
break;
}
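/* Advance to the next chunk boundary (one BLOCK_SIZE chunk worth of sectors). */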
sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
int64_t dirty = 0;
QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
- dirty += bdrv_get_dirty_count(bmds->bs, bmds->dirty_bitmap);
+ dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
}
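/* The dirty count is in sectors; shift by BDRV_SECTOR_BITS to return bytes. */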
return dirty << BDRV_SECTOR_BITS;
pending = get_remaining_dirty() + block_mig_state.submitted * BLOCK_SIZE +
          block_mig_state.read_done * BLOCK_SIZE;

/* Report more than max_size while the bulk phase is still in progress so
 * that migration is not treated as converged before every block has been
 * sent at least once. */
- if (pending == 0 && !block_mig_state.bulk_completed) {
- pending = BLOCK_SIZE;
+ if (pending <= max_size && !block_mig_state.bulk_completed) {
+ pending = max_size + BLOCK_SIZE;
}
blk_mig_unlock();
qemu_mutex_unlock_iothread();
char device_name[256];
int64_t addr;
BlockDriverState *bs, *bs_prev = NULL;
+ BlockBackend *blk;
uint8_t *buf;
int64_t total_sectors = 0;
int nr_sectors;
qemu_get_buffer(f, (uint8_t *)device_name, len);
device_name[len] = '\0';
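/* The stream identifies each device by name; look up the matching block
 * backend on the destination side. */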
- bs = bdrv_find(device_name);
- if (!bs) {
+ blk = blk_by_name(device_name);
+ if (!blk) {
fprintf(stderr, "Error unknown block device %s\n",
device_name);
return -EINVAL;
}
+ bs = blk_bs(blk);
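/* Subsequent writes for this device go through the BlockDriverState behind
 * the named backend. */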
if (bs != bs_prev) {
bs_prev = bs;