/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

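/* These flags travel in the same be64 word as the sector number, which
 * blk_send() shifts left by BDRV_SECTOR_BITS (9).  The low nine bits are
 * therefore free for flags; only the four above are defined.
 */
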
#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock. */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock. */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread. */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock. */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock. */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock. */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

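/* For reference, the resulting on-wire layout of one device-block chunk,
 * as reconstructed from the calls in blk_send() above (a sketch, not a
 * normative format description):
 *
 *   be64   (sector << BDRV_SECTOR_BITS) | flags
 *   byte   length of the device name
 *   ...    device name, not NUL-terminated
 *   ...    BLOCK_SIZE (1 MiB) of data, omitted when
 *          BLK_MIG_FLAG_ZERO_BLOCK is set
 */
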
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

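/* Worked example for the bitmap arithmetic above, assuming a 64-bit
 * unsigned long: with BDRV_SECTORS_PER_DIRTY_CHUNK == 2048, sector
 * 1048576 falls into chunk 512, i.e. word 512 / 64 == 8 of aio_bitmap,
 * bit 512 % 64 == 0.
 */
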
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

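/* Sizing example, under the same assumptions: a 10 GiB disk has
 * 20971520 sectors, hence 10240 dirty chunks, and the rounding above
 * allocates ceil(10240 / 8) == 1280 bytes for the bitmap.
 */
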
/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure as
         * an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector,
                                  MAX_IS_ALLOCATED_SEARCH, &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

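/* Note the rounding above: cur_sector is aligned down to a chunk
 * boundary before the read, so every bulk transfer covers one whole
 * 1 MiB chunk (except possibly the last).  E.g. resuming at sector 3000
 * re-reads from sector 2048, the start of its chunk.
 */
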
/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(blk_get_aio_context(bmds->blk));
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
            aio_context_release(blk_get_aio_context(bmds->blk));
        }
    }
    return ret;
}

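/* The dirty bitmap is created with a granularity of BLOCK_SIZE (1 MiB),
 * so one dirty bit corresponds to exactly one migration chunk of
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 sectors.
 */
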
/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bs, bmds->dirty_bitmap, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            sector += nr_sectors;
            bmds->cur_dirty = sector;

            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much dirty data left for max_downtime
 * 1: little enough data left for max_downtime
 * < 0: error occurred
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

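/* Reading the return convention above purely from the code: 1 means this
 * pass wrote data into the stream (the qemu_ftell() delta is positive),
 * 0 means nothing was written, and -1 flags a stream position that went
 * backwards, which is treated as an error.
 */
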
/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk phase has completed and that
       all async reads have finished */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

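/* Illustrative numbers for the estimate above: with 8 MiB of dirty
 * sectors, 3 reads submitted and 2 buffers read but not yet flushed,
 * pending comes to 8 + (3 + 2) * 1 MiB == 13 MiB.  During the bulk
 * phase, any result at or below max_size is bumped past it so the
 * migration loop keeps iterating.
 */
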
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

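/* The per-cluster loop in block_load() exists because the sender only
 * detects 1 MiB blocks that are zero in their entirety.  With, say, a
 * qcow2 cluster size of 64 KiB, each received block is rescanned as 16
 * clusters so all-zero clusters can still be written via
 * blk_pwrite_zeroes(); the 64 KiB figure is just an illustration.
 */
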
static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}
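
/* The call above registers the handlers with the generic live-migration
 * loop under section name "block", instance 0, stream version 1;
 * block_is_active() keeps the section dormant unless block migration
 * was actually requested.
 */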