/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase. Can be read without a lock. */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread. Does not need a lock. */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock. */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread. */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock. */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock. */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread. Does not need a lock. */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

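/* Serialize one block to the migration stream: a be64 header carrying the
 * sector number and flags, the device name, and then either the BLOCK_SIZE
 * payload or, for all-zero blocks, just a flush of the zero flag.
 */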
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

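/* Progress query helpers; byte counts are derived from the per-device
 * sector counters.
 */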
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held. */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held. */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

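/* Allocate one bit per dirty chunk, rounded up to whole bytes, to track
 * which chunks have reads in flight.
 */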
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop! */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken. */

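/* Read the next bulk-phase chunk of one device and queue it as async I/O.
 * Returns 1 once the device has been read to the end.
 */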
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane. Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken. */

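/* Create a dirty bitmap per device, at BLOCK_SIZE granularity, so that
 * writes which land during migration are re-sent by the dirty phase.
 */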
static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken. */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
    }
}

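/* Enumerate all writable block devices, wrap each one in a migration-owned
 * BlockBackend and queue the per-device state on bmds_list.
 */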
static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken. */

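/* Advance the bulk phase by one chunk on the first device that still has
 * data left, emit a progress marker, and return 0 once every device has
 * completed its bulk phase.
 */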
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken. */

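/* Scan the dirty bitmap from the device's cursor and send (synchronously,
 * or as queued async I/O when is_async is set) the first dirty chunk found.
 * Returns 1 once the cursor has passed the end of the device.
 */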
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_get_dirty_locked(bs, bmds->dirty_bitmap, sector)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap, sector,
                                           nr_sectors);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken. */

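/* Move completed reads from blk_list onto the stream, stopping early when
 * the rate limit is reached or a read has failed.
 */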
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken. */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken. */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref. */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

/* Called with iothread lock taken. */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

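/* .save_setup handler: build the per-device state, start dirty tracking
 * and flush a first batch of blocks.
 */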
static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

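/* .save_live_iterate handler: keep submitting blocks while the rate limit
 * and the MAX_INFLIGHT_IO cap allow, then report whether any progress was
 * made on the stream.
 */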
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken. */

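/* .save_live_complete_precopy handler: synchronously drain the remaining
 * dirty blocks and emit the final progress and EOS markers.
 */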
static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated. */
    block_migration_cleanup_bmds();

    return 0;
}

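/* .save_live_pending handler: estimate the bytes still to be transferred,
 * counting dirty sectors plus queued and in-flight blocks.
 */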
static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

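/* Destination side: parse each chunk header from the stream, look up the
 * named device and write the payload, honouring the zero-block and
 * progress flags, until EOS is reached.
 */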
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

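/* Hook table handed to register_savevm_live() in blk_mig_init() below. */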
static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}