X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/4fd1cbaf146d4ab35f465bba0fe23115c33cd5a7..a1ed82b4438a7fdf470673c575e8b47d276c9030:/block/backup.c

diff --git a/block/backup.c b/block/backup.c
index 81895ddbe2..2baf7bed65 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -28,25 +28,36 @@
 
 #define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
 
+typedef struct CowRequest {
+    int64_t start_byte;
+    int64_t end_byte;
+    QLIST_ENTRY(CowRequest) list;
+    CoQueue wait_queue; /* coroutines blocked on this request */
+} CowRequest;
+
 typedef struct BackupBlockJob {
     BlockJob common;
     BlockBackend *target;
-    /* bitmap for sync=incremental */
+
     BdrvDirtyBitmap *sync_bitmap;
+    BdrvDirtyBitmap *copy_bitmap;
+
     MirrorSyncMode sync_mode;
+    BitmapSyncMode bitmap_mode;
     BlockdevOnError on_source_error;
     BlockdevOnError on_target_error;
     CoRwlock flush_rwlock;
     uint64_t len;
     uint64_t bytes_read;
     int64_t cluster_size;
-    bool compress;
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
 
-    HBitmap *copy_bitmap;
     bool use_copy_range;
     int64_t copy_range_size;
+
+    BdrvRequestFlags write_flags;
+    bool initializing_bitmap;
 } BackupBlockJob;
 
 static const BlockJobDriver backup_job_driver;
@@ -89,7 +100,7 @@ static void cow_request_end(CowRequest *req)
 }
 
 /* Copy range to target with a bounce buffer and return the bytes copied. If
- * error occured, return a negative error number */
+ * error occurred, return a negative error number */
 static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end,
@@ -98,22 +109,18 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                       void **bounce_buffer)
 {
     int ret;
-    struct iovec iov;
-    QEMUIOVector qiov;
     BlockBackend *blk = job->common.blk;
     int nbytes;
+    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 
-    hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
+    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
+    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
     nbytes = MIN(job->cluster_size, job->len - start);
     if (!*bounce_buffer) {
         *bounce_buffer = blk_blockalign(blk, job->cluster_size);
     }
-    iov.iov_base = *bounce_buffer;
-    iov.iov_len = nbytes;
-    qemu_iovec_init_external(&qiov, &iov, 1);
 
-    ret = blk_co_preadv(blk, start, qiov.size, &qiov,
-                        is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
+    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
     if (ret < 0) {
         trace_backup_do_cow_read_fail(job, start, ret);
         if (error_is_read) {
@@ -122,14 +129,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
         goto fail;
     }
 
-    if (qemu_iovec_is_zero(&qiov)) {
-        ret = blk_co_pwrite_zeroes(job->target, start,
-                                   qiov.size, BDRV_REQ_MAY_UNMAP);
-    } else {
-        ret = blk_co_pwritev(job->target, start,
-                             qiov.size, &qiov,
-                             job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
-    }
+    ret = blk_co_pwrite(job->target, start, nbytes, *bounce_buffer,
+                        job->write_flags);
     if (ret < 0) {
         trace_backup_do_cow_write_fail(job, start, ret);
         if (error_is_read) {
@@ -140,12 +141,12 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
     return nbytes;
 
 fail:
-    hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
+    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
     return ret;
 
 }
 
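The function above reads one cluster into a reusable bounce buffer and writes it to the target at the same offset, re-marking the cluster dirty if either half fails so it will be retried. A minimal sketch of the same pattern in plain POSIX C; copy_cluster(), src_fd, dst_fd and the MIN macro are illustrative names, not QEMU API:

    /* Sketch of the bounce-buffer copy step, assuming ordinary file
     * descriptors instead of QEMU's coroutine-based BlockBackend. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <unistd.h>

    #define CLUSTER_SIZE (1 << 16)      /* mirrors BACKUP_CLUSTER_SIZE_DEFAULT */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static int copy_cluster(int src_fd, int dst_fd, int64_t start,
                            int64_t dev_len, void **bounce)
    {
        /* The final cluster may be short: clamp to the device length. */
        int64_t nbytes = MIN(CLUSTER_SIZE, dev_len - start);

        if (!*bounce && !(*bounce = malloc(CLUSTER_SIZE))) {
            return -1;
        }
        /* Read into the bounce buffer, then write it out unchanged. */
        if (pread(src_fd, *bounce, nbytes, start) != nbytes) {
            return -1;                  /* caller re-marks the cluster dirty */
        }
        if (pwrite(dst_fd, *bounce, nbytes, start) != nbytes) {
            return -1;
        }
        return (int)nbytes;             /* bytes copied, like the original */
    }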
-/* Copy range to target and return the bytes copied. If error occured, return a
+/* Copy range to target and return the bytes copied. If error occurred, return a
  * negative error number.
  */
 static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                 int64_t start,
@@ -156,24 +157,97 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
     int nr_clusters;
     BlockBackend *blk = job->common.blk;
     int nbytes;
+    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 
     assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
+    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
     nbytes = MIN(job->copy_range_size, end - start);
     nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
-    hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
-                  nr_clusters);
+    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
+                            job->cluster_size * nr_clusters);
     ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
-                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
+                            read_flags, job->write_flags);
     if (ret < 0) {
         trace_backup_do_cow_copy_range_fail(job, start, ret);
-        hbitmap_set(job->copy_bitmap, start / job->cluster_size,
-                    nr_clusters);
+        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
+                              job->cluster_size * nr_clusters);
         return ret;
     }
 
     return nbytes;
 }
 
+/*
+ * Check if the cluster starting at offset is allocated or not.
+ * return via pnum the number of contiguous clusters sharing this allocation.
+ */
+static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
+                                       int64_t *pnum)
+{
+    BlockDriverState *bs = blk_bs(s->common.blk);
+    int64_t count, total_count = 0;
+    int64_t bytes = s->len - offset;
+    int ret;
+
+    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
+
+    while (true) {
+        ret = bdrv_is_allocated(bs, offset, bytes, &count);
+        if (ret < 0) {
+            return ret;
+        }
+
+        total_count += count;
+
+        if (ret || count == 0) {
+            /*
+             * ret: partial segment(s) are considered allocated.
+             * otherwise: unallocated tail is treated as an entire segment.
+             */
+            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
+            return ret;
+        }
+
+        /* Unallocated segment(s) with uncertain following segment(s) */
+        if (total_count >= s->cluster_size) {
+            *pnum = total_count / s->cluster_size;
+            return 0;
+        }
+
+        offset += count;
+        bytes -= count;
+    }
+}
+
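The allocation scan above leans on two rounding rules: a run that ends in allocated data rounds up to whole clusters (partially allocated clusters must be copied), while a purely unallocated run rounds down, because the cluster containing the boundary may still hold allocated data. A toy model of those rules over a byte-granularity allocation map; cluster_allocated(), alloc_map and CLUSTER are invented for illustration and stand in for bdrv_is_allocated():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CLUSTER 4  /* toy cluster size in bytes */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Returns 1 if the run starting at offset is allocated, 0 if not;
     * *pnum receives the run length in whole clusters, rounded with the
     * same rules as backup_is_cluster_allocated(). */
    static int cluster_allocated(const bool *alloc_map, int64_t size,
                                 int64_t offset, int64_t *pnum)
    {
        int64_t total = 0;

        while (offset + total < size) {
            bool first = alloc_map[offset + total];
            int64_t count = 0;

            while (offset + total + count < size &&
                   alloc_map[offset + total + count] == first) {
                count++;
            }
            total += count;

            if (first) {                      /* allocated run: round up */
                *pnum = DIV_ROUND_UP(total, CLUSTER);
                return 1;
            }
            if (total >= CLUSTER) {           /* unallocated run: round down */
                *pnum = total / CLUSTER;
                return 0;
            }
            /* Sub-cluster hole followed by data: keep scanning. */
        }
        *pnum = DIV_ROUND_UP(total, CLUSTER); /* unallocated tail: whole segment */
        return 0;
    }

    int main(void)
    {
        /* 12 bytes: hole of 2, data of 6, hole of 4 (cluster = 4 bytes) */
        bool map[12] = {0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0};
        int64_t n;
        int a = cluster_allocated(map, 12, 0, &n);
        printf("allocated=%d clusters=%lld\n", a, (long long)n); /* 1, 2 */
        return 0;
    }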
+/**
+ * Reset bits in copy_bitmap starting at offset if they represent unallocated
+ * data in the image. May reset subsequent contiguous bits.
+ * @return 0 when the cluster at @offset was unallocated,
+ *         1 otherwise, and -ret on error.
+ */
+static int64_t backup_bitmap_reset_unallocated(BackupBlockJob *s,
+                                               int64_t offset, int64_t *count)
+{
+    int ret;
+    int64_t clusters, bytes, estimate;
+
+    ret = backup_is_cluster_allocated(s, offset, &clusters);
+    if (ret < 0) {
+        return ret;
+    }
+
+    bytes = clusters * s->cluster_size;
+
+    if (!ret) {
+        bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
+        estimate = bdrv_get_dirty_count(s->copy_bitmap);
+        job_progress_set_remaining(&s->common.job, estimate);
+    }
+
+    *count = bytes;
+    return ret;
+}
+
 static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                       int64_t offset, uint64_t bytes,
                                       bool *error_is_read,
@@ -183,6 +257,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     int ret = 0;
     int64_t start, end; /* bytes */
     void *bounce_buffer = NULL;
+    int64_t status_bytes;
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
@@ -195,22 +270,43 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     cow_request_begin(&cow_request, job, start, end);
 
     while (start < end) {
-        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
+        int64_t dirty_end;
+
+        if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
             trace_backup_do_cow_skip(job, start);
             start += job->cluster_size;
             continue; /* already copied */
         }
 
+        dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
+                                                (end - start));
+        if (dirty_end < 0) {
+            dirty_end = end;
+        }
+
+        if (job->initializing_bitmap) {
+            ret = backup_bitmap_reset_unallocated(job, start, &status_bytes);
+            if (ret == 0) {
+                trace_backup_do_cow_skip_range(job, start, status_bytes);
+                start += status_bytes;
+                continue;
+            }
+            /* Clamp to known allocated region */
+            dirty_end = MIN(dirty_end, start + status_bytes);
+        }
+
         trace_backup_do_cow_process(job, start);
 
         if (job->use_copy_range) {
-            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
+            ret = backup_cow_with_offload(job, start, dirty_end,
+                                          is_write_notifier);
             if (ret < 0) {
                 job->use_copy_range = false;
             }
         }
         if (!job->use_copy_range) {
-            ret = backup_cow_with_bounce_buffer(job, start, end, is_write_notifier,
+            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
                                                 is_write_notifier,
                                                 error_is_read, &bounce_buffer);
         }
         if (ret < 0) {
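The reworked loop above copies whole dirty extents instead of single clusters: it finds the next clear bit to bound the extent, optionally clamps that to the allocated region, and hands the whole range to one copy call. A compact sketch of that extent-walking skeleton over a plain bit array (all names here are illustrative; the real loop also advances by the number of bytes actually copied and retries on error, which the sketch omits):

    #include <stdint.h>
    #include <stdbool.h>

    /* Find the first clear bit in [from, to), or -1 if all are set. */
    static int64_t next_zero(const bool *dirty, int64_t from, int64_t to)
    {
        for (int64_t i = from; i < to; i++) {
            if (!dirty[i]) {
                return i;
            }
        }
        return -1;
    }

    /* Walk [0, nbits) and invoke copy() once per contiguous dirty extent. */
    static int copy_dirty_extents(const bool *dirty, int64_t nbits,
                                  int (*copy)(int64_t start, int64_t end))
    {
        int64_t start = 0;

        while (start < nbits) {
            if (!dirty[start]) {           /* already copied: skip */
                start++;
                continue;
            }
            int64_t dirty_end = next_zero(dirty, start, nbits);
            if (dirty_end < 0) {
                dirty_end = nbits;         /* dirty through to the end */
            }
            int ret = copy(start, dirty_end);
            if (ret < 0) {
                return ret;
            }
            start = dirty_end;             /* resume after the extent */
        }
        return 0;
    }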
@@ -257,15 +353,29 @@ static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
 {
     BdrvDirtyBitmap *bm;
     BlockDriverState *bs = blk_bs(job->common.blk);
+    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS)) \
+                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));
 
-    if (ret < 0) {
-        /* Merge the successor back into the parent, delete nothing. */
-        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
-        assert(bm);
-    } else {
-        /* Everything is fine, delete this bitmap and install the backup. */
+    if (sync) {
+        /*
+         * We succeeded, or we always intended to sync the bitmap.
+         * Delete this bitmap and install the child.
+         */
         bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
-        assert(bm);
+    } else {
+        /*
+         * We failed, or we never intended to sync the bitmap anyway.
+         * Merge the successor back into the parent, keeping all data.
+         */
+        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
+    }
+
+    assert(bm);
+
+    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
+        /* If we failed and synced, merge in the bits we didn't copy: */
+        bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
+                                         NULL, true);
     }
 }
 
@@ -288,22 +398,21 @@ static void backup_abort(Job *job)
 static void backup_clean(Job *job)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
+    BlockDriverState *bs = blk_bs(s->common.blk);
+
+    if (s->copy_bitmap) {
+        bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
+        s->copy_bitmap = NULL;
+    }
+
     assert(s->target);
     blk_unref(s->target);
     s->target = NULL;
 }
 
-static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
-{
-    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
-
-    blk_set_aio_context(s->target, aio_context);
-}
-
 void backup_do_checkpoint(BlockJob *job, Error **errp)
 {
     BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
-    int64_t len;
 
     assert(block_job_driver(job) == &backup_job_driver);
 
@@ -313,39 +422,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
         return;
     }
 
-    len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
-    hbitmap_set(backup_job->copy_bitmap, 0, len);
-}
-
-void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
-                                          uint64_t bytes)
-{
-    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
-    int64_t start, end;
-
-    assert(block_job_driver(job) == &backup_job_driver);
-
-    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
-    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
-    wait_for_overlapping_requests(backup_job, start, end);
-}
-
-void backup_cow_request_begin(CowRequest *req, BlockJob *job,
-                              int64_t offset, uint64_t bytes)
-{
-    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
-    int64_t start, end;
-
-    assert(block_job_driver(job) == &backup_job_driver);
-
-    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
-    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
-    cow_request_begin(req, backup_job, start, end);
-}
-
-void backup_cow_request_end(CowRequest *req)
-{
-    cow_request_end(req);
+    bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
 }
 
 static void backup_drain(BlockJob *job)
@@ -375,18 +452,6 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
     }
 }
 
-typedef struct {
-    int ret;
-} BackupCompleteData;
-
-static void backup_complete(Job *job, void *opaque)
-{
-    BackupCompleteData *data = opaque;
-
-    job_completed(job, data->ret, NULL);
-    g_free(data);
-}
-
 static bool coroutine_fn yield_and_check(BackupBlockJob *job)
 {
     uint64_t delay_ns;
@@ -408,180 +473,113 @@ static bool coroutine_fn yield_and_check(BackupBlockJob *job)
     return false;
 }
 
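backup_cleanup_sync_bitmap() above folds the bitmap policy into a single boolean: the successor bitmap is committed when the job succeeded or when the mode is "always", and never when the mode is "never". That decision, written as a standalone predicate (the enum values mirror QEMU's BitmapSyncMode; the helper name is invented):

    #include <stdbool.h>

    typedef enum {
        BITMAP_SYNC_MODE_ON_SUCCESS,  /* sync only if the job succeeded */
        BITMAP_SYNC_MODE_NEVER,       /* never touch the user's bitmap */
        BITMAP_SYNC_MODE_ALWAYS,      /* sync even on failure */
    } BitmapSyncMode;

    /* Should the successor bitmap replace the user's bitmap? */
    static bool should_sync_bitmap(int ret, BitmapSyncMode mode)
    {
        return (ret == 0 || mode == BITMAP_SYNC_MODE_ALWAYS) &&
               mode != BITMAP_SYNC_MODE_NEVER;
    }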
-static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
+static int coroutine_fn backup_loop(BackupBlockJob *job)
 {
-    int ret;
     bool error_is_read;
-    int64_t cluster;
-    HBitmapIter hbi;
+    int64_t offset;
+    BdrvDirtyBitmapIter *bdbi;
+    int ret = 0;
 
-    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
-    while ((cluster = hbitmap_iter_next(&hbi, true)) != -1) {
+    bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
+    while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
         do {
             if (yield_and_check(job)) {
-                return 0;
+                goto out;
             }
-            ret = backup_do_cow(job, cluster * job->cluster_size,
+            ret = backup_do_cow(job, offset,
                                 job->cluster_size, &error_is_read, false);
             if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                            BLOCK_ERROR_ACTION_REPORT)
             {
-                return ret;
+                goto out;
             }
         } while (ret < 0);
     }
 
-    return 0;
+ out:
+    bdrv_dirty_iter_free(bdbi);
+    return ret;
 }
 
-/* init copy_bitmap from sync_bitmap */
-static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
+static void backup_init_copy_bitmap(BackupBlockJob *job)
 {
-    BdrvDirtyBitmapIter *dbi;
-    int64_t offset;
-    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
-                               job->cluster_size);
-
-    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
-    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
-        int64_t cluster = offset / job->cluster_size;
-        int64_t next_cluster;
-
-        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
-        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
-            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
-            break;
-        }
-
-        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
-        if (offset == -1) {
-            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
-            break;
-        }
-
-        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
-        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
-        if (next_cluster >= end) {
-            break;
+    bool ret;
+    uint64_t estimate;
+
+    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
+        ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
+                                               job->sync_bitmap,
+                                               NULL, true);
+        assert(ret);
+    } else {
+        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
+            /*
+             * We can't hog the coroutine to initialize this thoroughly.
+             * Set a flag and resume work when we are able to yield safely.
+             */
+            job->initializing_bitmap = true;
         }
-
-        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
+        bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
     }
 
-    /* TODO job_progress_set_remaining() would make more sense */
-    job_progress_update(&job->common.job,
-        job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
-
-    bdrv_dirty_iter_free(dbi);
+    estimate = bdrv_get_dirty_count(job->copy_bitmap);
+    job_progress_set_remaining(&job->common.job, estimate);
 }
 
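backup_init_copy_bitmap() above seeds the working bitmap in one of three ways: copy the user's bitmap for bitmap sync, or set every bit for full/top sync, with top sync additionally deferring the clearing of unallocated clusters until the job can yield. A condensed view of that policy with QEMU's types simplified to plain C (one bool per cluster; all names illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    typedef enum { SYNC_FULL, SYNC_TOP, SYNC_BITMAP } SyncMode;

    static void init_copy_bitmap(bool *copy_bits, const bool *sync_bits,
                                 int64_t nbits, SyncMode mode,
                                 bool *initializing_bitmap)
    {
        if (mode == SYNC_BITMAP) {
            /* Start from exactly the clusters the user's bitmap marks dirty. */
            memcpy(copy_bits, sync_bits, nbits * sizeof(bool));
            return;
        }
        if (mode == SYNC_TOP) {
            /* Unallocated clusters are cleared lazily, once we can yield. */
            *initializing_bitmap = true;
        }
        memset(copy_bits, 1, nbits * sizeof(bool));  /* copy everything */
    }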
-static void coroutine_fn backup_run(void *opaque)
+static int coroutine_fn backup_run(Job *job, Error **errp)
 {
-    BackupBlockJob *job = opaque;
-    BackupCompleteData *data;
-    BlockDriverState *bs = blk_bs(job->common.blk);
-    int64_t offset, nb_clusters;
+    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
+    BlockDriverState *bs = blk_bs(s->common.blk);
     int ret = 0;
 
-    QLIST_INIT(&job->inflight_reqs);
-    qemu_co_rwlock_init(&job->flush_rwlock);
+    QLIST_INIT(&s->inflight_reqs);
+    qemu_co_rwlock_init(&s->flush_rwlock);
 
-    nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
-    job_progress_set_remaining(&job->common.job, job->len);
+    backup_init_copy_bitmap(s);
 
-    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
-    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        backup_incremental_init_copy_bitmap(job);
-    } else {
-        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
-    }
+    s->before_write.notify = backup_before_write_notify;
+    bdrv_add_before_write_notifier(bs, &s->before_write);
 
+    if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
+        int64_t offset = 0;
+        int64_t count;
 
-    job->before_write.notify = backup_before_write_notify;
-    bdrv_add_before_write_notifier(bs, &job->before_write);
+        for (offset = 0; offset < s->len; ) {
+            if (yield_and_check(s)) {
+                ret = -ECANCELED;
+                goto out;
+            }
+
+            ret = backup_bitmap_reset_unallocated(s, offset, &count);
+            if (ret < 0) {
+                goto out;
+            }
+
+            offset += count;
+        }
+        s->initializing_bitmap = false;
+    }
 
-    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
         /* All bits are set in copy_bitmap to allow any cluster to be copied.
          * This does not actually require them to be copied. */
-        while (!job_is_cancelled(&job->common.job)) {
+        while (!job_is_cancelled(job)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
-            job_yield(&job->common.job);
+            job_yield(job);
         }
-    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        ret = backup_run_incremental(job);
     } else {
-        /* Both FULL and TOP SYNC_MODE's require copying.. */
-        for (offset = 0; offset < job->len;
-             offset += job->cluster_size) {
-            bool error_is_read;
-            int alloced = 0;
-
-            if (yield_and_check(job)) {
-                break;
-            }
-
-            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
-                int i;
-                int64_t n;
-
-                /* Check to see if these blocks are already in the
-                 * backing file. */
-
-                for (i = 0; i < job->cluster_size;) {
-                    /* bdrv_is_allocated() only returns true/false based
-                     * on the first set of sectors it comes across that
-                     * are are all in the same state.
-                     * For that reason we must verify each sector in the
-                     * backup cluster length.  We end up copying more than
-                     * needed but at some point that is always the case. */
-                    alloced =
-                        bdrv_is_allocated(bs, offset + i,
-                                          job->cluster_size - i, &n);
-                    i += n;
-
-                    if (alloced || n == 0) {
-                        break;
-                    }
-                }
-
-                /* If the above loop never found any sectors that are in
-                 * the topmost image, skip this backup. */
-                if (alloced == 0) {
-                    continue;
-                }
-            }
-            /* FULL sync mode we copy the whole drive. */
-            if (alloced < 0) {
-                ret = alloced;
-            } else {
-                ret = backup_do_cow(job, offset, job->cluster_size,
-                                    &error_is_read, false);
-            }
-            if (ret < 0) {
-                /* Depending on error action, fail now or retry cluster */
-                BlockErrorAction action =
-                    backup_error_action(job, error_is_read, -ret);
-                if (action == BLOCK_ERROR_ACTION_REPORT) {
-                    break;
-                } else {
-                    offset -= job->cluster_size;
-                    continue;
-                }
-            }
-        }
+        ret = backup_loop(s);
     }
 
-    notifier_with_return_remove(&job->before_write);
+ out:
+    notifier_with_return_remove(&s->before_write);
 
     /* wait until pending backup_do_cow() calls have completed */
-    qemu_co_rwlock_wrlock(&job->flush_rwlock);
-    qemu_co_rwlock_unlock(&job->flush_rwlock);
-    hbitmap_free(job->copy_bitmap);
+    qemu_co_rwlock_wrlock(&s->flush_rwlock);
+    qemu_co_rwlock_unlock(&s->flush_rwlock);
 
-    data = g_malloc(sizeof(*data));
-    data->ret = ret;
-    job_defer_to_main_loop(&job->common.job, backup_complete, data);
+    return ret;
 }
 
 static const BlockJobDriver backup_job_driver = {
@@ -591,18 +589,54 @@ static const BlockJobDriver backup_job_driver = {
         .free                   = block_job_free,
         .user_resume            = block_job_user_resume,
         .drain                  = block_job_drain,
-        .start                  = backup_run,
+        .run                    = backup_run,
         .commit                 = backup_commit,
         .abort                  = backup_abort,
         .clean                  = backup_clean,
     },
-    .attached_aio_context   = backup_attached_aio_context,
     .drain                  = backup_drain,
 };
 
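The shutdown sequence in backup_run() above uses the read-write lock purely as a quiesce point: every in-flight backup_do_cow() holds flush_rwlock for reading, so acquiring it once for writing (and releasing it immediately) blocks until all of them have drained. The same idiom expressed with POSIX rwlocks, outside any QEMU context (names illustrative):

    #include <pthread.h>

    static pthread_rwlock_t flush_rwlock = PTHREAD_RWLOCK_INITIALIZER;

    /* Each worker wraps its copy work in a read lock... */
    static void do_copy_work(void)
    {
        pthread_rwlock_rdlock(&flush_rwlock);
        /* ... perform one copy-on-write request ... */
        pthread_rwlock_unlock(&flush_rwlock);
    }

    /* ...so teardown can wait for all of them at once. */
    static void wait_for_pending_requests(void)
    {
        pthread_rwlock_wrlock(&flush_rwlock); /* blocks while readers exist */
        pthread_rwlock_unlock(&flush_rwlock); /* nothing to protect; just drain */
    }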
+static int64_t backup_calculate_cluster_size(BlockDriverState *target,
+                                             Error **errp)
+{
+    int ret;
+    BlockDriverInfo bdi;
+
+    /*
+     * If there is no backing file on the target, we cannot rely on COW if our
+     * backup cluster size is smaller than the target cluster size. Even for
+     * targets with a backing file, try to avoid COW if possible.
+     */
+    ret = bdrv_get_info(target, &bdi);
+    if (ret == -ENOTSUP && !target->backing) {
+        /* Cluster size is not defined */
+        warn_report("The target block device doesn't provide "
+                    "information about the block size and it doesn't have a "
+                    "backing file. The default block size of %u bytes is "
+                    "used. If the actual block size of the target exceeds "
+                    "this default, the backup may be unusable",
+                    BACKUP_CLUSTER_SIZE_DEFAULT);
+        return BACKUP_CLUSTER_SIZE_DEFAULT;
+    } else if (ret < 0 && !target->backing) {
+        error_setg_errno(errp, -ret,
+            "Couldn't determine the cluster size of the target image, "
+            "which has no backing file");
+        error_append_hint(errp,
+            "Aborting, since this may create an unusable destination image\n");
+        return ret;
+    } else if (ret < 0 && target->backing) {
+        /* Not fatal; just trudge on ahead. */
+        return BACKUP_CLUSTER_SIZE_DEFAULT;
+    }
+
+    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
+}
+
 BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                   BlockDriverState *target, int64_t speed,
                   MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
+                  BitmapSyncMode bitmap_mode,
                   bool compress,
                   BlockdevOnError on_source_error,
                   BlockdevOnError on_target_error,
@@ -611,13 +645,18 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                   JobTxn *txn, Error **errp)
 {
     int64_t len;
-    BlockDriverInfo bdi;
     BackupBlockJob *job = NULL;
     int ret;
+    int64_t cluster_size;
+    BdrvDirtyBitmap *copy_bitmap = NULL;
 
     assert(bs);
     assert(target);
 
+    /* QMP interface protects us from these cases */
+    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
+    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);
+
     if (bs == target) {
         error_setg(errp, "Source and target cannot be the same");
         return NULL;
@@ -649,10 +688,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         return NULL;
     }
 
-    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        if (!sync_bitmap) {
-            error_setg(errp, "must provide a valid bitmap name for "
-                             "\"incremental\" sync mode");
+    if (sync_bitmap) {
+        /* If we need to write to this bitmap, check that we can: */
+        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
+            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
             return NULL;
         }
 
@@ -660,12 +699,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
             return NULL;
         }
-    } else if (sync_bitmap) {
-        error_setg(errp,
-                   "a sync_bitmap was provided to backup_run, "
-                   "but received an incompatible sync_mode (%s)",
-                   MirrorSyncMode_str(sync_mode));
-        return NULL;
     }
 
     len = bdrv_getlength(bs);
@@ -675,6 +708,17 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         goto error;
     }
 
+    cluster_size = backup_calculate_cluster_size(target, errp);
+    if (cluster_size < 0) {
+        goto error;
+    }
+
+    copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
+    if (!copy_bitmap) {
+        goto error;
+    }
+    bdrv_disable_dirty_bitmap(copy_bitmap);
+
     /* job->len is fixed, so we can't allow resize */
     job = block_job_create(job_id, &backup_job_driver, txn, bs,
                            BLK_PERM_CONSISTENT_READ,
@@ -686,48 +730,35 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     }
 
     /* The target must match the source in size, so no resize here either */
-    job->target = blk_new(BLK_PERM_WRITE,
+    job->target = blk_new(job->common.job.aio_context,
+                          BLK_PERM_WRITE,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
     ret = blk_insert_bs(job->target, target, errp);
     if (ret < 0) {
         goto error;
     }
+    blk_set_disable_request_queuing(job->target, true);
 
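backup_calculate_cluster_size() above reduces to a small decision table over bdrv_get_info()'s result and the presence of a backing file on the target. The same logic as a standalone function, with probe results passed in directly since the QEMU calls are out of reach here (all names are stand-ins):

    #include <errno.h>
    #include <stdint.h>

    #define DEFAULT_CLUSTER (1 << 16)
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* ret: result of probing the target's cluster size (negative errno on
     * failure); probed: the probed size, valid only when ret == 0. */
    static int64_t calculate_cluster_size(int ret, int64_t probed,
                                          int has_backing)
    {
        if (ret == -ENOTSUP && !has_backing) {
            return DEFAULT_CLUSTER;  /* warn: the default may be too small */
        } else if (ret < 0 && !has_backing) {
            return ret;              /* fatal: image could be unusable */
        } else if (ret < 0 && has_backing) {
            return DEFAULT_CLUSTER;  /* backing file makes COW safe enough */
        }
        /* Never go below the default, even if the target's clusters are tiny. */
        return MAX(DEFAULT_CLUSTER, probed);
    }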
     job->on_source_error = on_source_error;
     job->on_target_error = on_target_error;
     job->sync_mode = sync_mode;
-    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
-                       sync_bitmap : NULL;
-    job->compress = compress;
+    job->sync_bitmap = sync_bitmap;
+    job->bitmap_mode = bitmap_mode;
 
-    /* If there is no backing file on the target, we cannot rely on COW if our
-     * backup cluster size is smaller than the target cluster size. Even for
-     * targets with a backing file, try to avoid COW if possible. */
-    ret = bdrv_get_info(target, &bdi);
-    if (ret == -ENOTSUP && !target->backing) {
-        /* Cluster size is not defined */
-        warn_report("The target block device doesn't provide "
-                    "information about the block size and it doesn't have a "
-                    "backing file. The default block size of %u bytes is "
-                    "used. If the actual block size of the target exceeds "
-                    "this default, the backup may be unusable",
-                    BACKUP_CLUSTER_SIZE_DEFAULT);
-        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
-    } else if (ret < 0 && !target->backing) {
-        error_setg_errno(errp, -ret,
-            "Couldn't determine the cluster size of the target image, "
-            "which has no backing file");
-        error_append_hint(errp,
-            "Aborting, since this may create an unusable destination image\n");
-        goto error;
-    } else if (ret < 0 && target->backing) {
-        /* Not fatal; just trudge on ahead. */
-        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
-    } else {
-        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
-    }
-
-    job->use_copy_range = true;
+    /*
+     * Set write flags:
+     * 1. Detect image-fleecing (and similar) schemes
+     * 2. Handle compression
+     */
+    job->write_flags =
+        (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
+        (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
+
+    job->cluster_size = cluster_size;
+    job->copy_bitmap = copy_bitmap;
+    copy_bitmap = NULL;
+    job->use_copy_range = !compress; /* compression isn't supported for it */
     job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                         blk_get_max_transfer(job->target));
     job->copy_range_size = MAX(job->cluster_size,
@@ -742,6 +773,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     return &job->common;
 
  error:
+    if (copy_bitmap) {
+        assert(!job || !job->copy_bitmap);
+        bdrv_release_dirty_bitmap(bs, copy_bitmap);
+    }
     if (sync_bitmap) {
         bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
     }
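The write_flags computed above encode two independent decisions: when the target is part of the source's backing chain (the image-fleecing scheme), writes must be serialized against in-flight reads, and requesting compressed writes rules out the copy-offload path. A sketch of that flag composition; the flag values and the chain test are stand-ins for QEMU's BDRV_REQ_* constants and bdrv_chain_contains():

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-ins for QEMU's BDRV_REQ_* request flags. */
    enum {
        REQ_SERIALISING      = 1 << 0,
        REQ_WRITE_COMPRESSED = 1 << 1,
    };

    struct backup_opts {
        bool target_in_source_chain; /* i.e. bdrv_chain_contains(target, bs) */
        bool compress;
    };

    static void choose_write_strategy(const struct backup_opts *opts,
                                      uint32_t *write_flags,
                                      bool *use_copy_range)
    {
        *write_flags =
            (opts->target_in_source_chain ? REQ_SERIALISING : 0) |
            (opts->compress ? REQ_WRITE_COMPRESSED : 0);

        /* Copy offloading cannot produce compressed writes. */
        *use_copy_range = !opts->compress;
    }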