char *backing_file_str;
} CommitBlockJob;
-static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
- int64_t offset, uint64_t bytes,
- void *buf)
-{
- int ret = 0;
- QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
-
- assert(bytes < SIZE_MAX);
-
- ret = blk_co_preadv(bs, offset, qiov.size, &qiov, 0);
- if (ret < 0) {
- return ret;
- }
-
- ret = blk_co_pwritev(base, offset, qiov.size, &qiov, 0);
- if (ret < 0) {
- return ret;
- }
-
- return 0;
-}
-
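With the byte-based wrappers, commit_populate() becomes redundant: blk_co_pread()/blk_co_pwrite() build the single-buffer QEMUIOVector internally. A sketch of those helpers as they appear in include/sysemu/block-backend.h around this time (exact parameter types are an assumption from memory):

    static inline int coroutine_fn
    blk_co_pread(BlockBackend *blk, int64_t offset, unsigned int bytes,
                 void *buf, BdrvRequestFlags flags)
    {
        /* Wrap the flat buffer in a one-element I/O vector */
        QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

        return blk_co_preadv(blk, offset, bytes, &qiov, flags);
    }

    static inline int coroutine_fn
    blk_co_pwrite(BlockBackend *blk, int64_t offset, unsigned int bytes,
                  void *buf, BdrvRequestFlags flags)
    {
        QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

        return blk_co_pwritev(blk, offset, bytes, &qiov, flags);
    }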
static int commit_prepare(Job *job)
{
CommitBlockJob *s = container_of(job, CommitBlockJob, common.job);
* XXX Can (or should) we somehow keep 'consistent read' blocked even
* after the failed/cancelled commit job is gone? If we already wrote
* something to base, the intermediate images aren't valid any more. */
- bdrv_child_try_set_perm(s->commit_top_bs->backing, 0, BLK_PERM_ALL,
- &error_abort);
bdrv_replace_node(s->commit_top_bs, backing_bs(s->commit_top_bs),
&error_abort);
int ret = 0;
int64_t n = 0; /* bytes */
void *buf = NULL;
- int bytes_written = 0;
int64_t len, base_len;
ret = len = blk_getlength(s->top);
}
if (base_len < len) {
- ret = blk_truncate(s->base, len, PREALLOC_MODE_OFF, NULL);
+ ret = blk_truncate(s->base, len, false, PREALLOC_MODE_OFF, NULL);
if (ret) {
goto out;
}
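blk_truncate() grew a bool @exact parameter in this series. Assumed prototype, inferred from the call sites here:

    /* @exact = false permits the driver to round the image up past
     * @offset if it cannot resize byte-exactly; that is fine for commit,
     * since base only has to be at least as large as top. */
    int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
                     PreallocMode prealloc, Error **errp);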
for (offset = 0; offset < len; offset += n) {
bool copy;
+ bool error_in_source = true;
/* Note that even when no rate limit is applied we need to yield
* with no pending I/O here so that bdrv_drain_all() returns.
*/
break;
}
/* Copy if allocated above the base */
- ret = bdrv_is_allocated_above(blk_bs(s->top), blk_bs(s->base),
+ ret = bdrv_is_allocated_above(blk_bs(s->top), blk_bs(s->base), false,
offset, COMMIT_BUFFER_SIZE, &n);
copy = (ret == 1);
trace_commit_one_iteration(s, offset, n, ret);
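bdrv_is_allocated_above() likewise gained a bool @include_base argument; passing false keeps the old exclusive-of-base semantics, so only data allocated strictly above base is copied. Assumed prototype:

    /* Returns > 0 if the range is allocated somewhere in [top, base),
     * or in [top, base] when @include_base is true; *pnum receives the
     * number of contiguous bytes sharing that status. */
    int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                                bool include_base, int64_t offset,
                                int64_t bytes, int64_t *pnum);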
if (copy) {
- ret = commit_populate(s->top, s->base, offset, n, buf);
- bytes_written += n;
+ assert(n < SIZE_MAX);
+
+ ret = blk_co_pread(s->top, offset, n, buf, 0);
+ if (ret >= 0) {
+ ret = blk_co_pwrite(s->base, offset, n, buf, 0);
+ if (ret < 0) {
+ error_in_source = false;
+ }
+ }
}
if (ret < 0) {
BlockErrorAction action =
- block_job_error_action(&s->common, false, s->on_error, -ret);
+ block_job_error_action(&s->common, s->on_error,
+ error_in_source, -ret);
if (action == BLOCK_ERROR_ACTION_REPORT) {
goto out;
} else {
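Besides splitting the read from the write, this fixes a long-standing argument swap: block_job_error_action() takes the error policy before the is_read flag, so the old call passed false as the policy and s->on_error as is_read. With error_in_source in place, a failed read of top is reported as a read error and a failed write to base as a write error. The signature, quoted from memory of include/block/blockjob.h:

    BlockErrorAction block_job_error_action(BlockJob *job,
                                            BlockdevOnError on_err,
                                            int is_read, int error);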
.job_type = JOB_TYPE_COMMIT,
.free = block_job_free,
.user_resume = block_job_user_resume,
- .drain = block_job_drain,
.run = commit_run,
.prepare = commit_prepare,
.abort = commit_abort,
if (!filter_node_name) {
commit_top_bs->implicit = true;
}
+
+ /* So that we can always drop this node */
+ commit_top_bs->never_freeze = true;
+
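A frozen backing chain would make it impossible to pull the filter back out of the graph, so the node opts out of freezing entirely. Roughly what this enables in bdrv_freeze_backing_chain() (paraphrased from block.c; the loop details are an assumption):

    BdrvChild *i;

    /* Refuse to freeze any link that points to an opted-out node */
    for (i = bs->backing; i != NULL && i->bs != base; i = i->bs->backing) {
        if (i->bs->never_freeze) {
            error_setg(errp, "Cannot freeze '%s' link to '%s'",
                       i->name, i->bs->node_name);
            return -EPERM;
        }
    }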
commit_top_bs->total_sectors = top->total_sectors;
- bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(top));
- bdrv_set_backing_hd(commit_top_bs, top, &local_err);
- if (local_err) {
- bdrv_unref(commit_top_bs);
- commit_top_bs = NULL;
- error_propagate(errp, local_err);
- goto fail;
- }
- bdrv_replace_node(top, commit_top_bs, &local_err);
+ bdrv_append(commit_top_bs, top, &local_err);
if (local_err) {
- bdrv_unref(commit_top_bs);
commit_top_bs = NULL;
error_propagate(errp, local_err);
goto fail;
}
s->commit_top_bs = commit_top_bs;
- bdrv_unref(commit_top_bs);
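bdrv_append() folds the whole hand-rolled sequence into one call: it sets top as the new node's backing file, rewires all of top's parents to commit_top_bs, and consumes the caller's reference on both success and failure, which is why every manual bdrv_unref() above disappears. Attaching a child now also moves it into the parent's AioContext, making the explicit bdrv_set_aio_context() call redundant. Simplified paraphrase of block.c (error-path rollback elided):

    void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                     Error **errp)
    {
        Error *local_err = NULL;

        bdrv_set_backing_hd(bs_new, bs_top, &local_err);
        if (!local_err) {
            bdrv_replace_node(bs_top, bs_new, &local_err);
        }
        error_propagate(errp, local_err);

        /* bs_new is now referenced by its new parents */
        bdrv_unref(bs_new);
    }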
/* Block all nodes between top and base, because they will
* disappear from the chain after this operation. */
goto fail;
}
- s->base = blk_new(BLK_PERM_CONSISTENT_READ
+ s->base = blk_new(s->common.job.aio_context,
+ BLK_PERM_CONSISTENT_READ
| BLK_PERM_WRITE
| BLK_PERM_RESIZE,
BLK_PERM_CONSISTENT_READ
if (ret < 0) {
goto fail;
}
+ blk_set_disable_request_queuing(s->base, true);
s->base_bs = base;
/* Required permissions are already taken with block_job_add_bdrv() */
- s->top = blk_new(0, BLK_PERM_ALL);
+ s->top = blk_new(s->common.job.aio_context, 0, BLK_PERM_ALL);
ret = blk_insert_bs(s->top, top, errp);
if (ret < 0) {
goto fail;
}
+ blk_set_disable_request_queuing(s->top, true);
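Both scratch BlockBackends are now created directly in the job's AioContext, and request queuing is disabled on them: a drained BlockBackend would otherwise hold back the job's own copy requests, and the job is already paused by the drain anyway. A minimal usage sketch (job_ctx is a hypothetical AioContext variable):

    BlockBackend *blk = blk_new(job_ctx, 0, BLK_PERM_ALL);
    /* The job's I/O must proceed even inside drained sections */
    blk_set_disable_request_queuing(blk, true);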
s->backing_file_str = g_strdup(backing_file_str);
s->on_error = on_error;
if (s->top) {
blk_unref(s->top);
}
+ if (s->base_read_only) {
+ bdrv_reopen_set_read_only(base, true, NULL);
+ }
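This restores the state set up at job creation: commit_start() temporarily reopens a read-only base read-write and records that in s->base_read_only, so the early-failure path has to flip it back. The assumed setup-side counterpart, earlier in commit_start():

    /* Assumption: mirrors the corresponding setup code in commit_start() */
    s->base_read_only = bdrv_is_read_only(base);
    if (s->base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) != 0) {
            goto fail;
        }
    }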
job_early_fail(&s->common.job);
/* commit_top_bs has to be replaced after deleting the block job,
* otherwise this would fail because of lack of permissions. */
BlockDriverState *backing_file_bs = NULL;
BlockDriverState *commit_top_bs = NULL;
BlockDriver *drv = bs->drv;
+ AioContext *ctx;
int64_t offset, length, backing_length;
int ro;
int64_t n;
}
}
- src = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
- backing = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
+ ctx = bdrv_get_aio_context(bs);
+ src = blk_new(ctx, BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
+ backing = blk_new(ctx, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);
ret = blk_insert_bs(src, bs, &local_err);
if (ret < 0) {
error_report_err(local_err);
goto ro_cleanup;
}
- bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(backing_file_bs));
bdrv_set_backing_hd(commit_top_bs, backing_file_bs, &error_abort);
bdrv_set_backing_hd(bs, commit_top_bs, &error_abort);
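The synchronous bdrv_commit() gets the same treatment: the scratch BlockBackends carry the node's AioContext from creation, and the manual bdrv_set_aio_context() on the filter is dropped because attaching it as a backing child places it in the right context. The two calls above leave the graph as follows (my reading of the intent, as an illustration):

    /*
     *    bs -> commit_top_bs -> backing_file_bs
     *
     * The temporary filter sits between bs and its old backing file so
     * that the 'backing' BlockBackend can take write permission on the
     * backing image while it is being filled.
     */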
* grow the backing file image if possible. If not possible,
* we must return an error */
if (length > backing_length) {
- ret = blk_truncate(backing, length, PREALLOC_MODE_OFF, &local_err);
+ ret = blk_truncate(backing, length, false, PREALLOC_MODE_OFF,
+ &local_err);
if (ret < 0) {
error_report_err(local_err);
goto ro_cleanup;