g_free(op);
if (s->waiting_for_io) {
- qemu_coroutine_enter(s->common.co, NULL);
+ qemu_coroutine_enter(s->common.co);
}
}
}
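/* A minimal sketch of the coroutine API this patch assumes: the opaque
 * pointer is now bound at creation time by qemu_coroutine_create(), so
 * both the first entry and any later re-entry (as in the wake-up above)
 * take only the coroutine itself. Names below are illustrative. */
static void coroutine_fn example_entry(void *opaque)
{
    /* ... do work, possibly qemu_coroutine_yield() while waiting ... */
}

static void example_spawn(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(example_entry, opaque);
    qemu_coroutine_enter(co);   /* no second argument any more */
}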
/* Submit async read while handling COW.
- * Returns: nb_sectors if no alignment is necessary, or
+ * Returns: The number of sectors copied after and including sector_num,
+ * excluding any sectors copied prior to sector_num due to alignment.
+ * This will be nb_sectors if no alignment is necessary, or
* (new_end - sector_num) if tail is rounded up or down due to
* alignment or buffer limit.
*/
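/* For instance (illustrative numbers, assuming a 4-sector cluster for
 * COW alignment): a request for sectors 3..6 that gets rounded out to
 * cover sectors 0..7 reads 8 sectors but returns 5, because the 3
 * sectors copied before the original sector_num are not counted. */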
{
BlockBackend *source = s->common.blk;
int sectors_per_chunk, nb_chunks;
- int ret = nb_sectors;
+ int ret;
MirrorOp *op;
+ int max_sectors;
sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
+ max_sectors = sectors_per_chunk * s->max_iov;
/* We can only handle as much as buf_size at a time. */
nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
+ nb_sectors = MIN(max_sectors, nb_sectors);
assert(nb_sectors);
+ ret = nb_sectors;
if (s->cow_bitmap) {
ret += mirror_cow_align(s, &sector_num, &nb_sectors);
first_chunk = sector_num / sectors_per_chunk;
while (test_bit(first_chunk, s->in_flight_bitmap)) {
- trace_mirror_yield_in_flight(s, first_chunk, s->in_flight);
+ trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
mirror_wait_for_io(s);
}
+ block_job_pause_point(&s->common);
+
/* Find the number of consecutive dirty chunks following the first dirty
 * one, and wait for in-flight requests in them. */
while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
assert(io_sectors);
sector_num += io_sectors;
nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
- delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
+ if (s->common.speed) {
+ delay_ns = ratelimit_calculate_delay(&s->limit, io_sectors);
+ }
}
return delay_ns;
}
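/* A hedged sketch of how the delay returned above might be consumed by
 * the job's main loop; block_job_sleep_ns() (also used below) doubles
 * as a pause/cancellation point, which is why the iteration returns the
 * delay instead of sleeping itself. example_throttle() is illustrative. */
static void example_throttle(MirrorBlockJob *s, uint64_t delay_ns)
{
    /* delay_ns is 0 when no speed limit is set */
    block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
}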
if (now - last_pause_ns > SLICE_TIME) {
last_pause_ns = now;
block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
+ } else {
+ block_job_pause_point(&s->common);
}
if (block_job_is_cancelled(&s->common)) {
goto immediate_exit;
}
+ block_job_pause_point(&s->common);
+
cnt = bdrv_get_dirty_count(s->dirty_bitmap);
/* s->common.offset contains the number of bytes already processed so
* far, cnt is the number of dirty sectors remaining and
target = blk_bs(s->target);
if (!s->synced) {
- error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
+ error_setg(errp, "The active block job '%s' cannot be completed",
+ job->id);
return;
}
}
}
- /* check the target bs is not blocked and block all operations on it */
+ /* block all operations on to_replace bs */
if (s->replaces) {
AioContext *replace_aio_context;
block_job_enter(&s->common);
}
+/* There is no matching mirror_resume() because mirror_run() will begin
+ * iterating again when the job is resumed.
+ */
+static void coroutine_fn mirror_pause(BlockJob *job)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+
+ mirror_drain(s);
+}
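/* Note: assuming the blockjob infrastructure this patch targets, the
 * driver's .pause callback runs from block_job_pause_point() before the
 * job coroutine yields, so mirror_drain() above should guarantee that
 * no mirror I/O is in flight for as long as the job stays paused. */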
+
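+/* The job follows its BlockBackend to a new AioContext; moving the
+ * target along keeps all requests issued by this job in one context. */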
+static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
+{
+ MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
+
+ blk_set_aio_context(s->target, new_context);
+}
+
static const BlockJobDriver mirror_job_driver = {
- .instance_size = sizeof(MirrorBlockJob),
- .job_type = BLOCK_JOB_TYPE_MIRROR,
- .set_speed = mirror_set_speed,
- .complete = mirror_complete,
+ .instance_size = sizeof(MirrorBlockJob),
+ .job_type = BLOCK_JOB_TYPE_MIRROR,
+ .set_speed = mirror_set_speed,
+ .complete = mirror_complete,
+ .pause = mirror_pause,
+ .attached_aio_context = mirror_attached_aio_context,
};
static const BlockJobDriver commit_active_job_driver = {
- .instance_size = sizeof(MirrorBlockJob),
- .job_type = BLOCK_JOB_TYPE_COMMIT,
- .set_speed = mirror_set_speed,
- .complete = mirror_complete,
+ .instance_size = sizeof(MirrorBlockJob),
+ .job_type = BLOCK_JOB_TYPE_COMMIT,
+ .set_speed = mirror_set_speed,
+ .complete = mirror_complete,
+ .pause = mirror_pause,
+ .attached_aio_context = mirror_attached_aio_context,
};
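/* Both drivers share the mirror implementation: active commit is set up
 * below as a mirror job whose target is the backing file, so it reuses
 * the same .pause and .attached_aio_context hooks added above. */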
-static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
- const char *replaces,
+static void mirror_start_job(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *target, const char *replaces,
int64_t speed, uint32_t granularity,
int64_t buf_size,
BlockMirrorBackingMode backing_mode,
buf_size = DEFAULT_MIRROR_BUF_SIZE;
}
- s = block_job_create(driver, bs, speed, cb, opaque, errp);
+ s = block_job_create(job_id, driver, bs, speed, cb, opaque, errp);
if (!s) {
return;
}
bdrv_op_block_all(target, s->common.blocker);
- s->common.co = qemu_coroutine_create(mirror_run);
+ s->common.co = qemu_coroutine_create(mirror_run, s);
trace_mirror_start(bs, s, s->common.co, opaque);
- qemu_coroutine_enter(s->common.co, s);
+ qemu_coroutine_enter(s->common.co);
}
-void mirror_start(BlockDriverState *bs, BlockDriverState *target,
- const char *replaces,
+void mirror_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *target, const char *replaces,
int64_t speed, uint32_t granularity, int64_t buf_size,
MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
BlockdevOnError on_source_error,
}
is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
- mirror_start_job(bs, target, replaces,
+ mirror_start_job(job_id, bs, target, replaces,
speed, granularity, buf_size, backing_mode,
on_source_error, on_target_error, unmap, cb, opaque, errp,
&mirror_job_driver, is_none_mode, base);
}
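/* A hypothetical caller, sketching how the new job_id parameter threads
 * through from the QMP layer; "mirror0", the chosen modes, and the
 * trailing parameters beyond those visible in this hunk are
 * illustrative assumptions, not part of this patch. */
static void example_start_mirror(BlockDriverState *bs,
                                 BlockDriverState *target_bs,
                                 Error **errp)
{
    mirror_start("mirror0", bs, target_bs, NULL /* replaces */,
                 0 /* speed: unlimited */, 0 /* granularity: default */,
                 0 /* buf_size: default */,
                 MIRROR_SYNC_MODE_FULL, MIRROR_OPEN_BACKING_CHAIN,
                 BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
                 true /* unmap */, NULL, NULL, errp);
}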
-void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
- int64_t speed,
+void commit_active_start(const char *job_id, BlockDriverState *bs,
+ BlockDriverState *base, int64_t speed,
BlockdevOnError on_error,
BlockCompletionFunc *cb,
void *opaque, Error **errp)
}
}
- mirror_start_job(bs, base, NULL, speed, 0, 0, MIRROR_LEAVE_BACKING_CHAIN,
+ mirror_start_job(job_id, bs, base, NULL, speed, 0, 0,
+ MIRROR_LEAVE_BACKING_CHAIN,
on_error, on_error, false, cb, opaque, &local_err,
&commit_active_job_driver, false, base);
if (local_err) {