#include "qemu/error-report.h"
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
-#define SLICE_TIME 100000000ULL /* ns */
typedef struct BackupBlockJob {
BlockJob common;
/* bitmap for sync=incremental */
BdrvDirtyBitmap *sync_bitmap;
MirrorSyncMode sync_mode;
- RateLimit limit;
BlockdevOnError on_source_error;
BlockdevOnError on_target_error;
CoRwlock flush_rwlock;
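+ /* Size of the source in bytes; cached here because the generic Job core
+  * keeps only opaque progress counters, and backup forbids resizing. */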
+ uint64_t len;
uint64_t bytes_read;
int64_t cluster_size;
bool compress;
HBitmap *copy_bitmap;
} BackupBlockJob;
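+/* Forward declaration: backup_do_checkpoint() and the tracked-request
+ * helpers below assert the job type by comparing driver pointers. */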
+static const BlockJobDriver backup_job_driver;
+
/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
int64_t start,
trace_backup_do_cow_process(job, start);
- n = MIN(job->cluster_size, job->common.len - start);
+ n = MIN(job->cluster_size, job->len - start);
if (!bounce_buffer) {
bounce_buffer = blk_blockalign(blk, job->cluster_size);
* offset field is an opaque progress value; it is not a disk offset.
*/
job->bytes_read += n;
- job->common.offset += n;
+ job_progress_update(&job->common.job, n);
}
out:
return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}
-static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
-{
- BackupBlockJob *s = container_of(job, BackupBlockJob, common);
-
- if (speed < 0) {
- error_setg(errp, QERR_INVALID_PARAMETER, "speed");
- return;
- }
- ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
-}
-
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
BdrvDirtyBitmap *bm;
BlockDriverState *bs = blk_bs(job->common.blk);
- if (ret < 0 || block_job_is_cancelled(&job->common)) {
+ if (ret < 0) {
/* Merge the successor back into the parent, delete nothing. */
bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
assert(bm);
}
}
-static void backup_commit(BlockJob *job)
+static void backup_commit(Job *job)
{
- BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
if (s->sync_bitmap) {
backup_cleanup_sync_bitmap(s, 0);
}
}
-static void backup_abort(BlockJob *job)
+static void backup_abort(Job *job)
{
- BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
if (s->sync_bitmap) {
backup_cleanup_sync_bitmap(s, -1);
}
}
-static void backup_clean(BlockJob *job)
+static void backup_clean(Job *job)
{
- BackupBlockJob *s = container_of(job, BackupBlockJob, common);
+ BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
assert(s->target);
blk_unref(s->target);
s->target = NULL;
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
int64_t len;
- assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
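+ /* The job_type field now lives in the embedded JobDriver, so identify a
+  * backup job by its driver pointer instead. */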
+ assert(block_job_driver(job) == &backup_job_driver);
if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
error_setg(errp, "The backup job only supports block checkpoint in"
           " sync=none mode");
return;
}
- len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
+ len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
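+ /* Re-arm every cluster so that guest writes after the checkpoint trigger
+  * copy-on-write again. */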
hbitmap_set(backup_job->copy_bitmap, 0, len);
}
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
int64_t start, end;
- assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+ assert(block_job_driver(job) == &backup_job_driver);
start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
int64_t start, end;
- assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+ assert(block_job_driver(job) == &backup_job_driver);
start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
int ret;
} BackupCompleteData;
-static void backup_complete(BlockJob *job, void *opaque)
+static void backup_complete(Job *job, void *opaque)
{
BackupCompleteData *data = opaque;
- block_job_completed(job, data->ret);
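+ /* No Error object is passed; for a negative data->ret the job core falls
+  * back to a strerror() message. */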
+ job_completed(job, data->ret, NULL);
g_free(data);
}
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
- if (block_job_is_cancelled(&job->common)) {
+ uint64_t delay_ns;
+
+ if (job_is_cancelled(&job->common.job)) {
return true;
}
- /* we need to yield so that bdrv_drain_all() returns.
- * (without, VM does not reboot)
- */
- if (job->common.speed) {
- uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
- job->bytes_read);
- job->bytes_read = 0;
- block_job_sleep_ns(&job->common, delay_ns);
- } else {
- block_job_sleep_ns(&job->common, 0);
- }
+ /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
+ * return. Without a yield, the VM would not reboot. */
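+ /* block_job_ratelimit_get_delay() returns 0 when no speed limit is set,
+  * so the unthrottled case still yields. */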
+ delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
+ job->bytes_read = 0;
+ job_sleep_ns(&job->common.job, delay_ns);
- if (block_job_is_cancelled(&job->common)) {
+ if (job_is_cancelled(&job->common.job)) {
return true;
}
bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
}
- job->common.offset = job->common.len -
- hbitmap_count(job->copy_bitmap) * job->cluster_size;
+ /* TODO job_progress_set_remaining() would make more sense */
+ job_progress_update(&job->common.job,
+ job->len - hbitmap_count(job->copy_bitmap) * job->cluster_size);
bdrv_dirty_iter_free(dbi);
}
QLIST_INIT(&job->inflight_reqs);
qemu_co_rwlock_init(&job->flush_rwlock);
- nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
+ nb_clusters = DIV_ROUND_UP(job->len, job->cluster_size);
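+ /* Publish the total amount of work up front; backup_do_cow() then
+  * advances the progress counter via job_progress_update(). */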
+ job_progress_set_remaining(&job->common.job, job->len);
+
job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
backup_incremental_init_copy_bitmap(job);
if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
/* All bits are set in copy_bitmap to allow any cluster to be copied.
* This does not actually require them to be copied. */
- while (!block_job_is_cancelled(&job->common)) {
+ while (!job_is_cancelled(&job->common.job)) {
/* Yield until the job is cancelled. We just let our before_write
* notify callback service CoW requests. */
- block_job_yield(&job->common);
+ job_yield(&job->common.job);
}
} else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
ret = backup_run_incremental(job);
} else {
/* Both FULL and TOP sync modes require copying. */
- for (offset = 0; offset < job->common.len;
+ for (offset = 0; offset < job->len;
offset += job->cluster_size) {
bool error_is_read;
int alloced = 0;
data = g_malloc(sizeof(*data));
data->ret = ret;
- block_job_defer_to_main_loop(&job->common, backup_complete, data);
+ job_defer_to_main_loop(&job->common.job, backup_complete, data);
}
static const BlockJobDriver backup_job_driver = {
- .instance_size = sizeof(BackupBlockJob),
- .job_type = BLOCK_JOB_TYPE_BACKUP,
- .start = backup_run,
- .set_speed = backup_set_speed,
- .commit = backup_commit,
- .abort = backup_abort,
- .clean = backup_clean,
+ .job_driver = {
+ .instance_size = sizeof(BackupBlockJob),
+ .job_type = JOB_TYPE_BACKUP,
+ .free = block_job_free,
+ .user_resume = block_job_user_resume,
+ .drain = block_job_drain,
+ .start = backup_run,
+ .commit = backup_commit,
+ .abort = backup_abort,
+ .clean = backup_clean,
+ },
.attached_aio_context = backup_attached_aio_context,
.drain = backup_drain,
};
BlockdevOnError on_target_error,
int creation_flags,
BlockCompletionFunc *cb, void *opaque,
- BlockJobTxn *txn, Error **errp)
+ JobTxn *txn, Error **errp)
{
int64_t len;
BlockDriverInfo bdi;
goto error;
}
- /* job->common.len is fixed, so we can't allow resize */
- job = block_job_create(job_id, &backup_job_driver, bs,
+ /* job->len is fixed, so we can't allow resize */
+ job = block_job_create(job_id, &backup_job_driver, txn, bs,
BLK_PERM_CONSISTENT_READ,
BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
/* Required permissions are already taken with target's blk_new() */
block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
&error_abort);
- job->common.len = len;
- block_job_txn_add_job(txn, &job->common);
+ job->len = len;
return &job->common;
bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
}
if (job) {
- backup_clean(&job->common);
- block_job_early_fail(&job->common);
+ backup_clean(&job->common.job);
+ job_early_fail(&job->common.job);
}
return NULL;