+    job->cancelled = true;
+}
+
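+/* Run @finish (if given) and then busy-wait for @job to finish: drain it
+ * until it either completes or defers itself to the main loop, then poll
+ * the main AioContext until job->completed is set. Returns the job's
+ * return value, -ECANCELED if the job was cancelled with ret == 0, or
+ * -EBUSY if @finish set an error. */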
+static int block_job_finish_sync(BlockJob *job,
+                                 void (*finish)(BlockJob *, Error **errp),
+                                 Error **errp)
+{
+    Error *local_err = NULL;
+    int ret;
+
+    assert(blk_bs(job->blk)->job == job);
+
+    block_job_ref(job);
+
+    if (finish) {
+        finish(job, &local_err);
+    }
+    if (local_err) {
+        error_propagate(errp, local_err);
+        block_job_unref(job);
+        return -EBUSY;
+    }
+    /* block_job_drain calls block_job_enter, and it should be enough to
+     * induce progress until the job completes or moves to the main thread.
+     */
+    while (!job->deferred_to_main_loop && !job->completed) {
+        block_job_drain(job);
+    }
+    while (!job->completed) {
+        aio_poll(qemu_get_aio_context(), true);
+    }
+    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
+    block_job_unref(job);
+    return ret;
+}
+
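+/* Fail every job in @job's transaction: the first job to fail cancels all
+ * the others and waits for them to finish, holding each job's AioContext
+ * across the teardown. Re-entrant calls (txn->aborting already set) return
+ * immediately, since the first aborting job handles everything. */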
+static void block_job_completed_txn_abort(BlockJob *job)
+{
+    AioContext *ctx;
+    BlockJobTxn *txn = job->txn;
+    BlockJob *other_job;
+
+    if (txn->aborting) {
+        /*
+         * We are cancelled by another job, which will handle everything.
+         */
+        return;
+    }
+    txn->aborting = true;
+    block_job_txn_ref(txn);
+
+    /* We are the first failed job; lock every job's AioContext before
+     * cancelling the others below. */
+    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
+        ctx = blk_get_aio_context(other_job->blk);
+        aio_context_acquire(ctx);
+    }
+
+    /* Other jobs are effectively cancelled by us, set the status for
+     * them; this job, however, may or may not be cancelled, depending
+     * on the caller, so leave it. */
+    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
+        if (other_job != job) {
+            block_job_cancel_async(other_job);
+        }
+    }
+    while (!QLIST_EMPTY(&txn->jobs)) {
+        other_job = QLIST_FIRST(&txn->jobs);
+        ctx = blk_get_aio_context(other_job->blk);
+        if (!other_job->completed) {
+            assert(other_job->cancelled);
+            block_job_finish_sync(other_job, NULL, NULL);
+        }
+        block_job_completed_single(other_job);
+        aio_context_release(ctx);
+    }
+
+    block_job_txn_unref(txn);
+}
+
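+/* Called when @job completes successfully; commits the transaction only
+ * once every job in it has completed, by running
+ * block_job_completed_single() for each member under its AioContext. */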
+static void block_job_completed_txn_success(BlockJob *job)
+{
+    AioContext *ctx;
+    BlockJobTxn *txn = job->txn;
+    BlockJob *other_job, *next;
+    /*
+     * Successful completion, see if there are other running jobs in this
+     * txn.
+     */
+    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
+        if (!other_job->completed) {
+            return;
+        }
+    }
+    /* We are the last completed job, commit the transaction. */
+    QLIST_FOREACH_SAFE(other_job, &txn->jobs, txn_list, next) {
+        ctx = blk_get_aio_context(other_job->blk);
+        aio_context_acquire(ctx);
+        assert(other_job->ret == 0);
+        block_job_completed_single(other_job);
+        aio_context_release(ctx);
+    }
+}
+
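+/* Set a rate limit for @job (in bytes per second), delegating validation
+ * to the driver's set_speed callback; fails for drivers that do not
+ * implement one. */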
+void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
+{
+    Error *local_err = NULL;
+
+    if (!job->driver->set_speed) {
+        error_setg(errp, QERR_UNSUPPORTED);
+        return;
+    }
+    job->driver->set_speed(job, speed, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    job->speed = speed;
+}
+
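+/* Ask @job to complete manually (e.g. via QMP block-job-complete). Only
+ * valid for started, unpaused, uncancelled jobs whose driver implements
+ * the complete callback. */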
+void block_job_complete(BlockJob *job, Error **errp)
+{
+    /* Should not be reachable via external interface for internal jobs */
+    assert(job->id);
+    if (job->pause_count || job->cancelled ||
+        !block_job_started(job) || !job->driver->complete) {
+        error_setg(errp, "The active block job '%s' cannot be completed",
+                   job->id);
+        return;
+    }
+
+    job->driver->complete(job, errp);
+}
+
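+/* Pause @job on behalf of the user (as opposed to an internal pause), so
+ * that block_job_user_resume() can later undo exactly this pause. */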
+void block_job_user_pause(BlockJob *job)
+{
+    job->user_paused = true;
+    block_job_pause(job);
+}
+
+bool block_job_user_paused(BlockJob *job)
+{
+    return job->user_paused;
+}
+
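+/* Resume a job previously paused with block_job_user_pause(); also resets
+ * the job's I/O status. No-op unless the job is actually user-paused. */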
+void block_job_user_resume(BlockJob *job)
+{
+    if (job && job->user_paused && job->pause_count > 0) {
+        block_job_iostatus_reset(job);
+        job->user_paused = false;
+        block_job_resume(job);
+    }
+}
+
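+/* Request asynchronous cancellation of @job. A job that has not been
+ * started yet is simply completed with -ECANCELED. */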
+void block_job_cancel(BlockJob *job)
+{
+    if (block_job_started(job)) {
+        block_job_cancel_async(job);
+        block_job_enter(job);
+    } else {
+        block_job_completed(job, -ECANCELED);
+    }
+}
+
+/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
+ * used with block_job_finish_sync() without the need for (rather nasty)
+ * function pointer casts there. */
+static void block_job_cancel_err(BlockJob *job, Error **errp)
+{
+    block_job_cancel(job);
+}
+
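+/* Synchronously cancel @job and wait for it to finish; see
+ * block_job_finish_sync() for the return value. */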
+int block_job_cancel_sync(BlockJob *job)
+{
+    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
+}
+
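+/* Synchronously cancel every job on the block_jobs list, acquiring each
+ * job's AioContext around the cancellation. */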
+void block_job_cancel_sync_all(void)
+{
+    BlockJob *job;
+    AioContext *aio_context;
+
+    while ((job = QLIST_FIRST(&block_jobs))) {
+        aio_context = blk_get_aio_context(job->blk);
+        aio_context_acquire(aio_context);
+        block_job_cancel_sync(job);
+        aio_context_release(aio_context);
+    }
+}
+
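+/* Synchronously run the job's completion logic and wait for the job to
+ * finish; errors from the driver's complete callback are propagated to
+ * @errp. */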
+int block_job_complete_sync(BlockJob *job, Error **errp)
+{
+    return block_job_finish_sync(job, &block_job_complete, errp);
+}
+
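+/* Build a BlockJobInfo describing @job (as used by QMP query-block-jobs);
+ * internal jobs are not visible to the external interface and cause an
+ * error instead. */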
+BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
+{
+    BlockJobInfo *info;
+
+    if (block_job_is_internal(job)) {
+        error_setg(errp, "Cannot query QEMU internal jobs");
+        return NULL;
+    }
+    info = g_new0(BlockJobInfo, 1);
+    info->type = g_strdup(BlockJobType_lookup[job->driver->job_type]);
+    info->device = g_strdup(job->id);
+    info->len = job->len;
+    info->busy = job->busy;
+    info->paused = job->pause_count > 0;
+    info->offset = job->offset;
+    info->speed = job->speed;
+    info->io_status = job->iostatus;
+    info->ready = job->ready;
+    return info;
+}
+
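+/* Latch the job's I/O status to NOSPACE or FAILED on the first error;
+ * later errors do not overwrite an already-set status. */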
+static void block_job_iostatus_set_err(BlockJob *job, int error)
+{
+    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
+        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
+                                          BLOCK_DEVICE_IO_STATUS_FAILED;
+    }
+}
+
+static void block_job_event_cancelled(BlockJob *job)
+{
+    if (block_job_is_internal(job)) {
+        return;
+    }
+
+    qapi_event_send_block_job_cancelled(job->driver->job_type,
+                                        job->id,
+                                        job->len,
+                                        job->offset,
+                                        job->speed,
+                                        &error_abort);
+}
+
+static void block_job_event_completed(BlockJob *job, const char *msg)
+{
+    if (block_job_is_internal(job)) {
+        return;
+    }
+
+    qapi_event_send_block_job_completed(job->driver->job_type,
+                                        job->id,
+                                        job->len,
+                                        job->offset,
+                                        job->speed,
+                                        !!msg,
+                                        msg,
+                                        &error_abort);
+}
+
+/*
+ * API for block job drivers and the block layer. These functions are
+ * declared in blockjob_int.h.
+ */
+
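+/* Create a new block job driving @bs, with a dedicated BlockBackend
+ * holding @perm/@shared_perm on it. External jobs must have an ID (either
+ * @job_id or, by default, the device name of @bs); internal jobs
+ * (BLOCK_JOB_INTERNAL in @flags) must not. The job starts paused
+ * (pause_count == 1) with one reference held for the caller. */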
+void *block_job_create(const char *job_id, const BlockJobDriver *driver,
+                       BlockDriverState *bs, uint64_t perm,
+                       uint64_t shared_perm, int64_t speed, int flags,
+                       BlockCompletionFunc *cb, void *opaque, Error **errp)
+{
+    BlockBackend *blk;
+    BlockJob *job;
+    int ret;
+
+    if (bs->job) {
+        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
+        return NULL;
+    }
+
+    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
+        job_id = bdrv_get_device_name(bs);
+        if (!*job_id) {
+            error_setg(errp, "An explicit job ID is required for this node");
+            return NULL;
+        }
+    }
+
+    if (job_id) {
+        if (flags & BLOCK_JOB_INTERNAL) {
+            error_setg(errp, "Cannot specify job ID for internal block job");
+            return NULL;
+        }
+
+        if (!id_wellformed(job_id)) {
+            error_setg(errp, "Invalid job ID '%s'", job_id);
+            return NULL;
+        }
+
+        if (block_job_get(job_id)) {
+            error_setg(errp, "Job ID '%s' already in use", job_id);
+            return NULL;
+        }
+    }
+
+    blk = blk_new(perm, shared_perm);
+    ret = blk_insert_bs(blk, bs, errp);
+    if (ret < 0) {
+        blk_unref(blk);
+        return NULL;
+    }
+
+    job = g_malloc0(driver->instance_size);
+    job->driver = driver;
+    job->id = g_strdup(job_id);
+    job->blk = blk;
+    job->cb = cb;
+    job->opaque = opaque;
+    job->busy = false;
+    job->paused = true;
+    job->pause_count = 1;
+    job->refcnt = 1;
+
+    error_setg(&job->blocker, "block device is in use by block job: %s",
+               BlockJobType_lookup[driver->job_type]);
+    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
+    bs->job = job;
+
+    blk_set_dev_ops(blk, &block_job_dev_ops, job);
+    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);
+
+    QLIST_INSERT_HEAD(&block_jobs, job, job_list);
+
+    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
+                                 block_job_detach_aio_context, job);
+
+    /* Only set speed when necessary to avoid NotSupported error */
+    if (speed != 0) {
+        Error *local_err = NULL;
+
+        block_job_set_speed(job, speed, &local_err);
+        if (local_err) {
+            block_job_unref(job);
+            error_propagate(errp, local_err);
+            return NULL;
+        }