+/* Transactional group of block jobs */
+struct BlockJobTxn {
+
+ /* Is this txn being cancelled? */
+ bool aborting;
+
+ /* List of jobs participating in this transaction */
+ QLIST_HEAD(, BlockJob) jobs;
+
+ /* Reference count */
+ int refcnt;
+};
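+
+/* Lifecycle sketch, assuming the block_job_txn_new()/block_job_txn_add_job()/
+ * block_job_txn_unref() helpers (defined elsewhere in this file, not shown in
+ * this hunk):
+ *
+ *   BlockJobTxn *txn = block_job_txn_new();   // refcnt == 1, empty job list
+ *   block_job_txn_add_job(txn, job1);         // job1 joins txn->jobs
+ *   block_job_txn_add_job(txn, job2);
+ *   block_job_txn_unref(txn);                 // freed once refcnt drops to 0
+ */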
+
+static QLIST_HEAD(, BlockJob) block_jobs = QLIST_HEAD_INITIALIZER(block_jobs);
+
+BlockJob *block_job_next(BlockJob *job)
+{
+ if (!job) {
+ return QLIST_FIRST(&block_jobs);
+ }
+ return QLIST_NEXT(job, job_list);
+}
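+
+/* Typical iteration pattern for the helper above (a sketch):
+ *
+ *   BlockJob *job;
+ *   for (job = block_job_next(NULL); job; job = block_job_next(job)) {
+ *       // inspect job
+ *   }
+ */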
+
+BlockJob *block_job_get(const char *id)
+{
+ BlockJob *job;
+
+ QLIST_FOREACH(job, &block_jobs, job_list) {
+ if (!strcmp(id, job->id)) {
+ return job;
+ }
+ }
+
+ return NULL;
+}
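+
+/* Usage sketch (hypothetical caller; 'device' and 'errp' are assumed
+ * parameters, error_setg() is the usual QEMU error helper):
+ *
+ *   BlockJob *job = block_job_get(device);
+ *   if (!job) {
+ *       error_setg(errp, "Block job '%s' not found", device);
+ *       return;
+ *   }
+ */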
+
+/* Normally the job runs in its BlockBackend's AioContext. The exception is
+ * block_job_defer_to_main_loop() where it runs in the QEMU main loop. Code
+ * that supports both cases uses this helper function.
+ */
+static AioContext *block_job_get_aio_context(BlockJob *job)
+{
+ return job->deferred_to_main_loop ?
+ qemu_get_aio_context() :
+ blk_get_aio_context(job->blk);
+}
+
+static void block_job_attached_aio_context(AioContext *new_context,
+ void *opaque)
+{
+ BlockJob *job = opaque;
+
+ if (job->driver->attached_aio_context) {
+ job->driver->attached_aio_context(job, new_context);
+ }
+
+ block_job_resume(job);
+}
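+
+/* The block_job_resume() above balances the block_job_pause() issued by
+ * block_job_detach_aio_context() below.  A sketch of how the pair is
+ * presumably registered (blk_add_aio_context_notifier() is the existing
+ * BlockBackend API for this; the call would live in block_job_create()):
+ *
+ *   blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
+ *                                block_job_detach_aio_context, job);
+ */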
+
+static void block_job_detach_aio_context(void *opaque)
+{
+ BlockJob *job = opaque;
+
+ /* Take a reference in case the job terminates (and would be freed) during
+ * aio_poll() below. */
+ block_job_ref(job);
+
+ block_job_pause(job);
+
+ if (!job->paused) {
+ /* If the job is sleeping (!job->busy), this kicks it into its next pause
+ * point. */
+ block_job_enter(job);
+ }
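+ /* Wait until the job actually reaches a pause point or finishes, so that no
+ * job code keeps running in the old AioContext afterwards. */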
+ while (!job->paused && !job->completed) {
+ aio_poll(block_job_get_aio_context(job), true);
+ }
+
+ block_job_unref(job);
+}
+
+void *block_job_create(const char *job_id, const BlockJobDriver *driver,
+ BlockDriverState *bs, int64_t speed,
+ BlockCompletionFunc *cb, void *opaque, Error **errp)