/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2011 IBM Corp.
 * Copyright (c) 2012 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/trace.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qapi-events-block-core.h"
#include "qapi/qmp/qerror.h"
#include "qemu/coroutine.h"
#include "qemu/timer.h"

/* Right now, this mutex is only needed to synchronize accesses to job->busy
 * and job->sleep_timer, such as concurrent calls to block_job_do_yield and
 * block_job_enter. */
static QemuMutex block_job_mutex;

static void block_job_lock(void)
{
    qemu_mutex_lock(&block_job_mutex);
}

static void block_job_unlock(void)
{
    qemu_mutex_unlock(&block_job_mutex);
}

static void __attribute__((__constructor__)) block_job_init(void)
{
    qemu_mutex_init(&block_job_mutex);
}

static void block_job_event_cancelled(BlockJob *job);
static void block_job_event_completed(BlockJob *job, const char *msg);
static int block_job_event_pending(BlockJob *job);
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

/* Transactional group of block jobs */
struct BlockJobTxn {

    /* Is this txn being cancelled? */
    bool aborting;

    QLIST_HEAD(, BlockJob) jobs;

    int refcnt;
};

/*
 * The block job API is composed of two categories of functions.
 *
 * The first includes functions used by the monitor. The monitor is
 * peculiar in that it accesses the block job list with block_job_get, and
 * therefore needs consistency across block_job_get and the actual operation
 * (e.g. block_job_set_speed). The consistency is achieved with
 * aio_context_acquire/release. These functions are declared in blockjob.h.
 *
 * The second includes functions used by the block job drivers and sometimes
 * by the core block layer. These do not care about locking, because the
 * whole coroutine runs under the AioContext lock, and are declared in
 * blockjob_int.h.
 */

static bool is_block_job(Job *job)
{
    return job_type(job) == JOB_TYPE_BACKUP ||
           job_type(job) == JOB_TYPE_COMMIT ||
           job_type(job) == JOB_TYPE_MIRROR ||
           job_type(job) == JOB_TYPE_STREAM;
}

BlockJob *block_job_next(BlockJob *bjob)
{
    Job *job = bjob ? &bjob->job : NULL;

    do {
        job = job_next(job);
    } while (job && !is_block_job(job));

    return job ? container_of(job, BlockJob, job) : NULL;
}

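/* Usage sketch: callers iterate over all block jobs by passing NULL first and
 * then the previous result, as block_job_cancel_sync_all() does below:
 *
 *     BlockJob *job;
 *     for (job = block_job_next(NULL); job; job = block_job_next(job)) {
 *         ...
 *     }
 */
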
BlockJob *block_job_get(const char *id)
{
    Job *job = job_get(id);

    if (job && is_block_job(job)) {
        return container_of(job, BlockJob, job);
    }
    return NULL;
}

BlockJobTxn *block_job_txn_new(void)
{
    BlockJobTxn *txn = g_new0(BlockJobTxn, 1);
    QLIST_INIT(&txn->jobs);
    txn->refcnt = 1;
    return txn;
}

static void block_job_txn_ref(BlockJobTxn *txn)
{
    txn->refcnt++;
}

void block_job_txn_unref(BlockJobTxn *txn)
{
    if (txn && --txn->refcnt == 0) {
        g_free(txn);
    }
}

void block_job_txn_add_job(BlockJobTxn *txn, BlockJob *job)
{
    if (!txn) {
        return;
    }

    job->txn = txn;

    QLIST_INSERT_HEAD(&txn->jobs, job, txn_list);
    block_job_txn_ref(txn);
}

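/* Usage sketch: callers that want several jobs to complete or fail as a group
 * create one transaction, add each job, and drop their own reference once the
 * jobs hold theirs.  This mirrors what block_job_create() does at the end of
 * this file for the single-job case:
 *
 *     BlockJobTxn *txn = block_job_txn_new();
 *     block_job_txn_add_job(txn, job1);
 *     block_job_txn_add_job(txn, job2);
 *     block_job_txn_unref(txn);
 */
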
static void block_job_txn_del_job(BlockJob *job)
{
    if (job->txn) {
        QLIST_REMOVE(job, txn_list);
        block_job_txn_unref(job->txn);
        job->txn = NULL;
    }
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_pending(BlockJob *job)
{
    return timer_pending(&job->sleep_timer);
}

/* Assumes the block_job_mutex is held */
static bool block_job_timer_not_pending(BlockJob *job)
{
    return !block_job_timer_pending(job);
}

static void block_job_pause(BlockJob *job)
{
    job->pause_count++;
}

static void block_job_resume(BlockJob *job)
{
    assert(job->pause_count > 0);
    job->pause_count--;
    if (job->pause_count) {
        return;
    }

    /* kick only if no timer is pending */
    block_job_enter_cond(job, block_job_timer_not_pending);
}

void block_job_ref(BlockJob *job)
{
    ++job->refcnt;
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque);
static void block_job_detach_aio_context(void *opaque);

void block_job_unref(BlockJob *job)
{
    if (--job->refcnt == 0) {
        assert(job->job.status == JOB_STATUS_NULL);
        BlockDriverState *bs = blk_bs(job->blk);
        bs->job = NULL;
        block_job_remove_all_bdrv(job);
        blk_remove_aio_context_notifier(job->blk,
                                        block_job_attached_aio_context,
                                        block_job_detach_aio_context, job);
        blk_unref(job->blk);
        error_free(job->blocker);
        assert(!timer_pending(&job->sleep_timer));
        job_delete(&job->job);
    }
}

static void block_job_attached_aio_context(AioContext *new_context,
                                           void *opaque)
{
    BlockJob *job = opaque;

    if (job->driver->attached_aio_context) {
        job->driver->attached_aio_context(job, new_context);
    }

    block_job_resume(job);
}

static void block_job_drain(BlockJob *job)
{
    /* If job is !job->busy this kicks it into the next pause point. */
    block_job_enter(job);

    blk_drain(job->blk);
    if (job->driver->drain) {
        job->driver->drain(job);
    }
}

static void block_job_detach_aio_context(void *opaque)
{
    BlockJob *job = opaque;

    /* In case the job terminates during aio_poll()... */
    block_job_ref(job);

    block_job_pause(job);

    while (!job->paused && !job->completed) {
        block_job_drain(job);
    }

    block_job_unref(job);
}

static char *child_job_get_parent_desc(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    return g_strdup_printf("%s job '%s'", job_type_str(&job->job), job->job.id);
}

static void child_job_drained_begin(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_pause(job);
}

static void child_job_drained_end(BdrvChild *c)
{
    BlockJob *job = c->opaque;
    block_job_resume(job);
}

static const BdrvChildRole child_job = {
    .get_parent_desc = child_job_get_parent_desc,
    .drained_begin = child_job_drained_begin,
    .drained_end = child_job_drained_end,
    .stay_at_node = true,
};

void block_job_remove_all_bdrv(BlockJob *job)
{
    GSList *l;

    for (l = job->nodes; l; l = l->next) {
        BdrvChild *c = l->data;
        bdrv_op_unblock_all(c->bs, job->blocker);
        bdrv_root_unref_child(c);
    }
    g_slist_free(job->nodes);
    job->nodes = NULL;
}

int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
                       uint64_t perm, uint64_t shared_perm, Error **errp)
{
    BdrvChild *c;

    c = bdrv_root_attach_child(bs, name, &child_job, perm, shared_perm,
                               job, errp);
    if (c == NULL) {
        return -EPERM;
    }

    job->nodes = g_slist_prepend(job->nodes, c);
    bdrv_op_block_all(bs, job->blocker);

    return 0;
}

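/* Driver-side sketch (hypothetical 'target' node): a job that manipulates
 * nodes beyond the main one registers them so that conflicting operations are
 * blocked for the job's lifetime, e.g.
 *
 *     ret = block_job_add_bdrv(&s->common, "target", target_bs,
 *                              0, BLK_PERM_ALL, errp);
 *     if (ret < 0) {
 *         goto fail;
 *     }
 */
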
bool block_job_is_internal(BlockJob *job)
{
    return (job->job.id == NULL);
}

static bool block_job_started(BlockJob *job)
{
    return job->co;
}

const BlockJobDriver *block_job_driver(BlockJob *job)
{
    return job->driver;
}

/**
 * All jobs must allow a pause point before entering their job proper. This
 * ensures that jobs can be paused prior to being started, then resumed later.
 */
static void coroutine_fn block_job_co_entry(void *opaque)
{
    BlockJob *job = opaque;

    assert(job && job->driver && job->driver->start);
    block_job_pause_point(job);
    job->driver->start(job);
}

static void block_job_sleep_timer_cb(void *opaque)
{
    BlockJob *job = opaque;

    block_job_enter(job);
}

void block_job_start(BlockJob *job)
{
    assert(job && !block_job_started(job) && job->paused &&
           job->driver && job->driver->start);
    job->co = qemu_coroutine_create(block_job_co_entry, job);
    job->pause_count--;
    job->busy = true;
    job->paused = false;
    job_state_transition(&job->job, JOB_STATUS_RUNNING);
    bdrv_coroutine_enter(blk_bs(job->blk), job->co);
}

static void block_job_decommission(BlockJob *job)
{
    assert(job);
    job->completed = true;
    job->busy = false;
    job->paused = false;
    job->deferred_to_main_loop = true;
    block_job_txn_del_job(job);
    job_state_transition(&job->job, JOB_STATUS_NULL);
    block_job_unref(job);
}

static void block_job_do_dismiss(BlockJob *job)
{
    block_job_decommission(job);
}

static void block_job_conclude(BlockJob *job)
{
    job_state_transition(&job->job, JOB_STATUS_CONCLUDED);
    if (job->auto_dismiss || !block_job_started(job)) {
        block_job_do_dismiss(job);
    }
}

static void block_job_update_rc(BlockJob *job)
{
    if (!job->ret && block_job_is_cancelled(job)) {
        job->ret = -ECANCELED;
    }
    if (job->ret) {
        job_state_transition(&job->job, JOB_STATUS_ABORTING);
    }
}

static int block_job_prepare(BlockJob *job)
{
    if (job->ret == 0 && job->driver->prepare) {
        job->ret = job->driver->prepare(job);
    }
    return job->ret;
}

static void block_job_commit(BlockJob *job)
{
    assert(!job->ret);
    if (job->driver->commit) {
        job->driver->commit(job);
    }
}

static void block_job_abort(BlockJob *job)
{
    assert(job->ret);
    if (job->driver->abort) {
        job->driver->abort(job);
    }
}

static void block_job_clean(BlockJob *job)
{
    if (job->driver->clean) {
        job->driver->clean(job);
    }
}

static int block_job_finalize_single(BlockJob *job)
{
    assert(job->completed);

    /* Ensure abort is called for late-transactional failures */
    block_job_update_rc(job);

    if (!job->ret) {
        block_job_commit(job);
    } else {
        block_job_abort(job);
    }
    block_job_clean(job);

    if (job->cb) {
        job->cb(job->opaque, job->ret);
    }

    /* Emit events only if we actually started */
    if (block_job_started(job)) {
        if (block_job_is_cancelled(job)) {
            block_job_event_cancelled(job);
        } else {
            const char *msg = NULL;
            if (job->ret < 0) {
                msg = strerror(-job->ret);
            }
            block_job_event_completed(job, msg);
        }
    }

    block_job_txn_del_job(job);
    block_job_conclude(job);
    return 0;
}

static void block_job_cancel_async(BlockJob *job, bool force)
{
    if (job->iostatus != BLOCK_DEVICE_IO_STATUS_OK) {
        block_job_iostatus_reset(job);
    }
    if (job->user_paused) {
        /* Do not call block_job_enter here, the caller will handle it. */
        job->user_paused = false;
        job->pause_count--;
    }
    job->cancelled = true;
    /* To prevent 'force == false' overriding a previous 'force == true' */
    job->force |= force;
}

static int block_job_txn_apply(BlockJobTxn *txn, int fn(BlockJob *), bool lock)
{
    AioContext *ctx;
    BlockJob *job, *next;
    int rc = 0;

    QLIST_FOREACH_SAFE(job, &txn->jobs, txn_list, next) {
        if (lock) {
            ctx = blk_get_aio_context(job->blk);
            aio_context_acquire(ctx);
        }
        rc = fn(job);
        if (lock) {
            aio_context_release(ctx);
        }
        if (rc) {
            break;
        }
    }
    return rc;
}

static int block_job_finish_sync(BlockJob *job,
                                 void (*finish)(BlockJob *, Error **errp),
                                 Error **errp)
{
    Error *local_err = NULL;
    int ret;

    assert(blk_bs(job->blk)->job == job);

    block_job_ref(job);

    if (finish) {
        finish(job, &local_err);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        block_job_unref(job);
        return -EBUSY;
    }
    /* block_job_drain calls block_job_enter, and it should be enough to
     * induce progress until the job completes or moves to the main thread.
     */
    while (!job->deferred_to_main_loop && !job->completed) {
        block_job_drain(job);
    }
    while (!job->completed) {
        aio_poll(qemu_get_aio_context(), true);
    }
    ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
    block_job_unref(job);
    return ret;
}

static void block_job_completed_txn_abort(BlockJob *job)
{
    AioContext *ctx;
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    if (txn->aborting) {
        /*
         * We are cancelled by another job, which will handle everything.
         */
        return;
    }
    txn->aborting = true;
    block_job_txn_ref(txn);

    /* We are the first failed job. Cancel other jobs. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        ctx = blk_get_aio_context(other_job->blk);
        aio_context_acquire(ctx);
    }

    /* Other jobs are effectively cancelled by us, set the status for
     * them; this job, however, may or may not be cancelled, depending
     * on the caller, so leave it. */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (other_job != job) {
            block_job_cancel_async(other_job, false);
        }
    }
    while (!QLIST_EMPTY(&txn->jobs)) {
        other_job = QLIST_FIRST(&txn->jobs);
        ctx = blk_get_aio_context(other_job->blk);
        if (!other_job->completed) {
            assert(other_job->cancelled);
            block_job_finish_sync(other_job, NULL, NULL);
        }
        block_job_finalize_single(other_job);
        aio_context_release(ctx);
    }

    block_job_txn_unref(txn);
}

static int block_job_needs_finalize(BlockJob *job)
{
    return !job->auto_finalize;
}

static void block_job_do_finalize(BlockJob *job)
{
    int rc;
    assert(job && job->txn);

    /* prepare the transaction to complete */
    rc = block_job_txn_apply(job->txn, block_job_prepare, true);
    if (rc) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_txn_apply(job->txn, block_job_finalize_single, true);
    }
}

static void block_job_completed_txn_success(BlockJob *job)
{
    BlockJobTxn *txn = job->txn;
    BlockJob *other_job;

    job_state_transition(&job->job, JOB_STATUS_WAITING);

    /*
     * Successful completion, see if there are other running jobs in this
     * txn.
     */
    QLIST_FOREACH(other_job, &txn->jobs, txn_list) {
        if (!other_job->completed) {
            return;
        }
        assert(other_job->ret == 0);
    }

    block_job_txn_apply(txn, block_job_event_pending, false);

    /* If no jobs need manual finalization, automatically do so */
    if (block_job_txn_apply(txn, block_job_needs_finalize, false) == 0) {
        block_job_do_finalize(job);
    }
}

void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    int64_t old_speed = job->speed;

    if (job_apply_verb(&job->job, JOB_VERB_SET_SPEED, errp)) {
        return;
    }
    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }

    ratelimit_set_speed(&job->limit, speed, BLOCK_JOB_SLICE_TIME);

    job->speed = speed;
    if (speed && speed <= old_speed) {
        return;
    }

    /* kick only if a timer is pending */
    block_job_enter_cond(job, block_job_timer_pending);
}

int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n)
{
    if (!job->speed) {
        return 0;
    }

    return ratelimit_calculate_delay(&job->limit, n);
}

void block_job_complete(BlockJob *job, Error **errp)
{
    /* Should not be reachable via external interface for internal jobs */
    assert(job->job.id);
    if (job_apply_verb(&job->job, JOB_VERB_COMPLETE, errp)) {
        return;
    }
    if (job->pause_count || job->cancelled || !job->driver->complete) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->job.id);
        return;
    }

    job->driver->complete(job, errp);
}

void block_job_finalize(BlockJob *job, Error **errp)
{
    assert(job && job->job.id);
    if (job_apply_verb(&job->job, JOB_VERB_FINALIZE, errp)) {
        return;
    }
    block_job_do_finalize(job);
}

void block_job_dismiss(BlockJob **jobptr, Error **errp)
{
    BlockJob *job = *jobptr;
    /* similarly to _complete, this is QMP-interface only. */
    assert(job->job.id);
    if (job_apply_verb(&job->job, JOB_VERB_DISMISS, errp)) {
        return;
    }

    block_job_do_dismiss(job);
    *jobptr = NULL;
}

void block_job_user_pause(BlockJob *job, Error **errp)
{
    if (job_apply_verb(&job->job, JOB_VERB_PAUSE, errp)) {
        return;
    }
    if (job->user_paused) {
        error_setg(errp, "Job is already paused");
        return;
    }
    job->user_paused = true;
    block_job_pause(job);
}

bool block_job_user_paused(BlockJob *job)
{
    return job->user_paused;
}

void block_job_user_resume(BlockJob *job, Error **errp)
{
    assert(job);
    if (!job->user_paused || job->pause_count <= 0) {
        error_setg(errp, "Can't resume a job that was not paused");
        return;
    }
    if (job_apply_verb(&job->job, JOB_VERB_RESUME, errp)) {
        return;
    }
    block_job_iostatus_reset(job);
    job->user_paused = false;
    block_job_resume(job);
}

void block_job_cancel(BlockJob *job, bool force)
{
    if (job->job.status == JOB_STATUS_CONCLUDED) {
        block_job_do_dismiss(job);
        return;
    }
    block_job_cancel_async(job, force);
    if (!block_job_started(job)) {
        block_job_completed(job, -ECANCELED);
    } else if (job->deferred_to_main_loop) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_enter(job);
    }
}

void block_job_user_cancel(BlockJob *job, bool force, Error **errp)
{
    if (job_apply_verb(&job->job, JOB_VERB_CANCEL, errp)) {
        return;
    }
    block_job_cancel(job, force);
}

/* A wrapper around block_job_cancel() taking an Error ** parameter so it may be
 * used with block_job_finish_sync() without the need for (rather nasty)
 * function pointer casts there. */
static void block_job_cancel_err(BlockJob *job, Error **errp)
{
    block_job_cancel(job, false);
}

int block_job_cancel_sync(BlockJob *job)
{
    return block_job_finish_sync(job, &block_job_cancel_err, NULL);
}

void block_job_cancel_sync_all(void)
{
    BlockJob *job;
    AioContext *aio_context;

    while ((job = block_job_next(NULL))) {
        aio_context = blk_get_aio_context(job->blk);
        aio_context_acquire(aio_context);
        block_job_cancel_sync(job);
        aio_context_release(aio_context);
    }
}

int block_job_complete_sync(BlockJob *job, Error **errp)
{
    return block_job_finish_sync(job, &block_job_complete, errp);
}

void block_job_progress_update(BlockJob *job, uint64_t done)
{
    job->offset += done;
}

void block_job_progress_set_remaining(BlockJob *job, uint64_t remaining)
{
    job->len = job->offset + remaining;
}

BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
{
    BlockJobInfo *info;

    if (block_job_is_internal(job)) {
        error_setg(errp, "Cannot query QEMU internal jobs");
        return NULL;
    }
    info = g_new0(BlockJobInfo, 1);
    info->type = g_strdup(job_type_str(&job->job));
    info->device = g_strdup(job->job.id);
    info->len = job->len;
    info->busy = atomic_read(&job->busy);
    info->paused = job->pause_count > 0;
    info->offset = job->offset;
    info->speed = job->speed;
    info->io_status = job->iostatus;
    info->ready = job->ready;
    info->status = job->job.status;
    info->auto_finalize = job->auto_finalize;
    info->auto_dismiss = job->auto_dismiss;
    info->has_error = job->ret != 0;
    info->error = job->ret ? g_strdup(strerror(-job->ret)) : NULL;
    return info;
}

static void block_job_iostatus_set_err(BlockJob *job, int error)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        job->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

static void block_job_event_cancelled(BlockJob *job)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_cancelled(job_type(&job->job), job->job.id,
                                        job->len, job->offset, job->speed,
                                        &error_abort);
}

static void block_job_event_completed(BlockJob *job, const char *msg)
{
    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_completed(job_type(&job->job), job->job.id,
                                        job->len, job->offset, job->speed,
                                        !!msg, msg, &error_abort);
}

static int block_job_event_pending(BlockJob *job)
{
    job_state_transition(&job->job, JOB_STATUS_PENDING);
    if (!job->auto_finalize && !block_job_is_internal(job)) {
        qapi_event_send_block_job_pending(job_type(&job->job), job->job.id,
                                          &error_abort);
    }
    return 0;
}

/**
 * API for block job drivers and the block layer. These functions are
 * declared in blockjob_int.h.
 */

void *block_job_create(const char *job_id, const BlockJobDriver *driver,
                       BlockJobTxn *txn, BlockDriverState *bs, uint64_t perm,
                       uint64_t shared_perm, int64_t speed, int flags,
                       BlockCompletionFunc *cb, void *opaque, Error **errp)
{
    BlockBackend *blk;
    BlockJob *job;
    int ret;

    if (bs->job) {
        error_setg(errp, QERR_DEVICE_IN_USE, bdrv_get_device_name(bs));
        return NULL;
    }

    if (job_id == NULL && !(flags & BLOCK_JOB_INTERNAL)) {
        job_id = bdrv_get_device_name(bs);
        if (!*job_id) {
            error_setg(errp, "An explicit job ID is required for this node");
            return NULL;
        }
    }

    if (job_id) {
        if (flags & BLOCK_JOB_INTERNAL) {
            error_setg(errp, "Cannot specify job ID for internal block job");
            return NULL;
        }
    }

    blk = blk_new(perm, shared_perm);
    ret = blk_insert_bs(blk, bs, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    job = job_create(job_id, &driver->job_driver, errp);
    if (job == NULL) {
        blk_unref(blk);
        return NULL;
    }

    assert(is_block_job(&job->job));

    job->driver = driver;
    job->blk = blk;
    job->cb = cb;
    job->opaque = opaque;
    job->busy = false;
    job->paused = true;
    job->pause_count = 1;
    job->refcnt = 1;
    job->auto_finalize = !(flags & BLOCK_JOB_MANUAL_FINALIZE);
    job->auto_dismiss = !(flags & BLOCK_JOB_MANUAL_DISMISS);
    aio_timer_init(qemu_get_aio_context(), &job->sleep_timer,
                   QEMU_CLOCK_REALTIME, SCALE_NS,
                   block_job_sleep_timer_cb, job);

    error_setg(&job->blocker, "block device is in use by block job: %s",
               job_type_str(&job->job));
    block_job_add_bdrv(job, "main node", bs, 0, BLK_PERM_ALL, &error_abort);
    bs->job = job;

    bdrv_op_unblock(bs, BLOCK_OP_TYPE_DATAPLANE, job->blocker);

    blk_add_aio_context_notifier(blk, block_job_attached_aio_context,
                                 block_job_detach_aio_context, job);

    /* Only set speed when necessary to avoid NotSupported error */
    if (speed != 0) {
        Error *local_err = NULL;

        block_job_set_speed(job, speed, &local_err);
        if (local_err) {
            block_job_early_fail(job);
            error_propagate(errp, local_err);
            return NULL;
        }
    }

    /* Single jobs are modeled as single-job transactions for sake of
     * consolidating the job management logic */
    if (!txn) {
        txn = block_job_txn_new();
        block_job_txn_add_job(txn, job);
        block_job_txn_unref(txn);
    } else {
        block_job_txn_add_job(txn, job);
    }

    return job;
}

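/* Driver-side sketch (hypothetical MyJob/my_job_driver names): a block job
 * driver typically wraps this as
 *
 *     MyJob *s = block_job_create(job_id, &my_job_driver, txn, bs,
 *                                 perm, shared_perm, speed, flags,
 *                                 cb, opaque, errp);
 *     if (!s) {
 *         return;
 *     }
 *     ...driver-specific setup...
 *     block_job_start(&s->common);
 *
 * relying on the driver struct embedding a BlockJob ('common' here is an
 * assumed field name) as its first member so the void* return can be cast.
 */
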
void block_job_early_fail(BlockJob *job)
{
    assert(job->job.status == JOB_STATUS_CREATED);
    block_job_decommission(job);
}

void block_job_completed(BlockJob *job, int ret)
{
    assert(job && job->txn && !job->completed);
    assert(blk_bs(job->blk)->job == job);
    job->completed = true;
    job->ret = ret;
    block_job_update_rc(job);
    trace_block_job_completed(job, ret, job->ret);
    if (job->ret) {
        block_job_completed_txn_abort(job);
    } else {
        block_job_completed_txn_success(job);
    }
}

static bool block_job_should_pause(BlockJob *job)
{
    return job->pause_count > 0;
}

/* Yield, and schedule a timer to reenter the coroutine after @ns nanoseconds.
 * Reentering the job coroutine with block_job_enter() before the timer has
 * expired is allowed and cancels the timer.
 *
 * If @ns is (uint64_t) -1, no timer is scheduled and block_job_enter() must be
 * called explicitly. */
static void block_job_do_yield(BlockJob *job, uint64_t ns)
{
    block_job_lock();
    if (ns != -1) {
        timer_mod(&job->sleep_timer, ns);
    }
    job->busy = false;
    block_job_unlock();
    qemu_coroutine_yield();

    /* Set by block_job_enter before re-entering the coroutine. */
    assert(job->busy);
}

void coroutine_fn block_job_pause_point(BlockJob *job)
{
    assert(job && block_job_started(job));

    if (!block_job_should_pause(job)) {
        return;
    }
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (job->driver->pause) {
        job->driver->pause(job);
    }

    if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
        JobStatus status = job->job.status;
        job_state_transition(&job->job, status == JOB_STATUS_READY
                                        ? JOB_STATUS_STANDBY
                                        : JOB_STATUS_PAUSED);
        job->paused = true;
        block_job_do_yield(job, -1);
        job->paused = false;
        job_state_transition(&job->job, status);
    }

    if (job->driver->resume) {
        job->driver->resume(job);
    }
}

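/* Driver-side sketch: long-running jobs are expected to call
 * block_job_pause_point() regularly from their coroutine (typically once per
 * iteration of their main loop) so that pause requests take effect at a safe
 * point, e.g.
 *
 *     while (!block_job_is_cancelled(&s->common) && work_left) {
 *         block_job_pause_point(&s->common);
 *         ...process one chunk...
 *     }
 *
 * ('s->common' and 'work_left' are placeholders for driver state).
 */
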
/*
 * Conditionally enter a block_job pending a call to fn() while
 * under the block_job_lock critical section.
 */
static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
{
    if (!block_job_started(job)) {
        return;
    }
    if (job->deferred_to_main_loop) {
        return;
    }

    block_job_lock();
    if (job->busy) {
        block_job_unlock();
        return;
    }

    if (fn && !fn(job)) {
        block_job_unlock();
        return;
    }

    assert(!job->deferred_to_main_loop);
    timer_del(&job->sleep_timer);
    job->busy = true;
    block_job_unlock();
    aio_co_wake(job->co);
}

void block_job_enter(BlockJob *job)
{
    block_job_enter_cond(job, NULL);
}

bool block_job_is_cancelled(BlockJob *job)
{
    return job->cancelled;
}

void block_job_sleep_ns(BlockJob *job, int64_t ns)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + ns);
    }

    block_job_pause_point(job);
}

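/* Driver-side sketch: together with block_job_ratelimit_get_delay() above,
 * this is how a job usually throttles itself to the configured speed
 * (placeholder names; real drivers differ in detail):
 *
 *     int64_t delay_ns = block_job_ratelimit_get_delay(&s->common, n_bytes);
 *     block_job_sleep_ns(&s->common, delay_ns);
 *     if (block_job_is_cancelled(&s->common)) {
 *         break;
 *     }
 */
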
void block_job_yield(BlockJob *job)
{
    assert(job->busy);

    /* Check cancellation *before* setting busy = false, too! */
    if (block_job_is_cancelled(job)) {
        return;
    }

    if (!block_job_should_pause(job)) {
        block_job_do_yield(job, -1);
    }

    block_job_pause_point(job);
}

void block_job_iostatus_reset(BlockJob *job)
{
    if (job->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        return;
    }
    assert(job->user_paused && job->pause_count > 0);
    job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

void block_job_event_ready(BlockJob *job)
{
    job_state_transition(&job->job, JOB_STATUS_READY);
    job->ready = true;

    if (block_job_is_internal(job)) {
        return;
    }

    qapi_event_send_block_job_ready(job_type(&job->job),
                                    job->job.id,
                                    job->len,
                                    job->offset,
                                    job->speed, &error_abort);
}

BlockErrorAction block_job_error_action(BlockJob *job, BlockdevOnError on_err,
                                        int is_read, int error)
{
    BlockErrorAction action;

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
    case BLOCKDEV_ON_ERROR_AUTO:
        action = (error == ENOSPC) ?
                 BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_STOP:
        action = BLOCK_ERROR_ACTION_STOP;
        break;
    case BLOCKDEV_ON_ERROR_REPORT:
        action = BLOCK_ERROR_ACTION_REPORT;
        break;
    case BLOCKDEV_ON_ERROR_IGNORE:
        action = BLOCK_ERROR_ACTION_IGNORE;
        break;
    default:
        abort();
    }
    if (!block_job_is_internal(job)) {
        qapi_event_send_block_job_error(job->job.id,
                                        is_read ? IO_OPERATION_TYPE_READ :
                                        IO_OPERATION_TYPE_WRITE,
                                        action, &error_abort);
    }
    if (action == BLOCK_ERROR_ACTION_STOP) {
        block_job_pause(job);
        /* make the pause user visible, which will be resumed from QMP. */
        job->user_paused = true;
        block_job_iostatus_set_err(job, error);
    }
    return action;
}

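/* Driver-side sketch: on an I/O error a job typically asks this function what
 * to do and only fails hard when the answer is "report", e.g.
 *
 *     action = block_job_error_action(&s->common, s->on_error, is_read, -ret);
 *     if (action == BLOCK_ERROR_ACTION_REPORT) {
 *         ...record the error and let the job finish with it...
 *     }
 *
 * ('s->on_error' is an assumed per-driver option).
 */
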
typedef struct {
    BlockJob *job;
    AioContext *aio_context;
    BlockJobDeferToMainLoopFn *fn;
    void *opaque;
} BlockJobDeferToMainLoopData;

static void block_job_defer_to_main_loop_bh(void *opaque)
{
    BlockJobDeferToMainLoopData *data = opaque;
    AioContext *aio_context;

    /* Prevent race with block_job_defer_to_main_loop() */
    aio_context_acquire(data->aio_context);

    /* Fetch BDS AioContext again, in case it has changed */
    aio_context = blk_get_aio_context(data->job->blk);
    if (aio_context != data->aio_context) {
        aio_context_acquire(aio_context);
    }

    data->fn(data->job, data->opaque);

    if (aio_context != data->aio_context) {
        aio_context_release(aio_context);
    }

    aio_context_release(data->aio_context);

    g_free(data);
}

void block_job_defer_to_main_loop(BlockJob *job,
                                  BlockJobDeferToMainLoopFn *fn,
                                  void *opaque)
{
    BlockJobDeferToMainLoopData *data = g_malloc(sizeof(*data));
    data->job = job;
    data->aio_context = blk_get_aio_context(job->blk);
    data->fn = fn;
    data->opaque = opaque;
    job->deferred_to_main_loop = true;

    aio_bh_schedule_oneshot(qemu_get_aio_context(),
                            block_job_defer_to_main_loop_bh, data);
}

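/* Driver-side sketch (hypothetical helper names): a job coroutine usually
 * finishes by deferring its completion handling to the main loop, where it is
 * safe to call block_job_completed():
 *
 *     static void my_job_complete(BlockJob *job, void *opaque)
 *     {
 *         ExitData *data = opaque;
 *         block_job_completed(job, data->ret);
 *         g_free(data);
 *     }
 *     ...
 *     block_job_defer_to_main_loop(&s->common, my_job_complete, data);
 */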