     return blk_get_aio_context(blk_acb->blk);
 }
 
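+/*
+ * Notify @blk and the BlockDriverState attached to it of the AioContext
+ * change. Returns 0 on success, a negative errno otherwise; on failure,
+ * @errp is set and the AioContext is left unchanged.
+ */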
-static void blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
-                                   bool update_root_node)
+static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                                  bool update_root_node, Error **errp)
 {
     BlockDriverState *bs = blk_bs(blk);
     ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+    int ret;
 
     if (bs) {
+        if (update_root_node) {
+            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
+                                                 errp);
+            if (ret < 0) {
+                return ret;
+            }
+        }
+
         if (tgm->throttle_state) {
             bdrv_drained_begin(bs);
             throttle_group_detach_aio_context(tgm);
             throttle_group_attach_aio_context(tgm, new_context);
             bdrv_drained_end(bs);
         }
-        if (update_root_node) {
-            GSList *ignore = g_slist_prepend(NULL, blk->root);
-            bdrv_set_aio_context_ignore(bs, new_context, &ignore);
-            g_slist_free(ignore);
-        }
     }
+
+    return 0;
 }
 
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                        Error **errp)
 {
-    blk_do_set_aio_context(blk, new_context, true);
+    return blk_do_set_aio_context(blk, new_context, true, errp);
 }
 
 static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
                                  GSList **ignore)
 {
     BlockBackend *blk = child->opaque;
-    blk_do_set_aio_context(blk, ctx, false);
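+    /*
+     * update_root_node is false, so this cannot fail and never uses errp;
+     * &error_abort merely documents that expectation.
+     */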
+    blk_do_set_aio_context(blk, ctx, false, &error_abort);
 }
 
 void blk_add_aio_context_notifier(BlockBackend *blk,
     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
     unsigned i;
     unsigned nvqs = s->conf->num_queues;
+    Error *local_err = NULL;
     int r;
 
     if (vblk->dataplane_started || s->starting) {
     vblk->dataplane_started = true;
     trace_virtio_blk_data_plane_start(s);
 
-    blk_set_aio_context(s->conf->conf.blk, s->ctx);
+    r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err);
+    if (r < 0) {
+        error_report_err(local_err);
+        goto fail_guest_notifiers;
+    }
 
     /* Kick right away to begin processing requests already in vring */
     for (i = 0; i < nvqs; i++) {
     aio_context_acquire(s->ctx);
     aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s);
-    /* Drain and switch bs back to the QEMU main loop */
-    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context());
+    /* Drain and try to switch bs back to the QEMU main loop. If other users
+     * keep the BlockBackend in the iothread, that's ok */
+    blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL);
 
     aio_context_release(s->ctx);
 }
 
     aio_context_acquire(dataplane->ctx);
-    blk_set_aio_context(dataplane->blk, qemu_get_aio_context());
+    /* Xen doesn't have multiple users for nodes, so this can't fail */
+    blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(dataplane->ctx);
 
     xendev = dataplane->xendev;
 }
 
     aio_context_acquire(dataplane->ctx);
-    blk_set_aio_context(dataplane->blk, dataplane->ctx);
+    /* If other users keep the BlockBackend in the iothread, that's ok */
+    blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL);
     aio_context_release(dataplane->ctx);
 
     return;
     VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev);
     VirtIOSCSI *s = VIRTIO_SCSI(vdev);
     SCSIDevice *sd = SCSI_DEVICE(dev);
+    int ret;
 
     if (s->ctx && !s->dataplane_fenced) {
         AioContext *ctx;
             return;
         }
         virtio_scsi_acquire(s);
-        blk_set_aio_context(sd->conf.blk, s->ctx);
+        ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp);
         virtio_scsi_release(s);
-
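+        /* On failure, blk_set_aio_context() has already set errp */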
+        if (ret < 0) {
+            return;
+        }
     }
 
     if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) {
 
     if (s->ctx) {
         virtio_scsi_acquire(s);
-        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context());
+        /* If other users keep the BlockBackend in the iothread, that's ok */
+        blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL);
         virtio_scsi_release(s);
     }
 void blk_op_block_all(BlockBackend *blk, Error *reason);
 void blk_op_unblock_all(BlockBackend *blk, Error *reason);
 AioContext *blk_get_aio_context(BlockBackend *blk);
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context);
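+/*
+ * Try to switch @blk and its root node to @new_context. Returns 0 on
+ * success; on failure, a negative errno is returned and @errp is set.
+ */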
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+                        Error **errp);
 void blk_add_aio_context_notifier(BlockBackend *blk,
                                   void (*attached_aio_context)(AioContext *new_context, void *opaque),
                                   void (*detach_aio_context)(void *opaque), void *opaque);
     s = bs->opaque;
     blk_insert_bs(blk, bs, &error_abort);
-    blk_set_aio_context(blk, ctx_a);
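+    /* blk is the only parent of bs in this test, so the switch cannot fail */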
+    blk_set_aio_context(blk, ctx_a, &error_abort);
 
     aio_context_acquire(ctx_a);
     s->bh_indirection_ctx = ctx_b;
     }
 
     aio_context_acquire(ctx_a);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx_a);
 
     bdrv_unref(bs);
     if (use_iothread) {
         iothread = iothread_new();
         ctx = iothread_get_aio_context(iothread);
-        blk_set_aio_context(blk_src, ctx);
+        blk_set_aio_context(blk_src, ctx, &error_abort);
     } else {
         ctx = qemu_get_aio_context();
     }
 
     g_assert_cmpint(ret, ==, (result == TEST_JOB_SUCCESS ? 0 : -EIO));
 
     if (use_iothread) {
-        blk_set_aio_context(blk_src, qemu_get_aio_context());
+        blk_set_aio_context(blk_src, qemu_get_aio_context(), &error_abort);
     }
     aio_context_release(ctx);
     blk_insert_bs(blk, bs, &error_abort);
     c = QLIST_FIRST(&bs->parents);
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     aio_context_acquire(ctx);
     t->fn(c);
     if (t->blkfn) {
         t->blkfn(blk);
     }
     aio_context_release(ctx);
 
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
 
     bdrv_unref(bs);
     blk_unref(blk);
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     tjob->n = 0;
     while (tjob->n == 0) {
     }
 
     aio_context_acquire(ctx);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
     tjob->n = 0;
         aio_poll(qemu_get_aio_context(), false);
     }
 
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
 
     tjob->n = 0;
     while (tjob->n == 0) {
     aio_context_acquire(ctx);
     job_complete_sync(&tjob->common.job, &error_abort);
-    blk_set_aio_context(blk, qemu_get_aio_context());
+    blk_set_aio_context(blk, qemu_get_aio_context(), &error_abort);
     aio_context_release(ctx);
 
     bdrv_unref(bs);
     bs_verify = bdrv_open(NULL, NULL, options, BDRV_O_RDWR, &error_abort);
 
     /* Switch the AioContext */
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
 
     /* Switch the AioContext back */
     ctx = qemu_get_aio_context();
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
 
     blk_insert_bs(blk, bs_verify, &error_abort);
 
     /* Switch the AioContext */
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
 
     /* Switch the AioContext back */
     ctx = qemu_get_aio_context();
-    blk_set_aio_context(blk, ctx);
+    blk_set_aio_context(blk, ctx, &error_abort);
     g_assert(blk_get_aio_context(blk) == ctx);
     g_assert(bdrv_get_aio_context(bs_verify) == ctx);
     g_assert(bdrv_get_aio_context(bs_a) == ctx);
     job_cancel_sync_all();
 
     aio_context_acquire(ctx);
-    blk_set_aio_context(blk, main_ctx);
+    blk_set_aio_context(blk, main_ctx, &error_abort);
     bdrv_try_set_aio_context(target, main_ctx, &error_abort);
     aio_context_release(ctx);