char *name;
int refcnt;
BdrvChild *root;
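+ /* AioContext this BlockBackend and its attached node (if any) run in */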
+ AioContext *ctx;
DriveInfo *legacy_dinfo; /* null unless created by drive_new() */
QTAILQ_ENTRY(BlockBackend) link; /* for block_backends */
QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
uint64_t shared_perm;
bool disable_perm;
+ bool allow_aio_context_change;
bool allow_write_beyond_eof;
NotifierList remove_bs_notifiers, insert_bs_notifiers;
static void blk_root_change_media(BdrvChild *child, bool load);
static void blk_root_resize(BdrvChild *child);
+static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore, Error **errp);
+static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore);
+
static char *blk_root_get_parent_desc(BdrvChild *child)
{
BlockBackend *blk = child->opaque;
.attach = blk_root_attach,
.detach = blk_root_detach,
+
+ .can_set_aio_ctx = blk_root_can_set_aio_ctx,
+ .set_aio_ctx = blk_root_set_aio_ctx,
};
/*
*
* Return the new BlockBackend on success, null on failure.
*/
-BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
+BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
{
BlockBackend *blk;
blk = g_new0(BlockBackend, 1);
blk->refcnt = 1;
+ blk->ctx = ctx;
blk->perm = perm;
blk->shared_perm = shared_perm;
blk_set_enable_write_cache(blk, true);
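
With the new parameter, whoever creates a BlockBackend now states up front which
AioContext it starts out in. A minimal caller sketch (hypothetical call site, not
taken from this patch):

    blk = blk_new(qemu_get_aio_context(),
                  BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, BLK_PERM_ALL);
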
/*
* Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
+ * The new BlockBackend is in the main AioContext.
*
* Just as with bdrv_open(), after having called this function the reference to
* @options belongs to the block layer (even on failure).
perm |= BLK_PERM_RESIZE;
}
- blk = blk_new(perm, BLK_PERM_ALL);
+ blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL);
bs = bdrv_open(filename, reference, options, flags, errp);
if (!bs) {
blk_unref(blk);
return NULL;
}
- blk->root = bdrv_root_attach_child(bs, "root", &child_root,
+ blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
perm, BLK_PERM_ALL, blk, errp);
if (!blk->root) {
- bdrv_unref(bs);
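+ /* bdrv_root_attach_child() has already dropped the bs reference from bdrv_open() on failure */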
blk_unref(blk);
return NULL;
}
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
{
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
- blk->root = bdrv_root_attach_child(bs, "root", &child_root,
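+ /* This reference is owned by bdrv_root_attach_child() from here on */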
+ bdrv_ref(bs);
+ blk->root = bdrv_root_attach_child(bs, "root", &child_root, blk->ctx,
blk->perm, blk->shared_perm, blk, errp);
if (blk->root == NULL) {
return -EPERM;
}
- bdrv_ref(bs);
notifier_list_notify(&blk->insert_bs_notifiers, blk);
if (tgm->throttle_state) {
blk->allow_write_beyond_eof = allow;
}
+void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
+{
+ blk->allow_aio_context_change = allow;
+}
+
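
A parent that can cope on its own with its backend moving between iothreads opts
in explicitly. A hypothetical sketch of such a user (not part of this patch):

    blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
    blk_set_allow_aio_context_change(blk, true);
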
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
size_t size)
{
}
}
+/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
+uint32_t blk_get_request_alignment(BlockBackend *blk)
+{
+ BlockDriverState *bs = blk_bs(blk);
+ return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
+}
+
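
Callers that allocate their own buffers would typically round offsets and lengths
up to this value; a hypothetical sketch (QEMU_ALIGN_UP() is the existing macro,
`len` is illustrative):

    uint32_t align = blk_get_request_alignment(blk);
    uint64_t padded_len = QEMU_ALIGN_UP(len, align);
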
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
AioContext *blk_get_aio_context(BlockBackend *blk)
{
- return bdrv_get_aio_context(blk_bs(blk));
+ BlockDriverState *bs = blk_bs(blk);
+
+ if (bs) {
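+ /* blk->ctx must always mirror the AioContext of the attached node */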
+ assert(bdrv_get_aio_context(bs) == blk->ctx);
+ }
+
+ return blk->ctx;
}
static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
return blk_get_aio_context(blk_acb->blk);
}
-void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
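+/*
+ * update_root_node is false when the AioContext change is already being
+ * propagated through the node graph (i.e. we are called back from
+ * blk_root_set_aio_ctx()); in that case only the throttling state and
+ * blk->ctx need to be updated here.
+ */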
+static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
+ bool update_root_node, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
+ int ret;
if (bs) {
+ if (update_root_node) {
+ ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
+ errp);
+ if (ret < 0) {
+ return ret;
+ }
+ }
if (tgm->throttle_state) {
bdrv_drained_begin(bs);
throttle_group_detach_aio_context(tgm);
throttle_group_attach_aio_context(tgm, new_context);
bdrv_drained_end(bs);
}
- bdrv_set_aio_context(bs, new_context);
}
+
+ blk->ctx = new_context;
+ return 0;
+}
+
+int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
+ Error **errp)
+{
+ return blk_do_set_aio_context(blk, new_context, true, errp);
+}
+
+static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore, Error **errp)
+{
+ BlockBackend *blk = child->opaque;
+
+ if (blk->allow_aio_context_change) {
+ return true;
+ }
+
+ /* Only manually created BlockBackends that are not attached to anything
+ * can change their AioContext without updating their user. */
+ if (!blk->name || blk->dev) {
+ /* TODO Add BB name/QOM path */
+ error_setg(errp, "Cannot change iothread of active block backend");
+ return false;
+ }
+
+ return true;
+}
+
+static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
+ GSList **ignore)
+{
+ BlockBackend *blk = child->opaque;
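+ /* blk_do_set_aio_context() cannot fail with update_root_node=false */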
+ blk_do_set_aio_context(blk, ctx, false, &error_abort);
}
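
Callers of the now error-returning blk_set_aio_context() can propagate the failure
instead of aborting; a hypothetical call site (iothread_get_aio_context() is the
existing IOThread accessor, the surrounding code is illustrative):

    ret = blk_set_aio_context(blk, iothread_get_aio_context(iothread), errp);
    if (ret < 0) {
        return ret;
    }
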
void blk_add_aio_context_notifier(BlockBackend *blk,