return blk_name(child->opaque);
}
+/*
+ * Notifies the user of the BlockBackend that migration has completed. qdev
+ * devices can tighten their permissions in response (specifically revoke
+ * the shared write permissions that were needed for storage migration).
+ *
+ * If an error is reported through @errp, the VM must not be resumed.
+ */
+static void blk_root_activate(BdrvChild *child, Error **errp)
+{
+ BlockBackend *blk = child->opaque;
+ Error *local_err = NULL;
+
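+ /* Nothing to do if permissions were never disabled for migration */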
+ if (!blk->disable_perm) {
+ return;
+ }
+
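+ /* Re-apply the permissions that the user of this BlockBackend
+ * originally requested */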
+ blk->disable_perm = false;
+
+ blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ blk->disable_perm = true;
+ return;
+ }
+}
+
+static int blk_root_inactivate(BdrvChild *child)
+{
+ BlockBackend *blk = child->opaque;
+
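+ /* Permissions are already disabled, nothing more to do */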
+ if (blk->disable_perm) {
+ return 0;
+ }
+
+ /* Only inactivate BlockBackends for guest devices (which are inactive at
+ * this point because the VM is stopped) and unattached monitor-owned
+ * BlockBackends. If there is still any other user, such as a block job,
+ * then we simply can't inactivate the image. */
+ if (!blk->dev && !blk_name(blk)[0]) {
+ return -EPERM;
+ }
+
+ blk->disable_perm = true;
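+ /* Give up all permissions on the node; requesting nothing and sharing
+ * everything cannot conflict with other users, hence &error_abort */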
+ if (blk->root) {
+ bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
+ }
+
+ return 0;
+}
+
static const BdrvChildRole child_root = {
.inherit_options = blk_root_inherit_options,
.drained_begin = blk_root_drained_begin,
.drained_end = blk_root_drained_end,
+
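+ /* Invoked when the attached node is activated or inactivated, e.g.
+ * around migration handover */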
+ .activate = blk_root_activate,
+ .inactivate = blk_root_inactivate,
};
/*
blk->shared_perm = shared_perm;
blk_set_enable_write_cache(blk, true);
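+ /* Protects the two throttled_reqs queues initialized below */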
+ qemu_co_mutex_init(&blk->public.throttled_reqs_lock);
qemu_co_queue_init(&blk->public.throttled_reqs[0]);
qemu_co_queue_init(&blk->public.throttled_reqs[1]);
+ block_acct_init(&blk->stats);
notifier_list_init(&blk->remove_bs_notifiers);
notifier_list_init(&blk->insert_bs_notifiers);
assert(!blk->refcnt);
assert(!blk->name);
assert(!blk->dev);
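+ /* Detach from the throttle group before the backend is freed */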
+ if (blk->public.throttle_state) {
+ blk_io_limits_disable(blk);
+ }
if (blk->root) {
blk_remove_bs(blk);
}
* Return @blk's name, a non-null string.
* Returns an empty string iff @blk is not referenced by the monitor.
*/
-const char *blk_name(BlockBackend *blk)
+const char *blk_name(const BlockBackend *blk)
{
return blk->name ?: "";
}
*shared_perm = blk->shared_perm;
}
-/*
- * Notifies the user of all BlockBackends that migration has completed. qdev
- * devices can tighten their permissions in response (specifically revoke
- * shared write permissions that we needed for storage migration).
- *
- * If an error is returned, the VM cannot be allowed to be resumed.
- */
-void blk_resume_after_migration(Error **errp)
-{
- BlockBackend *blk;
- Error *local_err = NULL;
-
- for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
- if (!blk->disable_perm) {
- continue;
- }
-
- blk->disable_perm = false;
-
- blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- blk->disable_perm = true;
- return;
- }
- }
-}
-
static int blk_do_attach_dev(BlockBackend *blk, void *dev)
{
if (blk->dev) {
co_entry(&rwco);
} else {
Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
- qemu_coroutine_enter(co);
+ bdrv_coroutine_enter(blk_bs(blk), co);
BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
}
}
int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int count, BdrvRequestFlags flags)
+ int bytes, BdrvRequestFlags flags)
{
- return blk_prw(blk, offset, NULL, count, blk_write_entry,
+ return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
flags | BDRV_REQ_ZERO_WRITE);
}
acb->has_returned = false;
co = qemu_coroutine_create(co_entry, acb);
- qemu_coroutine_enter(co);
+ bdrv_coroutine_enter(blk_bs(blk), co);
acb->has_returned = true;
if (acb->rwco.ret != NOT_DONE) {
}
BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
- int64_t offset, int count,
+ int64_t offset, int bytes,
BlockCompletionFunc *cb, void *opaque)
{
- return blk_aio_prwv(blk, offset, count, NULL, blk_aio_pdiscard_entry, 0,
+ return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
cb, opaque);
}
return blk_aio_prwv(blk, req, 0, &qiov, blk_aio_ioctl_entry, 0, cb, opaque);
}
-int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int count)
+int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
- int ret = blk_check_byte_request(blk, offset, count);
+ int ret = blk_check_byte_request(blk, offset, bytes);
if (ret < 0) {
return ret;
}
- return bdrv_co_pdiscard(blk_bs(blk), offset, count);
+ return bdrv_co_pdiscard(blk_bs(blk), offset, bytes);
}
int blk_co_flush(BlockBackend *blk)
}
int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
- int count, BdrvRequestFlags flags)
+ int bytes, BdrvRequestFlags flags)
{
- return blk_co_pwritev(blk, offset, count, NULL,
+ return blk_co_pwritev(blk, offset, bytes, NULL,
flags | BDRV_REQ_ZERO_WRITE);
}
BDRV_REQ_WRITE_COMPRESSED);
}
-int blk_truncate(BlockBackend *blk, int64_t offset)
+int blk_truncate(BlockBackend *blk, int64_t offset, Error **errp)
{
if (!blk_is_available(blk)) {
+ error_setg(errp, "No medium inserted");
return -ENOMEDIUM;
}
- return bdrv_truncate(blk->root, offset);
+ return bdrv_truncate(blk->root, offset, errp);
}
static void blk_pdiscard_entry(void *opaque)
rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, rwco->qiov->size);
}
-int blk_pdiscard(BlockBackend *blk, int64_t offset, int count)
+int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
{
- return blk_prw(blk, offset, NULL, count, blk_pdiscard_entry, 0);
+ return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
}
int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
/* Note that blk->root may not be accessible here yet if we are just
* attaching to a BlockDriverState that is drained. Use child instead. */
- if (blk->public.io_limits_disabled++ == 0) {
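+ /* atomic_fetch_inc() returns the previous value, so throttled requests
+ * are restarted only on the 0 -> 1 transition */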
+ if (atomic_fetch_inc(&blk->public.io_limits_disabled) == 0) {
throttle_group_restart_blk(blk);
}
}
assert(blk->quiesce_counter);
assert(blk->public.io_limits_disabled);
- --blk->public.io_limits_disabled;
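+ /* Pairs with the atomic_fetch_inc() in blk_root_drained_begin() */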
+ atomic_dec(&blk->public.io_limits_disabled);
if (--blk->quiesce_counter == 0) {
if (blk->dev_ops && blk->dev_ops->drained_end) {