#include "qapi-event.h"
#include "qemu/id.h"
#include "trace.h"
+#include "migration/misc.h"
/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64
    NotifierList remove_bs_notifiers, insert_bs_notifiers;
    int quiesce_counter;
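+    /* Registered while the shared_perm update is deferred until migration
+     * completes (see blk_root_activate()) */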
+    VMChangeStateEntry *vmsh;
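+    /* Set via blk_set_force_allow_inactivate(); checked by
+     * blk_can_inactivate() */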
+    bool force_allow_inactivate;
};
typedef struct BlockBackendAIOCB {
static void drive_info_del(DriveInfo *dinfo);
static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
-static char *blk_get_attached_dev_id(BlockBackend *blk);
/* All BlockBackends */
static QTAILQ_HEAD(, BlockBackend) block_backends =
    return blk_name(child->opaque);
}
+static void blk_vm_state_changed(void *opaque, int running, RunState state)
+{
+    Error *local_err = NULL;
+    BlockBackend *blk = opaque;
+
+    if (state == RUN_STATE_INMIGRATE) {
+        return;
+    }
+
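+    /* Migration has finished: remove the handler and apply the shared_perm
+     * update that blk_root_activate() deferred. */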
+    qemu_del_vm_change_state_handler(blk->vmsh);
+    blk->vmsh = NULL;
+    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
+    if (local_err) {
+        error_report_err(local_err);
+    }
+}
+
/*
 * Notifies the user of the BlockBackend that migration has completed. qdev
 * devices can tighten their permissions in response (specifically revoke
    blk->disable_perm = false;
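+    /* Take our permissions with the loosest possible shared permissions
+     * first; they are tightened to blk->shared_perm below or, if migration
+     * is still in progress, once it completes. */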
+    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        blk->disable_perm = true;
+        return;
+    }
+
+    if (runstate_check(RUN_STATE_INMIGRATE)) {
+        /* Activation can happen while the migration process is still
+         * active, for example when nbd_server_add is called during
+         * non-shared storage migration. Defer the shared_perm update to
+         * migration completion. */
+        if (!blk->vmsh) {
+            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
+                                                         blk);
+        }
+        return;
+    }
+
    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
    }
}
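+/* Allow blk to be inactivated even while it holds write permissions, for
+ * callers that know no further writes will reach the image before it is
+ * invalidated again. */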
+void blk_set_force_allow_inactivate(BlockBackend *blk)
+{
+    blk->force_allow_inactivate = true;
+}
+
+static bool blk_can_inactivate(BlockBackend *blk)
+{
+    /* Guest devices and monitor-owned (named) BlockBackends can always be
+     * inactivated. */
+    if (blk->dev || blk_name(blk)[0]) {
+        return true;
+    }
+
+    /* Inactivating means that no more writes can be done to the image, not
+     * even writes that would be invisible to the guest. Block job BBs that
+     * satisfy this can simply be allowed: this is the case for the mirror
+     * job's source, which libvirt's non-shared block migration relies on. */
+    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
+        return true;
+    }
+
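+    /* Everything else can still submit writes, so it may only be
+     * inactivated if the user explicitly allowed it. */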
+    return blk->force_allow_inactivate;
+}
+
static int blk_root_inactivate(BdrvChild *child)
{
    BlockBackend *blk = child->opaque;
        return 0;
    }
-    /* Only inactivate BlockBackends for guest devices (which are inactive at
-     * this point because the VM is stopped) and unattached monitor-owned
-     * BlockBackends. If there is still any other user like a block job, then
-     * we simply can't inactivate the image. */
-    if (!blk->dev && !blk_name(blk)[0]) {
+    if (!blk_can_inactivate(blk)) {
        return -EPERM;
    }
    blk->shared_perm = shared_perm;
    blk_set_enable_write_cache(blk, true);
-    qemu_co_mutex_init(&blk->public.throttled_reqs_lock);
-    qemu_co_queue_init(&blk->public.throttled_reqs[0]);
-    qemu_co_queue_init(&blk->public.throttled_reqs[1]);
    block_acct_init(&blk->stats);
    notifier_list_init(&blk->remove_bs_notifiers);
    assert(!blk->refcnt);
    assert(!blk->name);
    assert(!blk->dev);
-    if (blk->public.throttle_state) {
+    if (blk->public.throttle_group_member.throttle_state) {
        blk_io_limits_disable(blk);
    }
    if (blk->root) {
        blk_remove_bs(blk);
    }
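+    /* blk_root_activate() may have deferred the shared_perm update and left
+     * its change state handler registered; drop it before freeing blk. */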
+    if (blk->vmsh) {
+        qemu_del_vm_change_state_handler(blk->vmsh);
+        blk->vmsh = NULL;
+    }
    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
    QTAILQ_REMOVE(&block_backends, blk, link);
 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 * ones which are hidden (i.e. are not referenced by the monitor).
 */
-static BlockBackend *blk_all_next(BlockBackend *blk)
+BlockBackend *blk_all_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link)
               : QTAILQ_FIRST(&block_backends);
 */
void blk_remove_bs(BlockBackend *blk)
{
+    ThrottleTimers *tt;
+
    notifier_list_notify(&blk->remove_bs_notifiers, blk);
-    if (blk->public.throttle_state) {
-        throttle_timers_detach_aio_context(&blk->public.throttle_timers);
+    if (blk->public.throttle_group_member.throttle_state) {
+        tt = &blk->public.throttle_group_member.throttle_timers;
+        throttle_timers_detach_aio_context(tt);
    }
    blk_update_root_state(blk);
    bdrv_ref(bs);
    notifier_list_notify(&blk->insert_bs_notifiers, blk);
-    if (blk->public.throttle_state) {
+    if (blk->public.throttle_group_member.throttle_state) {
        throttle_timers_attach_aio_context(
-            &blk->public.throttle_timers, bdrv_get_aio_context(bs));
+            &blk->public.throttle_group_member.throttle_timers,
+            bdrv_get_aio_context(bs));
    }
    return 0;
/* Return the qdev ID of the block device attached to the BlockBackend or, if
 * no ID is assigned, its QOM path. */
-static char *blk_get_attached_dev_id(BlockBackend *blk)
+char *blk_get_attached_dev_id(BlockBackend *blk)
{
    DeviceState *dev;
    bdrv_inc_in_flight(bs);
    /* throttling disk I/O */
-    if (blk->public.throttle_state) {
-        throttle_group_co_io_limits_intercept(blk, bytes, false);
+    if (blk->public.throttle_group_member.throttle_state) {
+        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
+                                              bytes, false);
    }
    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
}
    bdrv_inc_in_flight(bs);
-
    /* throttling disk I/O */
-    if (blk->public.throttle_state) {
-        throttle_group_co_io_limits_intercept(blk, bytes, true);
+    if (blk->public.throttle_group_member.throttle_state) {
+        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
+                                              bytes, true);
    }
    if (!blk->enable_write_cache) {
void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    BlockDriverState *bs = blk_bs(blk);
+    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
    if (bs) {
-        if (blk->public.throttle_state) {
-            throttle_timers_detach_aio_context(&blk->public.throttle_timers);
+        if (tgm->throttle_state) {
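+            /* Re-home the throttle timers in the new AioContext */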
+            throttle_group_detach_aio_context(tgm);
+            throttle_group_attach_aio_context(tgm, new_context);
        }
        bdrv_set_aio_context(bs, new_context);
-        if (blk->public.throttle_state) {
-            throttle_timers_attach_aio_context(&blk->public.throttle_timers,
-                                               new_context);
-        }
    }
}
/* throttling disk I/O limits */
void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
{
-    throttle_group_config(blk, cfg);
+    throttle_group_config(&blk->public.throttle_group_member, cfg);
}
void blk_io_limits_disable(BlockBackend *blk)
{
-    assert(blk->public.throttle_state);
+    assert(blk->public.throttle_group_member.throttle_state);
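+    /* Drain first so that no throttled request is still in flight when the
+     * member is unregistered from its group */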
    bdrv_drained_begin(blk_bs(blk));
-    throttle_group_unregister_blk(blk);
+    throttle_group_unregister_tgm(&blk->public.throttle_group_member);
    bdrv_drained_end(blk_bs(blk));
}
/* Should be called before blk_set_io_limits() if a limit is set */
void blk_io_limits_enable(BlockBackend *blk, const char *group)
{
-    assert(!blk->public.throttle_state);
-    throttle_group_register_blk(blk, group);
+    assert(!blk->public.throttle_group_member.throttle_state);
+    throttle_group_register_tgm(&blk->public.throttle_group_member,
+                                group, blk_get_aio_context(blk));
}
void blk_io_limits_update_group(BlockBackend *blk, const char *group)
{
    /* this BB is not part of any group */
-    if (!blk->public.throttle_state) {
+    if (!blk->public.throttle_group_member.throttle_state) {
        return;
    }
    /* this BB is already part of the group we want */
-    if (!g_strcmp0(throttle_group_get_name(blk), group)) {
+    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
+                   group)) {
        return;
    }
    /* Note that blk->root may not be accessible here yet if we are just
     * attaching to a BlockDriverState that is drained. Use child instead. */
-    if (atomic_fetch_inc(&blk->public.io_limits_disabled) == 0) {
-        throttle_group_restart_blk(blk);
+    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled)
+        == 0) {
+        throttle_group_restart_tgm(&blk->public.throttle_group_member);
    }
}
    BlockBackend *blk = child->opaque;
    assert(blk->quiesce_counter);
-    assert(blk->public.io_limits_disabled);
-    atomic_dec(&blk->public.io_limits_disabled);
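+    /* Balances the atomic_fetch_inc() in blk_root_drained_begin() */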
+    assert(blk->public.throttle_group_member.io_limits_disabled);
+    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
    if (--blk->quiesce_counter == 0) {
        if (blk->dev_ops && blk->dev_ops->drained_end) {