typedef enum {
BDRV_REQ_COPY_ON_READ = 0x1,
BDRV_REQ_ZERO_WRITE = 0x2,
- /* The BDRV_REQ_MAY_UNMAP flag is used to indicate that the block driver
- * is allowed to optimize a write zeroes request by unmapping (discarding)
- * blocks if it is guaranteed that the result will read back as
- * zeroes. The flag is only passed to the driver if the block device is
- * opened with BDRV_O_UNMAP.
+
+ /*
+ * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
+ * that the block driver should unmap (discard) blocks if it is guaranteed
+ * that the result will read back as zeroes. The flag is only passed to the
+ * driver if the block device is opened with BDRV_O_UNMAP.
*/
BDRV_REQ_MAY_UNMAP = 0x4,
+
+ /*
+ * The BDRV_REQ_NO_SERIALISING flag is only valid for reads and means that
+ * we don't want to call wait_serialising_requests() during the read
+ * operation.
+ *
+ * This flag is used for backup copy-on-write operations, when we need to
+ * read old data before the write (write notifier triggered). It is okay
+ * since we already waited for other serialising requests in the initiating
+ * write (see bdrv_aligned_pwritev), and it is necessary if the initiating
+ * write is already serialising (without the flag, the read would deadlock
+ * waiting for the serialising write to complete).
+ */
BDRV_REQ_NO_SERIALISING = 0x8,
BDRV_REQ_FUA = 0x10,
BDRV_REQ_WRITE_COMPRESSED = 0x20,
/* Signifies that this write request will not change the visible disk
 * content. */
BDRV_REQ_WRITE_UNCHANGED = 0x40,
+ /*
+ * BDRV_REQ_SERIALISING forces request serialisation for writes.
+ * It is used to ensure that writes to the backing file of a backup process
+ * target cannot race with a read of the backup target that defers to the
+ * backing file.
+ *
+ * Note that BDRV_REQ_SERIALISING is _not_ the opposite in meaning to
+ * BDRV_REQ_NO_SERIALISING. A more descriptive name for the latter might be
+ * _DO_NOT_WAIT_FOR_SERIALISING, except that is too long.
+ */
+ BDRV_REQ_SERIALISING = 0x80,
+
/* Mask of valid flags */
- BDRV_REQ_MASK = 0x7f,
+ BDRV_REQ_MASK = 0xff,
} BdrvRequestFlags;
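/*
 * Illustrative sketch, not part of this header: a hypothetical caller of
 * bdrv_pwrite_zeroes() combining the flags above.
 */
static inline int example_zero_and_maybe_unmap(BdrvChild *child,
                                               int64_t offset, int bytes)
{
    /* BDRV_REQ_MAY_UNMAP lets the driver discard the range instead of
     * writing zeroes, provided it still reads back as zeroes afterwards. */
    return bdrv_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}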
typedef struct BlockSizes {
#define BDRV_O_PROTOCOL 0x8000 /* if no block driver is explicitly given:
select an appropriate protocol driver,
ignoring the format layer */
#define BDRV_O_NO_IO 0x10000 /* don't initialize for I/O */
+#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */
#define BDRV_O_CACHE_MASK (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
#define BDRV_OPT_CACHE_DIRECT "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY "read-only"
+#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
#define BDRV_OPT_DISCARD "discard"
#define BDRV_OPT_FORCE_SHARE "force-share"
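/*
 * Illustrative sketch, assuming the existing bdrv_open()/QDict helpers: a
 * hypothetical caller requesting the new auto-read-only fallback through the
 * option introduced above.
 */
static inline BlockDriverState *example_open_auto_read_only(const char *file,
                                                            Error **errp)
{
    QDict *opts = qdict_new();

    qdict_put_bool(opts, BDRV_OPT_AUTO_READ_ONLY, true);
    /* Degrades to a read-only open if the read-write open fails. */
    return bdrv_open(file, NULL, opts, BDRV_O_RDWR, errp);
}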
typedef struct BDRVReopenState {
BlockDriverState *bs;
int flags;
+ BlockdevDetectZeroesOptions detect_zeroes;
uint64_t perm, shared_perm;
QDict *options;
QDict *explicit_options;
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
- BlockDriverState *bs,
- QDict *options, int flags);
+ BlockDriverState *bs, QDict *options);
int bdrv_reopen_multiple(AioContext *ctx, BlockReopenQueue *bs_queue, Error **errp);
-int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
+int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
+ Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
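/*
 * Illustrative sketch, not part of this patch: with the prototypes above, a
 * hypothetical caller that previously rebuilt flags for bdrv_reopen() can
 * simply toggle the read-only state.
 */
static inline int example_make_writable(BlockDriverState *bs, Error **errp)
{
    /* Reopens @bs read-write; on failure *errp is set and @bs is unchanged. */
    return bdrv_reopen_set_read_only(bs, false, errp);
}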
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);
-/* Returns NULL when bs == NULL */
-AioWait *bdrv_get_aio_wait(BlockDriverState *bs);
-
#define BDRV_POLL_WHILE(bs, cond) ({ \
BlockDriverState *bs_ = (bs); \
- AIO_WAIT_WHILE(bdrv_get_aio_wait(bs_), \
- bdrv_get_aio_context(bs_), \
+ AIO_WAIT_WHILE(bdrv_get_aio_context(bs_), \
cond); })
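/*
 * Illustrative use only, not part of this header: a hypothetical caller that
 * started an asynchronous operation and waits for its completion callback to
 * set a flag.
 */
static inline void example_poll_until_done(BlockDriverState *bs, bool *done)
{
    /* Polls the AioContext of @bs while *done is still false. */
    BDRV_POLL_WHILE(bs, !*done);
}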
-int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
-int bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
+int bdrv_pdiscard(BdrvChild *child, int64_t offset, int bytes);
+int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int bytes);
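/*
 * Illustrative sketch, assuming the driver-internal BlockDriverState layout
 * from block_int.h: with the BdrvChild-based prototypes above, a hypothetical
 * format driver discards through its protocol child rather than through a
 * BlockDriverState.
 */
static inline int example_discard_file_child(BlockDriverState *bs,
                                             int64_t offset, int bytes)
{
    /* bs->file is the format driver's protocol-layer child. */
    return bdrv_pdiscard(bs->file, offset, bytes);
}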
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
bool bdrv_is_read_only(BlockDriverState *bs);
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp);
-int bdrv_set_read_only(BlockDriverState *bs, bool read_only, Error **errp);
+int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
+ Error **errp);
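/*
 * Illustrative sketch, not taken from a real driver: when a read-write open
 * fails due to write protection, a driver can ask the block layer whether
 * degrading to read-only is acceptable (auto-read-only / BDRV_O_AUTO_RDONLY)
 * before retrying.
 */
static inline int example_degrade_to_read_only(BlockDriverState *bs,
                                               Error **errp)
{
    int ret = bdrv_apply_auto_read_only(bs, "image is write protected", errp);
    if (ret < 0) {
        return ret; /* neither read-write nor automatic read-only is allowed */
    }
    /* ... retry the open read-only ... */
    return 0;
}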
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(Error **errp);
+XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
const char *node_name,
Error **errp);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
-ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
+ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
+ Error **errp);
void bdrv_round_to_clusters(BlockDriverState *bs,
int64_t offset, int64_t bytes,
int64_t *cluster_offset,
int64_t *cluster_bytes);
-const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs,
char *filename, int filename_size);
void bdrv_get_full_backing_filename(BlockDriverState *bs,
int path_has_protocol(const char *path);
int path_is_absolute(const char *path);
-void path_combine(char *dest, int dest_size,
- const char *base_path,
- const char *filename);
+char *path_combine(const char *base_path, const char *filename);
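/*
 * Illustrative sketch: path_combine() now returns an allocated string;
 * assuming the usual glib conventions in this code base, the caller releases
 * it with g_free().
 */
static inline void example_path_combine(void)
{
    /* Resolves a backing file name relative to the image that references it,
     * e.g. "/images/base.qcow2" + "overlay.qcow2" -> "/images/overlay.qcow2".
     */
    char *resolved = path_combine("/images/base.qcow2", "overlay.qcow2");
    /* ... use resolved ... */
    g_free(resolved);
}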
int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
bool ignore_bds_parents);
+/**
+ * bdrv_parent_drained_begin_single:
+ *
+ * Begin a quiesced section for the parent of @c. If @poll is true, wait for
+ * any pending activity to cease.
+ */
+void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);
+
/**
* bdrv_parent_drained_end:
*
 */
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, uint64_t src_offset,
BdrvChild *dst, uint64_t dst_offset,
- uint64_t bytes, BdrvRequestFlags flags);
+ uint64_t bytes, BdrvRequestFlags read_flags,
+ BdrvRequestFlags write_flags);
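/*
 * Illustrative call only (hypothetical coroutine caller): read and write
 * flags are now independent, so a copy could, for example, skip request
 * serialisation on the source while requesting FUA semantics on the
 * destination:
 *
 *     bdrv_co_copy_range(src, src_offset, dst, dst_offset, bytes,
 *                        BDRV_REQ_NO_SERIALISING, BDRV_REQ_FUA);
 */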
#endif