 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
#include "block/accounting.h"
#include "block/block.h"
#include "block/aio-wait.h"
#include "qemu/queue.h"
#include "qemu/coroutine.h"
#include "qemu/stats64.h"
#include "qemu/timer.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_ENCRYPT_FORMAT    "encrypt.format"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_HWVERSION         "hwversion"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
#define BLOCK_OPT_OBJECT_SIZE       "object_size"
#define BLOCK_OPT_REFCOUNT_BITS     "refcount_bits"

#define BLOCK_PROBE_BUF_SIZE        512
enum BdrvTrackedRequestType {
    BDRV_TRACKED_TRUNCATE,

typedef struct BdrvTrackedRequest {
    enum BdrvTrackedRequestType type;

    int64_t overlap_offset;
    uint64_t overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;

    const char *format_name;
    /* Set to true if the BlockDriver is a block filter. Block filters pass
     * certain callbacks that refer to data (see block.c) to their bs->file if
     * the driver doesn't implement them. Drivers that do not wish to forward
     * must implement them and return -ENOTSUP.
     */

    /* For snapshots, a block filter like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter's children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);

    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);

    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);

    /* Drivers implementing neither bdrv_parse_filename nor bdrv_open should
     * have this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the latter is the quorum block driver.
     */
    bool bdrv_needs_filename;
    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    void (*bdrv_join_options)(QDict *options, QDict *old_options);

    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,

    /* Protocol drivers should implement this instead of bdrv_open */
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
    void (*bdrv_close)(BlockDriverState *bs);
    int coroutine_fn (*bdrv_co_create)(BlockdevCreateOptions *opts,
    int coroutine_fn (*bdrv_co_create_opts)(const char *filename,
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs, QDict *options);

    BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,
        int64_t offset, int bytes,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);

    /*
     * @offset: position in bytes to read at
     * @bytes: number of bytes to read
     * @qiov: the buffers to fill with read data
     * @flags: currently unused, always 0
     *
     * @offset and @bytes will be a multiple of 'request_alignment',
     * but the length of individual @qiov elements does not have to
     * be aligned to it.
     *
     * @bytes will always equal the total size of @qiov, and will be
     * no larger than 'max_transfer'.
     *
     * The buffer in @qiov may point directly to guest memory.
     */
    int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
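
    /*
     * Illustrative sketch (not part of this header): a minimal pass-through
     * filter driver could implement .bdrv_co_preadv by forwarding the request
     * to its bs->file child. "hypfilter" is a hypothetical name:
     *
     *   static int coroutine_fn hypfilter_co_preadv(BlockDriverState *bs,
     *       uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
     *   {
     *       // offset/bytes are already aligned to bs->bl.request_alignment
     *       return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
     *   }
     */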
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
    /*
     * @offset: position in bytes to write at
     * @bytes: number of bytes to write
     * @qiov: the buffers containing data to write
     * @flags: zero or more bits allowed by 'supported_write_flags'
     *
     * @offset and @bytes will be a multiple of 'request_alignment',
     * but the length of individual @qiov elements does not have to
     * be aligned to it.
     *
     * @bytes will always equal the total size of @qiov, and will be
     * no larger than 'max_transfer'.
     *
     * The buffer in @qiov may point directly to guest memory.
     */
    int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);

    /*
     * Efficiently zero a region of the disk image. Typically an image format
     * would use a compact metadata representation to implement this. This
     * function pointer may be NULL or return -ENOTSUP and .bdrv_co_writev()
     * will be called instead.
     */
    int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
        int64_t offset, int bytes, BdrvRequestFlags flags);
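
    /*
     * Illustrative sketch (hypothetical driver, not upstream code): a format
     * that can only mark whole clusters as zero might decline unaligned
     * requests so the generic layer falls back to .bdrv_co_pwritev.
     * "hypfmt", BDRVHypfmtState and hypfmt_mark_zero() are made-up names:
     *
     *   static int coroutine_fn hypfmt_co_pwrite_zeroes(BlockDriverState *bs,
     *       int64_t offset, int bytes, BdrvRequestFlags flags)
     *   {
     *       BDRVHypfmtState *s = bs->opaque;
     *
     *       if (offset % s->cluster_size || bytes % s->cluster_size) {
     *           return -ENOTSUP;   // let the block layer write real zeroes
     *       }
     *       return hypfmt_mark_zero(bs, offset, bytes);
     *   }
     */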
    int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
        int64_t offset, int bytes);

    /* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
     * and invoke bdrv_co_copy_range_from(child, ...), or invoke
     * bdrv_co_copy_range_to() if @bs is the leaf child to copy data from.
     *
     * See the comment of bdrv_co_copy_range for the parameter and return value
     * semantics.
     */
    int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
                                                BdrvRequestFlags read_flags,
                                                BdrvRequestFlags write_flags);

    /* Map [offset, offset + nbytes) range onto a child of bs to copy data to,
     * and invoke bdrv_co_copy_range_to(child, src, ...), or perform the copy
     * operation if @bs is the leaf and @src has the same BlockDriver. Return
     * -ENOTSUP if @bs is the leaf but @src has a different BlockDriver.
     *
     * See the comment of bdrv_co_copy_range for the parameter and return value
     * semantics.
     */
    int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
                                              BdrvRequestFlags read_flags,
                                              BdrvRequestFlags write_flags);

    /*
     * Building block for bdrv_block_status[_above] and
     * bdrv_is_allocated[_above]. The driver should answer only
     * according to the current layer, and should only need to set
     * BDRV_BLOCK_DATA, BDRV_BLOCK_ZERO, BDRV_BLOCK_OFFSET_VALID,
     * and/or BDRV_BLOCK_RAW; if the current layer defers to a backing
     * layer, the result should be 0 (and not BDRV_BLOCK_ZERO). See
     * block.h for the overall meaning of the bits. As a hint, the
     * flag want_zero is true if the caller cares more about precise
     * mappings (favor accurate _OFFSET_VALID/_ZERO) or false for
     * overall allocation (favor larger *pnum, perhaps by reporting
     * _DATA instead of _ZERO). The block layer guarantees input
     * clamped to bdrv_getlength() and aligned to request_alignment,
     * as well as non-NULL pnum, map, and file; in turn, the driver
     * must return an error or set pnum to an aligned non-zero value.
     */
    int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
        bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
        int64_t *map, BlockDriverState **file);
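
    /*
     * Illustrative sketch (assumption, not upstream code): a raw-style driver
     * that stores guest data 1:1 in its bs->file child can report every byte
     * as raw data located at the same offset in that child:
     *
     *   static int coroutine_fn hypraw_co_block_status(BlockDriverState *bs,
     *       bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
     *       int64_t *map, BlockDriverState **file)
     *   {
     *       *pnum = bytes;
     *       *map = offset;
     *       *file = bs->file->bs;
     *       return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
     *   }
     */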
    /*
     * Invalidate any cached meta-data.
     */
    void coroutine_fn (*bdrv_co_invalidate_cache)(BlockDriverState *bs,
    int (*bdrv_inactivate)(BlockDriverState *bs);

    /*
     * Flushes all data for all layers by calling bdrv_co_flush for underlying
     * layers, if needed. This function is needed for deterministic
     * synchronization of the flush finishing callback.
     */
    int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example file-posix.c calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the qemu
     * process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);

    /*
     * Drivers setting this field must be able to work with just a plain
     * filename with '<protocol_name>:' as a prefix, and no other options.
     * Options may be extracted from the filename by implementing
     * bdrv_parse_filename.
     */
    const char *protocol_name;
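
    /*
     * Illustrative sketch (hypothetical "foo" protocol, not a real driver):
     * a driver with .protocol_name = "foo" is selected for "foo:..." style
     * filenames, and its .bdrv_parse_filename can strip the prefix with the
     * helper declared near the end of this header:
     *
     *   static void foo_parse_filename(const char *filename, QDict *options,
     *                                  Error **errp)
     *   {
     *       bdrv_parse_filename_strip_prefix(filename, "foo:", options);
     *   }
     */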
    int coroutine_fn (*bdrv_co_truncate)(BlockDriverState *bs, int64_t offset,
                                         PreallocMode prealloc, Error **errp);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);
    BlockMeasureInfo *(*bdrv_measure)(QemuOpts *opts, BlockDriverState *in_bs,

    int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);

    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,

    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs,

    int coroutine_fn (*bdrv_save_vmstate)(BlockDriverState *bs,
    int coroutine_fn (*bdrv_load_vmstate)(BlockDriverState *bs,

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);

    /* removable device specific */
    bool (*bdrv_is_inserted)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);
    int coroutine_fn (*bdrv_co_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;

    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int coroutine_fn (*bdrv_co_check)(BlockDriverState *bs,
                                      BdrvCheckResult *result,

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb,

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkdebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use. Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again. Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);

    /*
     * Try to get @bs's logical and physical block size.
     * On success, store them in @bsz and return zero.
     * On failure, return negative errno.
     */
    int (*bdrv_probe_blocksizes)(BlockDriverState *bs, BlockSizes *bsz);

    /*
     * Try to get @bs's geometry (cyls, heads, sectors)
     * On success, store them in @geo and return 0.
     * On failure return -errno.
     * Only drivers that want to override guest geometry implement this
     * callback; see hd_geometry_guess().
     */
    int (*bdrv_probe_geometry)(BlockDriverState *bs, HDGeometry *geo);
    /*
     * bdrv_co_drain_begin is called if implemented in the beginning of a
     * drain operation to drain and stop any internal sources of requests in
     * the driver.
     * bdrv_co_drain_end is called if implemented at the end of the drain.
     *
     * They should be used by the driver to e.g. manage scheduled I/O
     * requests, or toggle an internal state. After the end of the drain new
     * requests will continue normally.
     */
    void coroutine_fn (*bdrv_co_drain_begin)(BlockDriverState *bs);
    void coroutine_fn (*bdrv_co_drain_end)(BlockDriverState *bs);
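
    /*
     * Illustrative sketch (assumption, not upstream code): a driver that
     * generates background requests from a timer could park it while drained.
     * "hypdrv", BDRVHypdrvState and s->retry_timer are made-up names:
     *
     *   static void coroutine_fn hypdrv_co_drain_begin(BlockDriverState *bs)
     *   {
     *       BDRVHypdrvState *s = bs->opaque;
     *       timer_del(s->retry_timer);   // stop creating new requests
     *   }
     *
     *   static void coroutine_fn hypdrv_co_drain_end(BlockDriverState *bs)
     *   {
     *       BDRVHypdrvState *s = bs->opaque;
     *       timer_mod(s->retry_timer,
     *                 qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
     *   }
     */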
    void (*bdrv_add_child)(BlockDriverState *parent, BlockDriverState *child,
    void (*bdrv_del_child)(BlockDriverState *parent, BdrvChild *child,

    /*
     * Informs the block driver that a permission change is intended. The
     * driver checks whether the change is permissible and may take other
     * preparations for the change (e.g. get file system locks). This operation
     * is always followed by a call to either .bdrv_set_perm or
     * .bdrv_abort_perm_update.
     *
     * Checks whether the requested set of cumulative permissions in @perm
     * can be granted for accessing @bs and whether no other users are using
     * permissions other than those given in @shared (both arguments take
     * BLK_PERM_* bitmasks).
     *
     * If both conditions are met, 0 is returned. Otherwise, -errno is returned
     * and errp is set to an error describing the conflict.
     */
    int (*bdrv_check_perm)(BlockDriverState *bs, uint64_t perm,
                           uint64_t shared, Error **errp);

    /*
     * Called to inform the driver that the cumulative set of used
     * permissions for @bs has changed to @perm, and the set of sharable
     * permissions to @shared. The driver can use this to propagate changes to
     * its children (i.e. request permissions only if a parent actually needs
     * them).
     *
     * This function is only invoked after bdrv_check_perm(), so block drivers
     * may rely on preparations made in their .bdrv_check_perm implementation.
     */
    void (*bdrv_set_perm)(BlockDriverState *bs, uint64_t perm, uint64_t shared);

    /*
     * Called to inform the driver that after a previous bdrv_check_perm()
     * call, the permission update is not performed and any preparations made
     * for it (e.g. taken file locks) need to be undone.
     *
     * This function can be called even for nodes that never saw a
     * bdrv_check_perm() call. It is a no-op then.
     */
    void (*bdrv_abort_perm_update)(BlockDriverState *bs);

    /*
     * Returns in @nperm and @nshared the permissions that the driver for @bs
     * needs on its child @c, based on the cumulative permissions requested by
     * the parents in @parent_perm and @parent_shared.
     *
     * If @c is NULL, return the permissions for attaching a new child for the
     * given role.
     *
     * If @reopen_queue is non-NULL, don't return the currently needed
     * permissions, but those that will be needed after applying the
     * @reopen_queue.
     */
    void (*bdrv_child_perm)(BlockDriverState *bs, BdrvChild *c,
                            const BdrvChildRole *role,
                            BlockReopenQueue *reopen_queue,
                            uint64_t parent_perm, uint64_t parent_shared,
                            uint64_t *nperm, uint64_t *nshared);
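
    /*
     * Illustrative note (assumption): most drivers do not open-code this
     * callback. A filter usually plugs in the default helper declared near
     * the end of this header:
     *
     *   static BlockDriver bdrv_hypfilter = {
     *       .format_name     = "hypfilter",
     *       .bdrv_child_perm = bdrv_filter_default_perms,
     *       ...
     *   };
     *
     * while image formats typically use bdrv_format_default_perms instead.
     */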
    /*
     * Bitmaps should be marked as 'IN_USE' in the image on reopening the
     * image read-write. This handler should realize that, and on success it
     * should also clear the readonly field of the affected BlockDirtyBitmaps.
     */
    int (*bdrv_reopen_bitmaps_rw)(BlockDriverState *bs, Error **errp);
    bool (*bdrv_can_store_new_dirty_bitmap)(BlockDriverState *bs,
                                            uint32_t granularity,
    void (*bdrv_remove_persistent_dirty_bitmap)(BlockDriverState *bs,

    /*
     * Register/unregister a buffer for I/O. For example, when the driver is
     * interested in knowing the memory areas that will later be used in iovs,
     * so that it can do IOMMU mapping with VFIO etc., in order to get better
     * performance. In the case of VFIO drivers, this callback is used to do
     * DMA mapping for hot buffers.
     */
    void (*bdrv_register_buf)(BlockDriverState *bs, void *host, size_t size);
    void (*bdrv_unregister_buf)(BlockDriverState *bs, void *host);

    QLIST_ENTRY(BlockDriver) list;
typedef struct BlockLimits {
    /* Alignment requirement, in bytes, for offset/length of I/O
     * requests. Must be a power of 2 less than INT_MAX; defaults to
     * 1 for drivers with modern byte interfaces, and to 512
     * otherwise. */
    uint32_t request_alignment;

    /* Maximum number of bytes that can be discarded at once (since it
     * is signed, it must be < 2G, if set). Must be a multiple of
     * pdiscard_alignment, but need not be a power of 2. May be 0 if no
     * inherent 32-bit limit */
    int32_t max_pdiscard;

    /* Optimal alignment for discard requests in bytes. A power of 2
     * is best but not mandatory. Must be a multiple of
     * bl.request_alignment, and must be less than max_pdiscard if
     * that is set. May be 0 if bl.request_alignment is good enough */
    uint32_t pdiscard_alignment;

    /* Maximum number of bytes that can be zeroed at once (since it is
     * signed, it must be < 2G, if set). Must be a multiple of
     * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
    int32_t max_pwrite_zeroes;

    /* Optimal alignment for write zeroes requests in bytes. A power
     * of 2 is best but not mandatory. Must be a multiple of
     * bl.request_alignment, and must be less than max_pwrite_zeroes
     * if that is set. May be 0 if bl.request_alignment is good
     * enough */
    uint32_t pwrite_zeroes_alignment;

    /* Optimal transfer length in bytes. A power of 2 is best but not
     * mandatory. Must be a multiple of bl.request_alignment, or 0 if
     * no preferred size */
    uint32_t opt_transfer;

    /* Maximal transfer length in bytes. Need not be a power of 2, but
     * must be a multiple of opt_transfer and bl.request_alignment, or 0
     * for no 32-bit limit. For now, anything larger than INT_MAX is
     * clamped down. */
    uint32_t max_transfer;

    /* memory alignment, in bytes so that no bounce buffer is needed */
    size_t min_mem_alignment;

    /* memory alignment, in bytes, for bounce buffer */
    size_t opt_mem_alignment;

    /* maximum number of iovec elements */

typedef struct BdrvOpBlocker BdrvOpBlocker;
typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    QLIST_ENTRY(BdrvAioNotifier) list;

struct BdrvChildRole {
    /* If true, bdrv_replace_node() doesn't change the node this BdrvChild
     * points to. */

    /* If true, the parent is a BlockDriverState and bdrv_next_all_states()
     * will return it. This information is used for drain_all, where every node
     * will be drained separately, so the drain only needs to be propagated to
     * non-BDS parents. */

    void (*inherit_options)(int *child_flags, QDict *child_options,
                            int parent_flags, QDict *parent_options);

    void (*change_media)(BdrvChild *child, bool load);
    void (*resize)(BdrvChild *child);

    /* Returns a name that is supposedly more useful for human users than the
     * node name for identifying the node in question (in particular, a BB
     * name), or NULL if the parent can't provide a better name. */
    const char *(*get_name)(BdrvChild *child);

    /* Returns a malloced string that describes the parent of the child for a
     * human reader. This could be a node-name, BlockBackend name, qdev ID or
     * QOM path of the device owning the BlockBackend, job type and ID etc. The
     * caller is responsible for freeing the memory. */
    char *(*get_parent_desc)(BdrvChild *child);

    /*
     * If this pair of functions is implemented, the parent doesn't issue new
     * requests after returning from .drained_begin() until .drained_end() is
     * called.
     *
     * These functions must not change the graph (and therefore also must not
     * call aio_poll(), which could change the graph indirectly).
     *
     * Note that this can be nested. If drained_begin() was called twice, new
     * I/O is allowed only after drained_end() was called twice, too.
     */
    void (*drained_begin)(BdrvChild *child);
    void (*drained_end)(BdrvChild *child);

    /*
     * Returns whether the parent has pending requests for the child. This
     * callback is polled after .drained_begin() has been called until all
     * activity on the child has stopped.
     */
    bool (*drained_poll)(BdrvChild *child);

    /* Notifies the parent that the child has been activated/inactivated (e.g.
     * when migration is completing) and it can start/stop requesting
     * permissions and doing I/O on it. */
    void (*activate)(BdrvChild *child, Error **errp);
    int (*inactivate)(BdrvChild *child);

    void (*attach)(BdrvChild *child);
    void (*detach)(BdrvChild *child);

    /* Notifies the parent that the filename of its child has changed (e.g.
     * because the direct child was removed from the backing chain), so that it
     * can update its reference. */
    int (*update_filename)(BdrvChild *child, BlockDriverState *new_base,
                           const char *filename, Error **errp);

extern const BdrvChildRole child_file;
extern const BdrvChildRole child_format;
extern const BdrvChildRole child_backing;
    BlockDriverState *bs;
    const BdrvChildRole *role;

    /*
     * Granted permissions for operating on this BdrvChild (BLK_PERM_* bitmask)
     */

    /*
     * Permissions that can still be granted to other users of @bs while this
     * BdrvChild is still attached to it. (BLK_PERM_* bitmask)
     */
    uint64_t shared_perm;

    /* backup of permissions during permission update procedure */
    bool has_backup_perm;
    uint64_t backup_perm;
    uint64_t backup_shared_perm;

    QLIST_ENTRY(BdrvChild) next;
    QLIST_ENTRY(BdrvChild) next_parent;

/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    /* Protected by big QEMU lock or read-only after opening. No special
     * locking needed during I/O...
     */
    int open_flags; /* flags used to open the file, re-used for re-open */
    bool read_only; /* if true, the media is read only */
    bool encrypted; /* if true, the media is encrypted */
    bool sg;        /* if true, the device is a /dev/sg* */
    bool probed;    /* if true, format was probed rather than specified */
    bool force_share; /* if true, always allow all shared permissions */
    bool implicit;  /* if true, this filter node was automatically inserted */

    BlockDriver *drv; /* NULL means no media */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;
    bool walking_aio_notifiers; /* to make removal during iteration safe */

    char filename[PATH_MAX];
    char backing_file[PATH_MAX]; /* if non-zero, the image is a diff of
                                    this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[PATH_MAX];

    /* Flags honored during pwrite (so far: BDRV_REQ_FUA,
     * BDRV_REQ_WRITE_UNCHANGED).
     * If a driver does not support BDRV_REQ_WRITE_UNCHANGED, those
     * writes will be issued as normal writes without the flag set.
     * This is important to note for drivers that do not explicitly
     * request a WRITE permission for their children and instead take
     * the same permissions as their parent did (this is commonly what
     * block filters do). Such drivers have to be aware that the
     * parent may have taken a WRITE_UNCHANGED permission only and is
     * issuing such requests. Drivers either must make sure that
     * these requests do not result in plain WRITE accesses (usually
     * by supporting BDRV_REQ_WRITE_UNCHANGED, and then forwarding
     * every incoming write request as-is, including potentially that
     * flag), or they have to explicitly take the WRITE permission for
     * their children. */
    unsigned int supported_write_flags;
    /* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
     * BDRV_REQ_MAY_UNMAP, BDRV_REQ_WRITE_UNCHANGED) */
    unsigned int supported_zero_flags;
    /* the following member gives a name to every node on the bs graph. */

    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of all BlockDriverStates (all_bdrv_states) */
    QTAILQ_ENTRY(BlockDriverState) bs_list;
    /* element of the list of monitor-owned BDS */
    QTAILQ_ENTRY(BlockDriverState) monitor_list;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */

    /* The node that this node inherited default options from (and a reopen on
     * which can affect this node by changing these defaults). This is always a
     * parent node of this node. */
    BlockDriverState *inherits_from;
    QLIST_HEAD(, BdrvChild) children;
    QLIST_HEAD(, BdrvChild) parents;

    QDict *explicit_options;
    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;

    /* Protected by AioContext lock */

    /* If we are reading a disk image, give its size in sectors.
     * Generally read-only; it is written to by load_snapshot and
     * save_snapshot, but the block layer is quiescent during those.
     */
    int64_t total_sectors;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* threshold limit for writes, in bytes. "High water mark". */
    uint64_t write_threshold_offset;
    NotifierWithReturn write_threshold_notifier;

    /* Writing to the list requires the BQL _and_ the dirty_bitmap_mutex.
     * Reading from the list can be done with either the BQL or the
     * dirty_bitmap_mutex. Modifying a bitmap only requires
     * dirty_bitmap_mutex. */
    QemuMutex dirty_bitmap_mutex;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;

    /* Offset after the highest byte written to */
    Stat64 wr_highest_offset;

    /* If true, copy read backing sectors into image. Can be >1 if more
     * than one client has requested copy-on-read. Accessed with atomic
     * ops. */
    /* number of in-flight requests; overall and serialising.
     * Accessed with atomic ops.
     */
    unsigned int in_flight;
    unsigned int serialising_in_flight;

    /* counter for nested bdrv_io_plug.
     * Accessed with atomic ops.
     */

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* Accessed with atomic ops. */
    int recursive_quiesce_counter;

    unsigned int write_gen; /* Current data generation */

    /* Protected by reqs_lock. */
    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;
    CoQueue flush_queue; /* Serializing flush queue */
    bool active_flush_req; /* Flush request in flight? */

    /* Only read/written by whoever has set active_flush_req to true. */
    unsigned int flushed_gen; /* Flushed write generation */
struct BlockBackendRootState {
    BlockdevDetectZeroesOptions detect_zeroes;

typedef enum BlockMirrorBackingMode {
    /* Reuse the existing backing chain from the source for the target.
     * - sync=full: Set backing BDS to NULL.
     * - sync=top:  Use source's backing BDS.
     * - sync=none: Use source as the backing BDS. */
    MIRROR_SOURCE_BACKING_CHAIN,

    /* Open the target's backing chain completely anew */
    MIRROR_OPEN_BACKING_CHAIN,

    /* Do not change the target's backing BDS after job completion */
    MIRROR_LEAVE_BACKING_CHAIN,
} BlockMirrorBackingMode;

static inline BlockDriverState *backing_bs(BlockDriverState *bs)
{
    return bs->backing ? bs->backing->bs : NULL;
}

/* Essential block drivers which must always be statically linked into qemu, and
 * which therefore can be accessed without using bdrv_find_format() */
extern BlockDriver bdrv_file;
extern BlockDriver bdrv_raw;
extern BlockDriver bdrv_qcow2;
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
    BdrvRequestFlags flags);

extern unsigned int bdrv_drain_all_count;
void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent);
void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent);

int get_tmp_filename(char *filename, int size);
BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
                            const char *filename);

void bdrv_parse_filename_strip_prefix(const char *filename, const char *prefix,

/*
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);
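
/*
 * Illustrative sketch (assumption, not upstream code): the callback receives
 * the BdrvTrackedRequest describing the write about to be processed as its
 * data argument, and a nonzero return value fails that write. All "hyp_*"
 * names are made up:
 *
 *   static int hyp_before_write(NotifierWithReturn *notifier, void *opaque)
 *   {
 *       BdrvTrackedRequest *req = opaque;
 *
 *       if (req->offset + req->bytes > hyp_limit) {
 *           return -ENOSPC;      // fail the write
 *       }
 *       return 0;                // let the write proceed
 *   }
 *
 *   ...
 *   notifier.notify = hyp_before_write;
 *   bdrv_add_before_write_notifier(bs, &notifier);
 */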
/*
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext. This is only needed by block drivers that manage their
 * own children. Both ->file and ->backing are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/*
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext. This is only needed by block drivers that manage their own
 * children. Both ->file and ->backing are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);

/*
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding the
 * association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to a
 * new AioContext; detach_aio_context() is called before the target BDS is being
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/*
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext. The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                      void (*aio_context_detached)(void *),

/*
 * @bs: The BlockDriverState for which an I/O operation has been completed.
 *
 * Wake up the main thread if it is waiting on BDRV_POLL_WHILE. During
 * synchronous I/O on a BlockDriverState that is attached to another
 * I/O thread, the main thread lets the I/O thread's event loop run,
 * waiting for the I/O operation to complete. A bdrv_wakeup will wake
 * up the main thread if necessary.
 *
 * Manual calls to bdrv_wakeup are rarely necessary, because
 * bdrv_dec_in_flight already calls it.
 */
void bdrv_wakeup(BlockDriverState *bs);
int is_windows_drive(const char *filename);

/*
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 * flatten the whole backing file chain onto @bs.
 * @backing_file_str: The file name that will be written to @bs as the
 * new backing file if the job completes. Ignored if @base is %NULL.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs. Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs. At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @backing_file_str in the written image and to @base in the live
 * BlockDriverState.
 */
void stream_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, const char *backing_file_str,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, Error **errp);
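
/*
 * Illustrative call (assumption, error handling abbreviated): stream the data
 * between "base" and "bs" into "bs", recording base's filename as the new
 * backing file once the job completes:
 *
 *   Error *local_err = NULL;
 *
 *   stream_start("stream0", bs, base, base->filename, JOB_DEFAULT, 0,
 *                BLOCKDEV_ON_ERROR_REPORT, &local_err);
 *   if (local_err) {
 *       error_report_err(local_err);
 *   }
 */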
/*
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @backing_file_str: String to use as the backing file in @top's overlay
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the commit job inserts into the graph above @top. NULL means
 * that a node name should be autogenerated.
 * @errp: Error object.
 */
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top,
                  int creation_flags, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  const char *filter_node_name, Error **errp);

/*
 * commit_active_start:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the commit job inserts into the graph above @bs. NULL means that
 * a node name should be autogenerated.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @auto_complete: Auto complete the job.
 * @errp: Error object.
 */
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp);

/*
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @backing_mode: How to establish the target's backing chain after completion.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @unmap: Whether to unmap target where source sectors only contain zeroes.
 * @filter_node_name: The node name that should be assigned to the filter
 * driver that the mirror job inserts into the graph above @bs. NULL means that
 * a node name should be autogenerated.
 * @copy_mode: When to trigger writes to the target.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs. Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed. At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp);
/*
 * backup_job_create:
 * @job_id: The id of the newly-created job, or %NULL to use the
 * device name of @bs.
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @sync_bitmap: The dirty bitmap if sync_mode is MIRROR_SYNC_MODE_INCREMENTAL.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @creation_flags: Flags that control the behavior of the Job lifetime.
 *                  See @BlockJobCreateFlags
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @txn: Transaction that this job is part of (may be NULL).
 *
 * Create a backup operation on @bs. Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                            BlockDriverState *target, int64_t speed,
                            MirrorSyncMode sync_mode,
                            BdrvDirtyBitmap *sync_bitmap,
                            BlockdevOnError on_source_error,
                            BlockdevOnError on_target_error,
                            BlockCompletionFunc *cb, void *opaque,
                            JobTxn *txn, Error **errp);
void hmp_drive_add_node(Monitor *mon, const char *optstr);

BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
                                  const char *child_name,
                                  const BdrvChildRole *child_role,
                                  uint64_t perm, uint64_t shared_perm,
                                  void *opaque, Error **errp);
void bdrv_root_unref_child(BdrvChild *child);

int bdrv_child_try_set_perm(BdrvChild *c, uint64_t perm, uint64_t shared,

/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
 * block filters: Forward CONSISTENT_READ, WRITE, WRITE_UNCHANGED and RESIZE to
 * the child. */
void bdrv_filter_default_perms(BlockDriverState *bs, BdrvChild *c,
                               const BdrvChildRole *role,
                               BlockReopenQueue *reopen_queue,
                               uint64_t perm, uint64_t shared,
                               uint64_t *nperm, uint64_t *nshared);

/* Default implementation for BlockDriver.bdrv_child_perm() that can be used by
 * (non-raw) image formats: Like above for bs->backing, but for bs->file it
 * requires WRITE | RESIZE for read-write images, always requires
 * CONSISTENT_READ and doesn't share WRITE. */
void bdrv_format_default_perms(BlockDriverState *bs, BdrvChild *c,
                               const BdrvChildRole *role,
                               BlockReopenQueue *reopen_queue,
                               uint64_t perm, uint64_t shared,
                               uint64_t *nperm, uint64_t *nshared);
/*
 * Default implementation for drivers to pass bdrv_co_block_status() to
 * their file.
 */
int coroutine_fn bdrv_co_block_status_from_file(BlockDriverState *bs,
                                                BlockDriverState **file);
/*
 * Default implementation for drivers to pass bdrv_co_block_status() to
 * their backing file.
 */
int coroutine_fn bdrv_co_block_status_from_backing(BlockDriverState *bs,
                                                   BlockDriverState **file);
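
/*
 * Illustrative sketch (assumption): drivers that defer block-status queries
 * entirely to bs->file or bs->backing usually plug one of these helpers
 * straight into their BlockDriver definition:
 *
 *   static BlockDriver bdrv_hyppassthrough = {
 *       .format_name          = "hyppassthrough",
 *       .bdrv_co_block_status = bdrv_co_block_status_from_file,
 *       ...
 *   };
 */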
const char *bdrv_get_parent_name(const BlockDriverState *bs);
void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp);
bool blk_dev_has_removable_media(BlockBackend *blk);
bool blk_dev_has_tray(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);

void bdrv_set_dirty(BlockDriverState *bs, int64_t offset, int64_t bytes);

void bdrv_clear_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap **out);
void bdrv_restore_dirty_bitmap(BdrvDirtyBitmap *bitmap, HBitmap *backup);

void bdrv_inc_in_flight(BlockDriverState *bs);
void bdrv_dec_in_flight(BlockDriverState *bs);

void blockdev_close_all_bdrv_states(void);

int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, uint64_t src_offset,
                                         BdrvChild *dst, uint64_t dst_offset,
                                         BdrvRequestFlags read_flags,
                                         BdrvRequestFlags write_flags);
int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, uint64_t src_offset,
                                       BdrvChild *dst, uint64_t dst_offset,
                                       BdrvRequestFlags read_flags,
                                       BdrvRequestFlags write_flags);

int refresh_total_sectors(BlockDriverState *bs, int64_t hint);

#endif /* BLOCK_INT_H */