/*
 * QEMU System Emulator block driver
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#ifndef BLOCK_INT_H
#define BLOCK_INT_H
#include "block/accounting.h"
#include "block/block.h"
#include "qemu/option.h"
#include "qemu/queue.h"
#include "block/coroutine.h"
#include "qemu/timer.h"
#include "qapi-types.h"
#include "qapi/qmp/qerror.h"
#include "monitor/monitor.h"
#include "qemu/hbitmap.h"
#include "block/snapshot.h"
#include "qemu/main-loop.h"
#include "qemu/throttle.h"
#define BLOCK_FLAG_ENCRYPT          1
#define BLOCK_FLAG_COMPAT6          4
#define BLOCK_FLAG_LAZY_REFCOUNTS   8

#define BLOCK_OPT_SIZE              "size"
#define BLOCK_OPT_ENCRYPT           "encryption"
#define BLOCK_OPT_COMPAT6           "compat6"
#define BLOCK_OPT_BACKING_FILE      "backing_file"
#define BLOCK_OPT_BACKING_FMT       "backing_fmt"
#define BLOCK_OPT_CLUSTER_SIZE      "cluster_size"
#define BLOCK_OPT_TABLE_SIZE        "table_size"
#define BLOCK_OPT_PREALLOC          "preallocation"
#define BLOCK_OPT_SUBFMT            "subformat"
#define BLOCK_OPT_COMPAT_LEVEL      "compat"
#define BLOCK_OPT_LAZY_REFCOUNTS    "lazy_refcounts"
#define BLOCK_OPT_ADAPTER_TYPE      "adapter_type"
#define BLOCK_OPT_REDUNDANCY        "redundancy"
#define BLOCK_OPT_NOCOW             "nocow"
typedef struct BdrvTrackedRequest {
    int64_t overlap_offset;
    unsigned int overlap_bytes;

    QLIST_ENTRY(BdrvTrackedRequest) list;
    Coroutine *co; /* owner, used for deadlock detection */
    CoQueue wait_queue; /* coroutines blocked on this request */

    struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

struct BlockDriver {
    const char *format_name;
    /* set to true if the BlockDriver is a block filter */
    bool is_filter;
    /* for snapshots, block filters like Quorum can implement the
     * following recursive callback.
     * Its purpose is to recurse on the filter's children while calling
     * bdrv_recurse_is_first_non_filter on them.
     * For a sample implementation, see the Quorum block filter.
     */
    bool (*bdrv_recurse_is_first_non_filter)(BlockDriverState *bs,
                                             BlockDriverState *candidate);
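    /*
     * Illustrative sketch (not part of the original header): a filter driver
     * with several children could implement the recursion roughly as below.
     * BDRVMyFilterState, num_children and children[] are hypothetical names.
     *
     *   static bool myfilter_recurse_is_first_non_filter(BlockDriverState *bs,
     *                                                    BlockDriverState *candidate)
     *   {
     *       BDRVMyFilterState *s = bs->opaque;
     *       int i;
     *
     *       for (i = 0; i < s->num_children; i++) {
     *           if (bdrv_recurse_is_first_non_filter(s->children[i],
     *                                                candidate)) {
     *               return true;
     *           }
     *       }
     *       return false;
     *   }
     */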
    int (*bdrv_probe)(const uint8_t *buf, int buf_size, const char *filename);
    int (*bdrv_probe_device)(const char *filename);
    /* Any driver implementing this callback is expected to be able to handle
     * NULL file names in its .bdrv_open() implementation */
    void (*bdrv_parse_filename)(const char *filename, QDict *options, Error **errp);
    /* Drivers not implementing bdrv_parse_filename nor bdrv_open should have
     * this field set to true, except ones that are defined only by their
     * child's bs.
     * An example of the last type is the quorum block driver.
     */
    bool bdrv_needs_filename;
    /* Set if a driver can support backing files */
    bool supports_backing;

    /* For handling image reopen for split or non-split files */
    int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
                               BlockReopenQueue *queue, Error **errp);
    void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
    void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
    int (*bdrv_open)(BlockDriverState *bs, QDict *options, int flags,
                     Error **errp);
    int (*bdrv_file_open)(BlockDriverState *bs, QDict *options, int flags,
                          Error **errp);
    int (*bdrv_read)(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors);
    int (*bdrv_write)(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors);
    void (*bdrv_close)(BlockDriverState *bs);
    void (*bdrv_rebind)(BlockDriverState *bs);
    int (*bdrv_create)(const char *filename, QemuOpts *opts, Error **errp);
    int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
    int (*bdrv_make_empty)(BlockDriverState *bs);

    void (*bdrv_refresh_filename)(BlockDriverState *bs);
    BlockAIOCB *(*bdrv_aio_readv)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_writev)(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
        BlockCompletionFunc *cb, void *opaque);
    BlockAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque);

    int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
    /*
     * Efficiently zero a region of the disk image.  Typically an image format
     * would use a compact metadata representation to implement this.  This
     * function pointer may be NULL and .bdrv_co_writev() will be called
     * instead.
     */
    int coroutine_fn (*bdrv_co_write_zeroes)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
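    /*
     * Illustrative sketch (not from the original header): a format driver
     * will often handle only cluster-aligned ranges here and return -ENOTSUP
     * for the rest, on the assumption that the generic block layer then falls
     * back to writing explicit zero buffers.  The mydrv_* names and the
     * cluster_sectors field are hypothetical.
     *
     *   static int coroutine_fn mydrv_co_write_zeroes(BlockDriverState *bs,
     *       int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
     *   {
     *       BDRVMyDrvState *s = bs->opaque;
     *
     *       if (sector_num % s->cluster_sectors ||
     *           nb_sectors % s->cluster_sectors) {
     *           return -ENOTSUP;
     *       }
     *       return mydrv_zero_clusters(bs, sector_num, nb_sectors);
     *   }
     */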
    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors);
    int64_t coroutine_fn (*bdrv_co_get_block_status)(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, int *pnum);
    /*
     * Invalidate any cached meta-data.
     */
    void (*bdrv_invalidate_cache)(BlockDriverState *bs, Error **errp);

    /*
     * Flushes all data that was already written to the OS all the way down to
     * the disk (for example raw-posix calls fsync()).
     */
    int coroutine_fn (*bdrv_co_flush_to_disk)(BlockDriverState *bs);

    /*
     * Flushes all internal caches to the OS. The data may still sit in a
     * writeback cache of the host OS, but it will survive a crash of the
     * QEMU process.
     */
    int coroutine_fn (*bdrv_co_flush_to_os)(BlockDriverState *bs);
    const char *protocol_name;
    int (*bdrv_truncate)(BlockDriverState *bs, int64_t offset);

    int64_t (*bdrv_getlength)(BlockDriverState *bs);
    bool has_variable_length;
    int64_t (*bdrv_get_allocated_file_size)(BlockDriverState *bs);

    int (*bdrv_write_compressed)(BlockDriverState *bs, int64_t sector_num,
                                 const uint8_t *buf, int nb_sectors);
    int (*bdrv_snapshot_create)(BlockDriverState *bs,
                                QEMUSnapshotInfo *sn_info);
    int (*bdrv_snapshot_goto)(BlockDriverState *bs,
                              const char *snapshot_id);
    int (*bdrv_snapshot_delete)(BlockDriverState *bs,
                                const char *snapshot_id,
                                const char *name,
                                Error **errp);
    int (*bdrv_snapshot_list)(BlockDriverState *bs,
                              QEMUSnapshotInfo **psn_info);
    int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
                                  const char *snapshot_id,
                                  const char *name,
                                  Error **errp);
    int (*bdrv_get_info)(BlockDriverState *bs, BlockDriverInfo *bdi);
    ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs);

    int (*bdrv_save_vmstate)(BlockDriverState *bs, QEMUIOVector *qiov,
                             int64_t pos);
    int (*bdrv_load_vmstate)(BlockDriverState *bs, uint8_t *buf,
                             int64_t pos, int size);

    int (*bdrv_change_backing_file)(BlockDriverState *bs,
        const char *backing_file, const char *backing_fmt);
    /* removable device specific */
    int (*bdrv_is_inserted)(BlockDriverState *bs);
    int (*bdrv_media_changed)(BlockDriverState *bs);
    void (*bdrv_eject)(BlockDriverState *bs, bool eject_flag);
    void (*bdrv_lock_medium)(BlockDriverState *bs, bool locked);

    /* to control generic scsi devices */
    int (*bdrv_ioctl)(BlockDriverState *bs, unsigned long int req, void *buf);
    BlockAIOCB *(*bdrv_aio_ioctl)(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockCompletionFunc *cb, void *opaque);

    /* List of options for creating images, terminated by name == NULL */
    QemuOptsList *create_opts;
    /*
     * Returns 0 for completed check, -errno for internal errors.
     * The check results are stored in result.
     */
    int (*bdrv_check)(BlockDriverState *bs, BdrvCheckResult *result,
                      BdrvCheckMode fix);

    int (*bdrv_amend_options)(BlockDriverState *bs, QemuOpts *opts,
                              BlockDriverAmendStatusCB *status_cb);

    void (*bdrv_debug_event)(BlockDriverState *bs, BlkDebugEvent event);

    /* TODO Better pass an option string/QDict/QemuOpts to add any rule? */
    int (*bdrv_debug_breakpoint)(BlockDriverState *bs, const char *event,
                                 const char *tag);
    int (*bdrv_debug_remove_breakpoint)(BlockDriverState *bs,
                                        const char *tag);
    int (*bdrv_debug_resume)(BlockDriverState *bs, const char *tag);
    bool (*bdrv_debug_is_suspended)(BlockDriverState *bs, const char *tag);
    void (*bdrv_refresh_limits)(BlockDriverState *bs, Error **errp);

    /*
     * Returns 1 if newly created images are guaranteed to contain only
     * zeros, 0 otherwise.
     */
    int (*bdrv_has_zero_init)(BlockDriverState *bs);

    /* Remove fd handlers, timers, and other event loop callbacks so the event
     * loop is no longer in use.  Called with no in-flight requests and in
     * depth-first traversal order with parents before child nodes.
     */
    void (*bdrv_detach_aio_context)(BlockDriverState *bs);

    /* Add fd handlers, timers, and other event loop callbacks so I/O requests
     * can be processed again.  Called with no in-flight requests and in
     * depth-first traversal order with child nodes before parent nodes.
     */
    void (*bdrv_attach_aio_context)(BlockDriverState *bs,
                                    AioContext *new_context);

    /* io queue for linux-aio */
    void (*bdrv_io_plug)(BlockDriverState *bs);
    void (*bdrv_io_unplug)(BlockDriverState *bs);
    void (*bdrv_flush_io_queue)(BlockDriverState *bs);

    QLIST_ENTRY(BlockDriver) list;
};
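/*
 * Illustrative sketch (not part of the original header): a driver normally
 * fills in a static BlockDriver with the callbacks it supports and registers
 * it from a block_init() constructor via bdrv_register() (both declared
 * elsewhere in the block layer).  All mydrv_* names are hypothetical.
 *
 *   static BlockDriver bdrv_mydrv = {
 *       .format_name     = "mydrv",
 *       .bdrv_probe      = mydrv_probe,
 *       .bdrv_open       = mydrv_open,
 *       .bdrv_close      = mydrv_close,
 *       .bdrv_co_readv   = mydrv_co_readv,
 *       .bdrv_co_writev  = mydrv_co_writev,
 *       .bdrv_getlength  = mydrv_getlength,
 *       .create_opts     = &mydrv_create_opts,
 *   };
 *
 *   static void bdrv_mydrv_init(void)
 *   {
 *       bdrv_register(&bdrv_mydrv);
 *   }
 *   block_init(bdrv_mydrv_init);
 */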
typedef struct BlockLimits {
    /* maximum number of sectors that can be discarded at once */
    int max_discard;

    /* optimal alignment for discard requests in sectors */
    int64_t discard_alignment;

    /* maximum number of sectors that can be zeroed at once */
    int max_write_zeroes;

    /* optimal alignment for write zeroes requests in sectors */
    int64_t write_zeroes_alignment;

    /* optimal transfer length in sectors */
    int opt_transfer_length;

    /* maximal transfer length in sectors */
    int max_transfer_length;

    /* memory alignment so that no bounce buffer is needed */
    size_t opt_mem_alignment;
} BlockLimits;
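/*
 * Illustrative sketch (not part of the original header): a protocol driver
 * that knows its device's limits would typically publish them from its
 * .bdrv_refresh_limits() callback into the BlockLimits member of the
 * BlockDriverState (bs->bl below).  The mydrv_* names and the fields of
 * BDRVMyDrvState are hypothetical.
 *
 *   static void mydrv_refresh_limits(BlockDriverState *bs, Error **errp)
 *   {
 *       BDRVMyDrvState *s = bs->opaque;
 *
 *       bs->bl.max_discard          = s->max_unmap_sectors;
 *       bs->bl.discard_alignment    = s->unmap_granularity;
 *       bs->bl.max_write_zeroes     = s->max_ws_sectors;
 *       bs->bl.opt_transfer_length  = s->opt_io_sectors;
 *   }
 */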
typedef struct BdrvOpBlocker BdrvOpBlocker;

typedef struct BdrvAioNotifier {
    void (*attached_aio_context)(AioContext *new_context, void *opaque);
    void (*detach_aio_context)(void *opaque);

    void *opaque;

    QLIST_ENTRY(BdrvAioNotifier) list;
} BdrvAioNotifier;
/*
 * Note: the function bdrv_append() copies and swaps contents of
 * BlockDriverStates, so if you add new fields to this struct, please
 * inspect bdrv_append() to determine if the new fields need to be
 * copied as well.
 */
struct BlockDriverState {
    int64_t total_sectors; /* if we are reading a disk image, give its
                              size in sectors */
    int read_only; /* if true, the media is read only */
    int open_flags; /* flags used to open the file, re-used for re-open */
    int encrypted; /* if true, the media is encrypted */
    int valid_key; /* if true, a valid encryption key has been set */
    int sg;        /* if true, the device is a /dev/sg* */
    int copy_on_read; /* if true, copy read backing sectors into image;
                         note this is a reference count */

    BlockDriver *drv; /* NULL means no media */
    void *opaque;

    BlockBackend *blk;          /* owning backend, if any */

    AioContext *aio_context; /* event loop used for fd handlers, timers, etc */
    /* long-running tasks intended to always use the same AioContext as this
     * BDS may register themselves in this list to be notified of changes
     * regarding this BDS's context */
    QLIST_HEAD(, BdrvAioNotifier) aio_notifiers;

    char backing_file[1024]; /* if non-empty, the image is a diff of
                                this file image */
    char backing_format[16]; /* if non-zero and backing_file exists */

    QDict *full_open_options;
    char exact_filename[1024];

    BlockDriverState *backing_hd;
    BlockDriverState *file;

    NotifierList close_notifiers;

    /* Callback before write request is processed */
    NotifierWithReturnList before_write_notifiers;

    /* number of in-flight serialising requests */
    unsigned int serialising_in_flight;

    /* I/O throttling */
    ThrottleState throttle_state;
    CoQueue throttled_reqs[2];
    bool io_limits_enabled;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    /* I/O limits published by the driver (see BlockLimits above) */
    BlockLimits bl;

    /* Whether the disk can expand beyond total_sectors */
    int growable;

    /* Whether reads beyond the end of the file produce zeros */
    bool zero_beyond_eof;

    /* Alignment requirement for offset/length of I/O requests */
    unsigned int request_alignment;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* do we need to tell the guest if we have a volatile write cache? */
    int enable_write_cache;

    /* NOTE: the following fields are only hints for real hardware
       drivers.  They are not used by the block driver. */
    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;

    /* the following member gives a name to every node on the bs graph. */
    char node_name[32];
    /* element of the list of named nodes building the graph */
    QTAILQ_ENTRY(BlockDriverState) node_list;
    /* element of the list of "drives" the guest sees */
    QTAILQ_ENTRY(BlockDriverState) device_list;
    QLIST_HEAD(, BdrvDirtyBitmap) dirty_bitmaps;

    QLIST_HEAD(, BdrvTrackedRequest) tracked_requests;

    /* operation blockers */
    QLIST_HEAD(, BdrvOpBlocker) op_blockers[BLOCK_OP_TYPE_MAX];

    /* long-running background operation */
    BlockJob *job;

    BlockdevDetectZeroesOptions detect_zeroes;

    /* The error object in use for blocking operations on backing_hd */
    Error *backing_blocker;
};
int get_tmp_filename(char *filename, int size);

void bdrv_set_io_limits(BlockDriverState *bs,
                        ThrottleConfig *cfg);
/**
 * bdrv_add_before_write_notifier:
 *
 * Register a callback that is invoked before write requests are processed but
 * after any throttling or waiting for overlapping requests.
 */
void bdrv_add_before_write_notifier(BlockDriverState *bs,
                                    NotifierWithReturn *notifier);
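/*
 * Illustrative sketch (not part of the original header): a caller embeds a
 * NotifierWithReturn, points its notify callback at a handler and registers
 * it; returning a negative errno from the handler fails the write.  The my_*
 * names, and the assumption that the notifier data is the BdrvTrackedRequest
 * being written, are illustrative only.
 *
 *   static int my_before_write(NotifierWithReturn *notifier, void *opaque)
 *   {
 *       BdrvTrackedRequest *req = opaque;
 *
 *       return my_copy_out_old_data(req->overlap_offset, req->overlap_bytes);
 *   }
 *
 *   job->before_write.notify = my_before_write;
 *   bdrv_add_before_write_notifier(bs, &job->before_write);
 */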
/**
 * bdrv_detach_aio_context:
 *
 * May be called from .bdrv_detach_aio_context() to detach children from the
 * current #AioContext.  This is only needed by block drivers that manage their
 * own children.  Both ->file and ->backing_hd are automatically handled and
 * block drivers should not call this function on them explicitly.
 */
void bdrv_detach_aio_context(BlockDriverState *bs);

/**
 * bdrv_attach_aio_context:
 *
 * May be called from .bdrv_attach_aio_context() to attach children to the new
 * #AioContext.  This is only needed by block drivers that manage their own
 * children.  Both ->file and ->backing_hd are automatically handled and block
 * drivers should not call this function on them explicitly.
 */
void bdrv_attach_aio_context(BlockDriverState *bs,
                             AioContext *new_context);
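/*
 * Illustrative sketch (not part of the original header): a driver that keeps
 * extra children of its own forwards both callbacks to them.  The
 * BDRVMyDrvState, num_children and children[] names are hypothetical.
 *
 *   static void mydrv_detach_aio_context(BlockDriverState *bs)
 *   {
 *       BDRVMyDrvState *s = bs->opaque;
 *       int i;
 *
 *       for (i = 0; i < s->num_children; i++) {
 *           bdrv_detach_aio_context(s->children[i]);
 *       }
 *   }
 *
 *   static void mydrv_attach_aio_context(BlockDriverState *bs,
 *                                        AioContext *new_context)
 *   {
 *       BDRVMyDrvState *s = bs->opaque;
 *       int i;
 *
 *       for (i = 0; i < s->num_children; i++) {
 *           bdrv_attach_aio_context(s->children[i], new_context);
 *       }
 *   }
 */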
/**
 * bdrv_add_aio_context_notifier:
 *
 * If a long-running job intends to be always run in the same AioContext as a
 * certain BDS, it may use this function to be notified of changes regarding the
 * association of the BDS to an AioContext.
 *
 * attached_aio_context() is called after the target BDS has been attached to a
 * new AioContext; detach_aio_context() is called before the target BDS is being
 * detached from its old AioContext.
 */
void bdrv_add_aio_context_notifier(BlockDriverState *bs,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque);

/**
 * bdrv_remove_aio_context_notifier:
 *
 * Unsubscribe from change notifications regarding the BDS's AioContext.  The
 * parameters given here have to be the same as those given to
 * bdrv_add_aio_context_notifier().
 */
void bdrv_remove_aio_context_notifier(BlockDriverState *bs,
                                      void (*aio_context_attached)(AioContext *,
                                                                   void *),
                                      void (*aio_context_detached)(void *),
                                      void *opaque);
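/*
 * Illustrative sketch (not part of the original header): a job registers a
 * pair of callbacks and later removes them with exactly the same arguments.
 * MyJob and the my_job_* names are hypothetical.
 *
 *   static void my_job_attached(AioContext *new_context, void *opaque)
 *   {
 *       MyJob *job = opaque;
 *       job->ctx = new_context;
 *   }
 *
 *   static void my_job_detached(void *opaque)
 *   {
 *       MyJob *job = opaque;
 *       job->ctx = NULL;
 *   }
 *
 *   bdrv_add_aio_context_notifier(bs, my_job_attached, my_job_detached, job);
 *   ...
 *   bdrv_remove_aio_context_notifier(bs, my_job_attached, my_job_detached, job);
 */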
int is_windows_drive(const char *filename);
/**
 * stream_start:
 * @bs: Block device to operate on.
 * @base: Block device that will become the new base, or %NULL to
 *        flatten the whole backing file chain onto @bs.
 * @base_id: The file name that will be written to @bs as the new
 *           backing file if the job completes.  Ignored if @base is %NULL.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a streaming operation on @bs.  Clusters that are unallocated
 * in @bs, but allocated in any image between @base and @bs (both
 * exclusive) will be written to @bs.  At the end of a successful
 * streaming job, the backing file of @bs will be changed to
 * @base_id in the written image and to @base in the live BlockDriverState.
 */
void stream_start(BlockDriverState *bs, BlockDriverState *base,
                  const char *base_id, int64_t speed, BlockdevOnError on_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
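/*
 * Illustrative sketch (not part of the original header): streaming everything
 * above "base.qcow2" into @bs at unlimited speed.  my_stream_done, base_bs
 * and local_err are hypothetical; the error policy and filename are only
 * examples.
 *
 *   static void my_stream_done(void *opaque, int ret)
 *   {
 *       error_report("stream finished: %d", ret);
 *   }
 *
 *   stream_start(bs, base_bs, "base.qcow2", 0, BLOCKDEV_ON_ERROR_REPORT,
 *                my_stream_done, NULL, &local_err);
 */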
/**
 * commit_start:
 * @bs: Active block device.
 * @top: Top block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @backing_file_str: String to use as the backing file in @top's overlay.
 * @errp: Error object.
 */
void commit_start(BlockDriverState *bs, BlockDriverState *base,
                  BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, BlockCompletionFunc *cb,
                  void *opaque, const char *backing_file_str, Error **errp);
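/*
 * Illustrative sketch (not part of the original header): committing @top into
 * @base at unlimited speed.  my_commit_done, base_bs, top_bs and local_err
 * are hypothetical; passing NULL for @backing_file_str is assumed here to
 * keep the default backing file name.
 *
 *   commit_start(bs, base_bs, top_bs, 0, BLOCKDEV_ON_ERROR_REPORT,
 *                my_commit_done, NULL, NULL, &local_err);
 */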
/**
 * commit_active_start:
 * @bs: Active block device to be committed.
 * @base: Block device that will be written into, and become the new top.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @on_error: The action to take upon error.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 */
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp);
/**
 * mirror_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @replaces: Block graph node name to replace once the mirror is done. Can
 *            only be used when full mirroring is selected.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @granularity: The chosen granularity for the dirty bitmap.
 * @buf_size: The amount of data that can be in flight at one time.
 * @mode: Whether to collapse all images in the chain to the target.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 * @errp: Error object.
 *
 * Start a mirroring operation on @bs.  Clusters that are allocated
 * in @bs will be written to @target until the job is cancelled or
 * manually completed.  At the end of a successful mirroring job,
 * @bs will be switched to read from @target.
 */
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp);
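/*
 * Illustrative sketch (not part of the original header): a full mirror of @bs
 * to target_bs.  Zero is passed for the speed, granularity and buffer size on
 * the assumption that the job applies its defaults; my_mirror_done, target_bs
 * and local_err are hypothetical.
 *
 *   mirror_start(bs, target_bs, NULL, 0, 0, 0,
 *                MIRROR_SYNC_MODE_FULL,
 *                BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                my_mirror_done, NULL, &local_err);
 */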
/**
 * backup_start:
 * @bs: Block device to operate on.
 * @target: Block device to write to.
 * @speed: The maximum speed, in bytes per second, or 0 for unlimited.
 * @sync_mode: What parts of the disk image should be copied to the destination.
 * @on_source_error: The action to take upon error reading from the source.
 * @on_target_error: The action to take upon error writing to the target.
 * @cb: Completion function for the job.
 * @opaque: Opaque pointer value passed to @cb.
 *
 * Start a backup operation on @bs.  Clusters in @bs are written to @target
 * until the job is cancelled or manually completed.
 */
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  Error **errp);
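/*
 * Illustrative sketch (not part of the original header): a full backup of @bs
 * into target_bs at unlimited speed.  my_backup_done, target_bs and local_err
 * are hypothetical.
 *
 *   backup_start(bs, target_bs, 0, MIRROR_SYNC_MODE_FULL,
 *                BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                my_backup_done, NULL, &local_err);
 */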
void blk_dev_change_media_cb(BlockBackend *blk, bool load);
bool blk_dev_has_removable_media(BlockBackend *blk);
void blk_dev_eject_request(BlockBackend *blk, bool force);
bool blk_dev_is_tray_open(BlockBackend *blk);
bool blk_dev_is_medium_locked(BlockBackend *blk);
void blk_dev_resize_cb(BlockBackend *blk);

#endif /* BLOCK_INT_H */