2 * QEMU System Emulator block driver
4 * Copyright (c) 2003 Fabrice Bellard
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
24 #include "config-host.h"
25 #include "qemu-common.h"
28 #include "block_int.h"
30 #include "qemu-objects.h"
33 #include <sys/types.h>
35 #include <sys/ioctl.h>
36 #include <sys/queue.h>
46 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
47 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
48 BlockDriverCompletionFunc *cb, void *opaque);
49 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
50 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
51 BlockDriverCompletionFunc *cb, void *opaque);
52 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
53 BlockDriverCompletionFunc *cb, void *opaque);
54 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
55 BlockDriverCompletionFunc *cb, void *opaque);
56 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
57 uint8_t *buf, int nb_sectors);
58 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
59 const uint8_t *buf, int nb_sectors);
61 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
62 QTAILQ_HEAD_INITIALIZER(bdrv_states);
64 static QLIST_HEAD(, BlockDriver) bdrv_drivers =
65 QLIST_HEAD_INITIALIZER(bdrv_drivers);
67 /* The device to use for VM snapshots */
68 static BlockDriverState *bs_snapshots;
70 /* If non-zero, use only whitelisted block drivers */
71 static int use_bdrv_whitelist;
/* Return non-zero if 'filename' starts with a Windows drive-letter prefix,
 * i.e. a single ASCII letter followed by ':' (e.g. "c:\foo", "Z:"). */
static int is_windows_drive_prefix(const char *filename)
{
    return (((filename[0] >= 'a' && filename[0] <= 'z') ||
             (filename[0] >= 'A' && filename[0] <= 'Z')) &&
            filename[1] == ':');
}
81 int is_windows_drive(const char *filename)
83 if (is_windows_drive_prefix(filename) &&
86 if (strstart(filename, "\\\\.\\", NULL) ||
87 strstart(filename, "//./", NULL))
93 /* check if the path starts with "<protocol>:" */
94 static int path_has_protocol(const char *path)
97 if (is_windows_drive(path) ||
98 is_windows_drive_prefix(path)) {
103 return strchr(path, ':') != NULL;
/* Return non-zero if 'path' is absolute. A protocol prefix ("proto:") is
 * skipped before testing, so "fat:/dir" counts as absolute. */
int path_is_absolute(const char *path)
{
    const char *p;
#ifdef _WIN32
    /* specific case for names like: "\\.\d:" */
    if (*path == '/' || *path == '\\')
        return 1;
#endif
    /* skip an optional "<protocol>:" prefix */
    p = strchr(path, ':');
    if (p)
        p++;
    else
        p = path;
#ifdef _WIN32
    return (*p == '/' || *p == '\\');
#else
    return (*p == '/');
#endif
}
/* if filename is absolute, just copy it to dest. Otherwise, build a
   path to it by considering it is relative to base_path. URL are
   supported. */
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename)
{
    const char *p, *p1;
    int len;

    if (dest_size <= 0)
        return;
    if (path_is_absolute(filename)) {
        pstrcpy(dest, dest_size, filename);
    } else {
        /* find the start of the path component of base_path:
         * skip a "protocol:" prefix if present */
        p = strchr(base_path, ':');
        if (p)
            p++;
        else
            p = base_path;
        /* locate the last directory separator */
        p1 = strrchr(base_path, '/');
#ifdef _WIN32
        {
            const char *p2;
            p2 = strrchr(base_path, '\\');
            if (!p1 || p2 > p1)
                p1 = p2;
        }
#endif
        if (p1)
            p1++;
        else
            p1 = base_path;
        if (p1 > p)
            p = p1;
        /* copy the directory part of base_path, clamped to the buffer */
        len = p - base_path;
        if (len > dest_size - 1)
            len = dest_size - 1;
        memcpy(dest, base_path, len);
        dest[len] = '\0';
        pstrcat(dest, dest_size, filename);
    }
}
170 void bdrv_register(BlockDriver *bdrv)
172 if (!bdrv->bdrv_aio_readv) {
173 /* add AIO emulation layer */
174 bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
175 bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
176 } else if (!bdrv->bdrv_read) {
177 /* add synchronous IO emulation layer */
178 bdrv->bdrv_read = bdrv_read_em;
179 bdrv->bdrv_write = bdrv_write_em;
182 if (!bdrv->bdrv_aio_flush)
183 bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
185 QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
188 /* create a new block device (by default it is empty) */
189 BlockDriverState *bdrv_new(const char *device_name)
191 BlockDriverState *bs;
193 bs = qemu_mallocz(sizeof(BlockDriverState));
194 pstrcpy(bs->device_name, sizeof(bs->device_name), device_name);
195 if (device_name[0] != '\0') {
196 QTAILQ_INSERT_TAIL(&bdrv_states, bs, list);
201 BlockDriver *bdrv_find_format(const char *format_name)
204 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
205 if (!strcmp(drv1->format_name, format_name)) {
212 static int bdrv_is_whitelisted(BlockDriver *drv)
214 static const char *whitelist[] = {
215 CONFIG_BDRV_WHITELIST
220 return 1; /* no whitelist, anything goes */
222 for (p = whitelist; *p; p++) {
223 if (!strcmp(drv->format_name, *p)) {
230 BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
232 BlockDriver *drv = bdrv_find_format(format_name);
233 return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
236 int bdrv_create(BlockDriver *drv, const char* filename,
237 QEMUOptionParameter *options)
239 if (!drv->bdrv_create)
242 return drv->bdrv_create(filename, options);
245 int bdrv_create_file(const char* filename, QEMUOptionParameter *options)
249 drv = bdrv_find_protocol(filename);
254 return bdrv_create(drv, filename, options);
#ifdef _WIN32
/* Fill 'filename' (capacity 'size') with the name of a freshly created
 * temporary file. */
void get_tmp_filename(char *filename, int size)
{
    char temp_dir[MAX_PATH];

    GetTempPath(MAX_PATH, temp_dir);
    GetTempFileName(temp_dir, "qem", 0, filename);
}
#else
/* Fill 'filename' (capacity 'size') with the name of a freshly created
 * temporary file under $TMPDIR (default /tmp). */
void get_tmp_filename(char *filename, int size)
{
    int fd;
    const char *tmpdir;
    /* XXX: race condition possible */
    tmpdir = getenv("TMPDIR");
    if (!tmpdir)
        tmpdir = "/tmp";
    snprintf(filename, size, "%s/vl.XXXXXX", tmpdir);
    fd = mkstemp(filename);
    /* only close a valid descriptor; mkstemp returns -1 on failure */
    if (fd >= 0) {
        close(fd);
    }
}
#endif
281 * Detect host devices. By convention, /dev/cdrom[N] is always
282 * recognized as a host CDROM.
284 static BlockDriver *find_hdev_driver(const char *filename)
286 int score_max = 0, score;
287 BlockDriver *drv = NULL, *d;
289 QLIST_FOREACH(d, &bdrv_drivers, list) {
290 if (d->bdrv_probe_device) {
291 score = d->bdrv_probe_device(filename);
292 if (score > score_max) {
302 BlockDriver *bdrv_find_protocol(const char *filename)
309 /* TODO Drivers without bdrv_file_open must be specified explicitly */
312 * XXX(hch): we really should not let host device detection
313 * override an explicit protocol specification, but moving this
314 * later breaks access to device names with colons in them.
315 * Thanks to the brain-dead persistent naming schemes on udev-
316 * based Linux systems those actually are quite common.
318 drv1 = find_hdev_driver(filename);
323 if (!path_has_protocol(filename)) {
324 return bdrv_find_format("file");
326 p = strchr(filename, ':');
329 if (len > sizeof(protocol) - 1)
330 len = sizeof(protocol) - 1;
331 memcpy(protocol, filename, len);
332 protocol[len] = '\0';
333 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
334 if (drv1->protocol_name &&
335 !strcmp(drv1->protocol_name, protocol)) {
342 static int find_image_format(const char *filename, BlockDriver **pdrv)
344 int ret, score, score_max;
345 BlockDriver *drv1, *drv;
347 BlockDriverState *bs;
349 ret = bdrv_file_open(&bs, filename, 0);
355 /* Return the raw BlockDriver * to scsi-generic devices or empty drives */
356 if (bs->sg || !bdrv_is_inserted(bs)) {
358 drv = bdrv_find_format("raw");
366 ret = bdrv_pread(bs, 0, buf, sizeof(buf));
375 QLIST_FOREACH(drv1, &bdrv_drivers, list) {
376 if (drv1->bdrv_probe) {
377 score = drv1->bdrv_probe(buf, ret, filename);
378 if (score > score_max) {
392 * Set the current 'total_sectors' value
394 static int refresh_total_sectors(BlockDriverState *bs, int64_t hint)
396 BlockDriver *drv = bs->drv;
398 /* Do not attempt drv->bdrv_getlength() on scsi-generic devices */
402 /* query actual device if possible, otherwise just trust the hint */
403 if (drv->bdrv_getlength) {
404 int64_t length = drv->bdrv_getlength(bs);
408 hint = length >> BDRV_SECTOR_BITS;
411 bs->total_sectors = hint;
416 * Common part for opening disk images and files
418 static int bdrv_open_common(BlockDriverState *bs, const char *filename,
419 int flags, BlockDriver *drv)
426 bs->total_sectors = 0;
429 bs->open_flags = flags;
430 /* buffer_alignment defaulted to 512, drivers can change this value */
431 bs->buffer_alignment = 512;
433 pstrcpy(bs->filename, sizeof(bs->filename), filename);
435 if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv)) {
440 bs->opaque = qemu_mallocz(drv->instance_size);
442 if (flags & BDRV_O_CACHE_WB)
443 bs->enable_write_cache = 1;
446 * Clear flags that are internal to the block layer before opening the
449 open_flags = flags & ~(BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
452 * Snapshots should be writable.
454 if (bs->is_temporary) {
455 open_flags |= BDRV_O_RDWR;
458 /* Open the image, either directly or using a protocol */
459 if (drv->bdrv_file_open) {
460 ret = drv->bdrv_file_open(bs, filename, open_flags);
462 ret = bdrv_file_open(&bs->file, filename, open_flags);
464 ret = drv->bdrv_open(bs, open_flags);
472 bs->keep_read_only = bs->read_only = !(open_flags & BDRV_O_RDWR);
474 ret = refresh_total_sectors(bs, bs->total_sectors);
480 if (bs->is_temporary) {
488 bdrv_delete(bs->file);
491 qemu_free(bs->opaque);
498 * Opens a file using a protocol (file, host_device, nbd, ...)
500 int bdrv_file_open(BlockDriverState **pbs, const char *filename, int flags)
502 BlockDriverState *bs;
506 drv = bdrv_find_protocol(filename);
512 ret = bdrv_open_common(bs, filename, flags, drv);
523 * Opens a disk image (raw, qcow2, vmdk, ...)
525 int bdrv_open(BlockDriverState *bs, const char *filename, int flags,
530 if (flags & BDRV_O_SNAPSHOT) {
531 BlockDriverState *bs1;
534 BlockDriver *bdrv_qcow2;
535 QEMUOptionParameter *options;
536 char tmp_filename[PATH_MAX];
537 char backing_filename[PATH_MAX];
539 /* if snapshot, we create a temporary backing file and open it
540 instead of opening 'filename' directly */
542 /* if there is a backing file, use it */
544 ret = bdrv_open(bs1, filename, 0, drv);
549 total_size = bdrv_getlength(bs1) & BDRV_SECTOR_MASK;
551 if (bs1->drv && bs1->drv->protocol_name)
556 get_tmp_filename(tmp_filename, sizeof(tmp_filename));
558 /* Real path is meaningless for protocols */
560 snprintf(backing_filename, sizeof(backing_filename),
562 else if (!realpath(filename, backing_filename))
565 bdrv_qcow2 = bdrv_find_format("qcow2");
566 options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
568 set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size);
569 set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
571 set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
575 ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
576 free_option_parameters(options);
581 filename = tmp_filename;
583 bs->is_temporary = 1;
586 /* Find the right image format driver */
588 ret = find_image_format(filename, &drv);
592 goto unlink_and_fail;
596 ret = bdrv_open_common(bs, filename, flags, drv);
598 goto unlink_and_fail;
601 /* If there is a backing file, use it */
602 if ((flags & BDRV_O_NO_BACKING) == 0 && bs->backing_file[0] != '\0') {
603 char backing_filename[PATH_MAX];
605 BlockDriver *back_drv = NULL;
607 bs->backing_hd = bdrv_new("");
609 if (path_has_protocol(bs->backing_file)) {
610 pstrcpy(backing_filename, sizeof(backing_filename),
613 path_combine(backing_filename, sizeof(backing_filename),
614 filename, bs->backing_file);
617 if (bs->backing_format[0] != '\0') {
618 back_drv = bdrv_find_format(bs->backing_format);
621 /* backing files always opened read-only */
623 flags & ~(BDRV_O_RDWR | BDRV_O_SNAPSHOT | BDRV_O_NO_BACKING);
625 ret = bdrv_open(bs->backing_hd, backing_filename, back_flags, back_drv);
630 if (bs->is_temporary) {
631 bs->backing_hd->keep_read_only = !(flags & BDRV_O_RDWR);
633 /* base image inherits from "parent" */
634 bs->backing_hd->keep_read_only = bs->keep_read_only;
638 if (!bdrv_key_required(bs)) {
639 /* call the change callback */
640 bs->media_changed = 1;
642 bs->change_cb(bs->change_opaque, CHANGE_MEDIA);
648 if (bs->is_temporary) {
654 void bdrv_close(BlockDriverState *bs)
657 if (bs == bs_snapshots) {
660 if (bs->backing_hd) {
661 bdrv_delete(bs->backing_hd);
662 bs->backing_hd = NULL;
664 bs->drv->bdrv_close(bs);
665 qemu_free(bs->opaque);
667 if (bs->is_temporary) {
668 unlink(bs->filename);
674 if (bs->file != NULL) {
675 bdrv_close(bs->file);
678 /* call the change callback */
679 bs->media_changed = 1;
681 bs->change_cb(bs->change_opaque, CHANGE_MEDIA);
685 void bdrv_close_all(void)
687 BlockDriverState *bs;
689 QTAILQ_FOREACH(bs, &bdrv_states, list) {
694 /* make a BlockDriverState anonymous by removing from bdrv_state list.
695 Also, NULL terminate the device_name to prevent double remove */
696 void bdrv_make_anon(BlockDriverState *bs)
698 if (bs->device_name[0] != '\0') {
699 QTAILQ_REMOVE(&bdrv_states, bs, list);
701 bs->device_name[0] = '\0';
704 void bdrv_delete(BlockDriverState *bs)
708 /* remove from list, if necessary */
712 if (bs->file != NULL) {
713 bdrv_delete(bs->file);
716 assert(bs != bs_snapshots);
720 int bdrv_attach(BlockDriverState *bs, DeviceState *qdev)
729 void bdrv_detach(BlockDriverState *bs, DeviceState *qdev)
731 assert(bs->peer == qdev);
735 DeviceState *bdrv_get_attached(BlockDriverState *bs)
741 * Run consistency checks on an image
743 * Returns 0 if the check could be completed (it doesn't mean that the image is
744 * free of errors) or -errno when an internal error occurred. The results of the
745 * check are stored in res.
747 int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res)
749 if (bs->drv->bdrv_check == NULL) {
753 memset(res, 0, sizeof(*res));
754 return bs->drv->bdrv_check(bs, res);
757 #define COMMIT_BUF_SECTORS 2048
759 /* commit COW file into the raw image */
760 int bdrv_commit(BlockDriverState *bs)
762 BlockDriver *drv = bs->drv;
763 BlockDriver *backing_drv;
764 int64_t sector, total_sectors;
765 int n, ro, open_flags;
766 int ret = 0, rw_ret = 0;
769 BlockDriverState *bs_rw, *bs_ro;
774 if (!bs->backing_hd) {
778 if (bs->backing_hd->keep_read_only) {
782 backing_drv = bs->backing_hd->drv;
783 ro = bs->backing_hd->read_only;
784 strncpy(filename, bs->backing_hd->filename, sizeof(filename));
785 open_flags = bs->backing_hd->open_flags;
789 bdrv_delete(bs->backing_hd);
790 bs->backing_hd = NULL;
791 bs_rw = bdrv_new("");
792 rw_ret = bdrv_open(bs_rw, filename, open_flags | BDRV_O_RDWR,
796 /* try to re-open read-only */
797 bs_ro = bdrv_new("");
798 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
802 /* drive not functional anymore */
806 bs->backing_hd = bs_ro;
809 bs->backing_hd = bs_rw;
812 total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
813 buf = qemu_malloc(COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
815 for (sector = 0; sector < total_sectors; sector += n) {
816 if (drv->bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n)) {
818 if (bdrv_read(bs, sector, buf, n) != 0) {
823 if (bdrv_write(bs->backing_hd, sector, buf, n) != 0) {
830 if (drv->bdrv_make_empty) {
831 ret = drv->bdrv_make_empty(bs);
836 * Make sure all data we wrote to the backing device is actually
840 bdrv_flush(bs->backing_hd);
847 bdrv_delete(bs->backing_hd);
848 bs->backing_hd = NULL;
849 bs_ro = bdrv_new("");
850 ret = bdrv_open(bs_ro, filename, open_flags & ~BDRV_O_RDWR,
854 /* drive not functional anymore */
858 bs->backing_hd = bs_ro;
859 bs->backing_hd->keep_read_only = 0;
865 void bdrv_commit_all(void)
867 BlockDriverState *bs;
869 QTAILQ_FOREACH(bs, &bdrv_states, list) {
877 * -EINVAL - backing format specified, but no file
878 * -ENOSPC - can't update the backing file because no space is left in the
880 * -ENOTSUP - format driver doesn't support changing the backing file
882 int bdrv_change_backing_file(BlockDriverState *bs,
883 const char *backing_file, const char *backing_fmt)
885 BlockDriver *drv = bs->drv;
887 if (drv->bdrv_change_backing_file != NULL) {
888 return drv->bdrv_change_backing_file(bs, backing_file, backing_fmt);
894 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
899 if (!bdrv_is_inserted(bs))
905 len = bdrv_getlength(bs);
910 if ((offset > len) || (len - offset < size))
916 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
919 return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
920 nb_sectors * BDRV_SECTOR_SIZE);
923 /* return < 0 if error. See bdrv_write() for the return codes */
924 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
925 uint8_t *buf, int nb_sectors)
927 BlockDriver *drv = bs->drv;
931 if (bdrv_check_request(bs, sector_num, nb_sectors))
934 return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
937 static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
938 int nb_sectors, int dirty)
941 unsigned long val, idx, bit;
943 start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
944 end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;
946 for (; start <= end; start++) {
947 idx = start / (sizeof(unsigned long) * 8);
948 bit = start % (sizeof(unsigned long) * 8);
949 val = bs->dirty_bitmap[idx];
951 if (!(val & (1UL << bit))) {
956 if (val & (1UL << bit)) {
958 val &= ~(1UL << bit);
961 bs->dirty_bitmap[idx] = val;
965 /* Return < 0 if error. Important errors are:
966 -EIO generic I/O error (may happen for all errors)
967 -ENOMEDIUM No media inserted.
968 -EINVAL Invalid sector number or nb_sectors
969 -EACCES Trying to write a read-only device
971 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
972 const uint8_t *buf, int nb_sectors)
974 BlockDriver *drv = bs->drv;
979 if (bdrv_check_request(bs, sector_num, nb_sectors))
982 if (bs->dirty_bitmap) {
983 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
986 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
987 bs->wr_highest_sector = sector_num + nb_sectors - 1;
990 return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
993 int bdrv_pread(BlockDriverState *bs, int64_t offset,
994 void *buf, int count1)
996 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
997 int len, nb_sectors, count;
1002 /* first read to align to sector start */
1003 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1006 sector_num = offset >> BDRV_SECTOR_BITS;
1008 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1010 memcpy(buf, tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), len);
1018 /* read the sectors "in place" */
1019 nb_sectors = count >> BDRV_SECTOR_BITS;
1020 if (nb_sectors > 0) {
1021 if ((ret = bdrv_read(bs, sector_num, buf, nb_sectors)) < 0)
1023 sector_num += nb_sectors;
1024 len = nb_sectors << BDRV_SECTOR_BITS;
1029 /* add data from the last sector */
1031 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1033 memcpy(buf, tmp_buf, count);
1038 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
1039 const void *buf, int count1)
1041 uint8_t tmp_buf[BDRV_SECTOR_SIZE];
1042 int len, nb_sectors, count;
1047 /* first write to align to sector start */
1048 len = (BDRV_SECTOR_SIZE - offset) & (BDRV_SECTOR_SIZE - 1);
1051 sector_num = offset >> BDRV_SECTOR_BITS;
1053 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1055 memcpy(tmp_buf + (offset & (BDRV_SECTOR_SIZE - 1)), buf, len);
1056 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1065 /* write the sectors "in place" */
1066 nb_sectors = count >> BDRV_SECTOR_BITS;
1067 if (nb_sectors > 0) {
1068 if ((ret = bdrv_write(bs, sector_num, buf, nb_sectors)) < 0)
1070 sector_num += nb_sectors;
1071 len = nb_sectors << BDRV_SECTOR_BITS;
1076 /* add data from the last sector */
1078 if ((ret = bdrv_read(bs, sector_num, tmp_buf, 1)) < 0)
1080 memcpy(tmp_buf, buf, count);
1081 if ((ret = bdrv_write(bs, sector_num, tmp_buf, 1)) < 0)
1088 * Writes to the file and ensures that no writes are reordered across this
1089 * request (acts as a barrier)
1091 * Returns 0 on success, -errno in error cases.
1093 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
1094 const void *buf, int count)
1098 ret = bdrv_pwrite(bs, offset, buf, count);
1103 /* No flush needed for cache=writethrough, it uses O_DSYNC */
1104 if ((bs->open_flags & BDRV_O_CACHE_MASK) != 0) {
1112 * Writes to the file and ensures that no writes are reordered across this
1113 * request (acts as a barrier)
1115 * Returns 0 on success, -errno in error cases.
1117 int bdrv_write_sync(BlockDriverState *bs, int64_t sector_num,
1118 const uint8_t *buf, int nb_sectors)
1120 return bdrv_pwrite_sync(bs, BDRV_SECTOR_SIZE * sector_num,
1121 buf, BDRV_SECTOR_SIZE * nb_sectors);
1125 * Truncate file to 'offset' bytes (needed only for file protocols)
1127 int bdrv_truncate(BlockDriverState *bs, int64_t offset)
1129 BlockDriver *drv = bs->drv;
1133 if (!drv->bdrv_truncate)
1137 if (bdrv_in_use(bs))
1139 ret = drv->bdrv_truncate(bs, offset);
1141 ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
1142 if (bs->change_cb) {
1143 bs->change_cb(bs->change_opaque, CHANGE_SIZE);
1150 * Length of a file in bytes. Return < 0 if error or unknown.
1152 int64_t bdrv_getlength(BlockDriverState *bs)
1154 BlockDriver *drv = bs->drv;
1158 if (bs->growable || bs->removable) {
1159 if (drv->bdrv_getlength) {
1160 return drv->bdrv_getlength(bs);
1163 return bs->total_sectors * BDRV_SECTOR_SIZE;
1166 /* return 0 as number of sectors if no device present or error */
1167 void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
1170 length = bdrv_getlength(bs);
1174 length = length >> BDRV_SECTOR_BITS;
1175 *nb_sectors_ptr = length;
1179 uint8_t boot_ind; /* 0x80 - active */
1180 uint8_t head; /* starting head */
1181 uint8_t sector; /* starting sector */
1182 uint8_t cyl; /* starting cylinder */
1183 uint8_t sys_ind; /* What partition type */
1184 uint8_t end_head; /* end head */
1185 uint8_t end_sector; /* end sector */
1186 uint8_t end_cyl; /* end cylinder */
1187 uint32_t start_sect; /* starting sector counting from 0 */
1188 uint32_t nr_sects; /* nr of sectors in partition */
1189 } __attribute__((packed));
1191 /* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */
1192 static int guess_disk_lchs(BlockDriverState *bs,
1193 int *pcylinders, int *pheads, int *psectors)
1195 uint8_t buf[BDRV_SECTOR_SIZE];
1196 int ret, i, heads, sectors, cylinders;
1197 struct partition *p;
1199 uint64_t nb_sectors;
1201 bdrv_get_geometry(bs, &nb_sectors);
1203 ret = bdrv_read(bs, 0, buf, 1);
1206 /* test msdos magic */
1207 if (buf[510] != 0x55 || buf[511] != 0xaa)
1209 for(i = 0; i < 4; i++) {
1210 p = ((struct partition *)(buf + 0x1be)) + i;
1211 nr_sects = le32_to_cpu(p->nr_sects);
1212 if (nr_sects && p->end_head) {
1213 /* We make the assumption that the partition terminates on
1214 a cylinder boundary */
1215 heads = p->end_head + 1;
1216 sectors = p->end_sector & 63;
1219 cylinders = nb_sectors / (heads * sectors);
1220 if (cylinders < 1 || cylinders > 16383)
1223 *psectors = sectors;
1224 *pcylinders = cylinders;
1226 printf("guessed geometry: LCHS=%d %d %d\n",
1227 cylinders, heads, sectors);
1235 void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
1237 int translation, lba_detected = 0;
1238 int cylinders, heads, secs;
1239 uint64_t nb_sectors;
1241 /* if a geometry hint is available, use it */
1242 bdrv_get_geometry(bs, &nb_sectors);
1243 bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
1244 translation = bdrv_get_translation_hint(bs);
1245 if (cylinders != 0) {
1250 if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
1252 /* if heads > 16, it means that a BIOS LBA
1253 translation was active, so the default
1254 hardware geometry is OK */
1256 goto default_geometry;
1261 /* disable any translation to be in sync with
1262 the logical geometry */
1263 if (translation == BIOS_ATA_TRANSLATION_AUTO) {
1264 bdrv_set_translation_hint(bs,
1265 BIOS_ATA_TRANSLATION_NONE);
1270 /* if no geometry, use a standard physical disk geometry */
1271 cylinders = nb_sectors / (16 * 63);
1273 if (cylinders > 16383)
1275 else if (cylinders < 2)
1280 if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
1281 if ((*pcyls * *pheads) <= 131072) {
1282 bdrv_set_translation_hint(bs,
1283 BIOS_ATA_TRANSLATION_LARGE);
1285 bdrv_set_translation_hint(bs,
1286 BIOS_ATA_TRANSLATION_LBA);
1290 bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
1294 void bdrv_set_geometry_hint(BlockDriverState *bs,
1295 int cyls, int heads, int secs)
1302 void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
1304 bs->translation = translation;
1307 void bdrv_get_geometry_hint(BlockDriverState *bs,
1308 int *pcyls, int *pheads, int *psecs)
1311 *pheads = bs->heads;
1315 /* Recognize floppy formats */
1316 typedef struct FDFormat {
1323 static const FDFormat fd_formats[] = {
1324 /* First entry is default format */
1325 /* 1.44 MB 3"1/2 floppy disks */
1326 { FDRIVE_DRV_144, 18, 80, 1, },
1327 { FDRIVE_DRV_144, 20, 80, 1, },
1328 { FDRIVE_DRV_144, 21, 80, 1, },
1329 { FDRIVE_DRV_144, 21, 82, 1, },
1330 { FDRIVE_DRV_144, 21, 83, 1, },
1331 { FDRIVE_DRV_144, 22, 80, 1, },
1332 { FDRIVE_DRV_144, 23, 80, 1, },
1333 { FDRIVE_DRV_144, 24, 80, 1, },
1334 /* 2.88 MB 3"1/2 floppy disks */
1335 { FDRIVE_DRV_288, 36, 80, 1, },
1336 { FDRIVE_DRV_288, 39, 80, 1, },
1337 { FDRIVE_DRV_288, 40, 80, 1, },
1338 { FDRIVE_DRV_288, 44, 80, 1, },
1339 { FDRIVE_DRV_288, 48, 80, 1, },
1340 /* 720 kB 3"1/2 floppy disks */
1341 { FDRIVE_DRV_144, 9, 80, 1, },
1342 { FDRIVE_DRV_144, 10, 80, 1, },
1343 { FDRIVE_DRV_144, 10, 82, 1, },
1344 { FDRIVE_DRV_144, 10, 83, 1, },
1345 { FDRIVE_DRV_144, 13, 80, 1, },
1346 { FDRIVE_DRV_144, 14, 80, 1, },
1347 /* 1.2 MB 5"1/4 floppy disks */
1348 { FDRIVE_DRV_120, 15, 80, 1, },
1349 { FDRIVE_DRV_120, 18, 80, 1, },
1350 { FDRIVE_DRV_120, 18, 82, 1, },
1351 { FDRIVE_DRV_120, 18, 83, 1, },
1352 { FDRIVE_DRV_120, 20, 80, 1, },
1353 /* 720 kB 5"1/4 floppy disks */
1354 { FDRIVE_DRV_120, 9, 80, 1, },
1355 { FDRIVE_DRV_120, 11, 80, 1, },
1356 /* 360 kB 5"1/4 floppy disks */
1357 { FDRIVE_DRV_120, 9, 40, 1, },
1358 { FDRIVE_DRV_120, 9, 40, 0, },
1359 { FDRIVE_DRV_120, 10, 41, 1, },
1360 { FDRIVE_DRV_120, 10, 42, 1, },
1361 /* 320 kB 5"1/4 floppy disks */
1362 { FDRIVE_DRV_120, 8, 40, 1, },
1363 { FDRIVE_DRV_120, 8, 40, 0, },
1364 /* 360 kB must match 5"1/4 better than 3"1/2... */
1365 { FDRIVE_DRV_144, 9, 80, 0, },
1367 { FDRIVE_DRV_NONE, -1, -1, 0, },
1370 void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
1371 int *max_track, int *last_sect,
1372 FDriveType drive_in, FDriveType *drive)
1374 const FDFormat *parse;
1375 uint64_t nb_sectors, size;
1376 int i, first_match, match;
1378 bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
1379 if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
1380 /* User defined disk */
1382 bdrv_get_geometry(bs, &nb_sectors);
1385 for (i = 0; ; i++) {
1386 parse = &fd_formats[i];
1387 if (parse->drive == FDRIVE_DRV_NONE) {
1390 if (drive_in == parse->drive ||
1391 drive_in == FDRIVE_DRV_NONE) {
1392 size = (parse->max_head + 1) * parse->max_track *
1394 if (nb_sectors == size) {
1398 if (first_match == -1) {
1404 if (first_match == -1) {
1407 match = first_match;
1409 parse = &fd_formats[match];
1411 *nb_heads = parse->max_head + 1;
1412 *max_track = parse->max_track;
1413 *last_sect = parse->last_sect;
1414 *drive = parse->drive;
1418 int bdrv_get_translation_hint(BlockDriverState *bs)
1420 return bs->translation;
1423 void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
1424 BlockErrorAction on_write_error)
1426 bs->on_read_error = on_read_error;
1427 bs->on_write_error = on_write_error;
1430 BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read)
1432 return is_read ? bs->on_read_error : bs->on_write_error;
1435 void bdrv_set_removable(BlockDriverState *bs, int removable)
1437 bs->removable = removable;
1438 if (removable && bs == bs_snapshots) {
1439 bs_snapshots = NULL;
1443 int bdrv_is_removable(BlockDriverState *bs)
1445 return bs->removable;
1448 int bdrv_is_read_only(BlockDriverState *bs)
1450 return bs->read_only;
1453 int bdrv_is_sg(BlockDriverState *bs)
1458 int bdrv_enable_write_cache(BlockDriverState *bs)
1460 return bs->enable_write_cache;
1463 /* XXX: no longer used */
1464 void bdrv_set_change_cb(BlockDriverState *bs,
1465 void (*change_cb)(void *opaque, int reason),
1468 bs->change_cb = change_cb;
1469 bs->change_opaque = opaque;
1472 int bdrv_is_encrypted(BlockDriverState *bs)
1474 if (bs->backing_hd && bs->backing_hd->encrypted)
1476 return bs->encrypted;
1479 int bdrv_key_required(BlockDriverState *bs)
1481 BlockDriverState *backing_hd = bs->backing_hd;
1483 if (backing_hd && backing_hd->encrypted && !backing_hd->valid_key)
1485 return (bs->encrypted && !bs->valid_key);
1488 int bdrv_set_key(BlockDriverState *bs, const char *key)
1491 if (bs->backing_hd && bs->backing_hd->encrypted) {
1492 ret = bdrv_set_key(bs->backing_hd, key);
1498 if (!bs->encrypted) {
1500 } else if (!bs->drv || !bs->drv->bdrv_set_key) {
1503 ret = bs->drv->bdrv_set_key(bs, key);
1506 } else if (!bs->valid_key) {
1508 /* call the change callback now, we skipped it on open */
1509 bs->media_changed = 1;
1511 bs->change_cb(bs->change_opaque, CHANGE_MEDIA);
1516 void bdrv_get_format(BlockDriverState *bs, char *buf, int buf_size)
1521 pstrcpy(buf, buf_size, bs->drv->format_name);
1525 void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
1530 QLIST_FOREACH(drv, &bdrv_drivers, list) {
1531 it(opaque, drv->format_name);
1535 BlockDriverState *bdrv_find(const char *name)
1537 BlockDriverState *bs;
1539 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1540 if (!strcmp(name, bs->device_name)) {
1547 BlockDriverState *bdrv_next(BlockDriverState *bs)
1550 return QTAILQ_FIRST(&bdrv_states);
1552 return QTAILQ_NEXT(bs, list);
1555 void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), void *opaque)
1557 BlockDriverState *bs;
1559 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1564 const char *bdrv_get_device_name(BlockDriverState *bs)
1566 return bs->device_name;
1569 int bdrv_flush(BlockDriverState *bs)
1571 if (bs->open_flags & BDRV_O_NO_FLUSH) {
1575 if (bs->drv && bs->drv->bdrv_flush) {
1576 return bs->drv->bdrv_flush(bs);
1580 * Some block drivers always operate in either writethrough or unsafe mode
1581 * and don't support bdrv_flush therefore. Usually qemu doesn't know how
1582 * the server works (because the behaviour is hardcoded or depends on
1583 * server-side configuration), so we can't ensure that everything is safe
1584 * on disk. Returning an error doesn't work because that would break guests
1585 * even if the server operates in writethrough mode.
1587 * Let's hope the user knows what he's doing.
1592 void bdrv_flush_all(void)
1594 BlockDriverState *bs;
1596 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1597 if (bs->drv && !bdrv_is_read_only(bs) &&
1598 (!bdrv_is_removable(bs) || bdrv_is_inserted(bs))) {
1604 int bdrv_has_zero_init(BlockDriverState *bs)
1608 if (bs->drv->bdrv_has_zero_init) {
1609 return bs->drv->bdrv_has_zero_init(bs);
1615 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
1620 if (!bs->drv->bdrv_discard) {
1623 return bs->drv->bdrv_discard(bs, sector_num, nb_sectors);
1627 * Returns true iff the specified sector is present in the disk image. Drivers
1628 * not implementing the functionality are assumed to not support backing files,
1629 * hence all their sectors are reported as allocated.
1631 * 'pnum' is set to the number of sectors (including and immediately following
1632 * the specified sector) that are known to be in the same
1633 * allocated/unallocated state.
1635 * 'nb_sectors' is the max value 'pnum' should be set to.
1637 int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
1641 if (!bs->drv->bdrv_is_allocated) {
1642 if (sector_num >= bs->total_sectors) {
1646 n = bs->total_sectors - sector_num;
1647 *pnum = (n < nb_sectors) ? (n) : (nb_sectors);
1650 return bs->drv->bdrv_is_allocated(bs, sector_num, nb_sectors, pnum);
1653 void bdrv_mon_event(const BlockDriverState *bdrv,
1654 BlockMonEventAction action, int is_read)
1657 const char *action_str;
1660 case BDRV_ACTION_REPORT:
1661 action_str = "report";
1663 case BDRV_ACTION_IGNORE:
1664 action_str = "ignore";
1666 case BDRV_ACTION_STOP:
1667 action_str = "stop";
1673 data = qobject_from_jsonf("{ 'device': %s, 'action': %s, 'operation': %s }",
1676 is_read ? "read" : "write");
1677 monitor_protocol_event(QEVENT_BLOCK_IO_ERROR, data);
1679 qobject_decref(data);
1682 static void bdrv_print_dict(QObject *obj, void *opaque)
1685 Monitor *mon = opaque;
1687 bs_dict = qobject_to_qdict(obj);
1689 monitor_printf(mon, "%s: removable=%d",
1690 qdict_get_str(bs_dict, "device"),
1691 qdict_get_bool(bs_dict, "removable"));
1693 if (qdict_get_bool(bs_dict, "removable")) {
1694 monitor_printf(mon, " locked=%d", qdict_get_bool(bs_dict, "locked"));
1697 if (qdict_haskey(bs_dict, "inserted")) {
1698 QDict *qdict = qobject_to_qdict(qdict_get(bs_dict, "inserted"));
1700 monitor_printf(mon, " file=");
1701 monitor_print_filename(mon, qdict_get_str(qdict, "file"));
1702 if (qdict_haskey(qdict, "backing_file")) {
1703 monitor_printf(mon, " backing_file=");
1704 monitor_print_filename(mon, qdict_get_str(qdict, "backing_file"));
1706 monitor_printf(mon, " ro=%d drv=%s encrypted=%d",
1707 qdict_get_bool(qdict, "ro"),
1708 qdict_get_str(qdict, "drv"),
1709 qdict_get_bool(qdict, "encrypted"));
1711 monitor_printf(mon, " [not inserted]");
1714 monitor_printf(mon, "\n");
1717 void bdrv_info_print(Monitor *mon, const QObject *data)
1719 qlist_iter(qobject_to_qlist(data), bdrv_print_dict, mon);
1722 void bdrv_info(Monitor *mon, QObject **ret_data)
1725 BlockDriverState *bs;
1727 bs_list = qlist_new();
1729 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1732 bs_obj = qobject_from_jsonf("{ 'device': %s, 'type': 'unknown', "
1733 "'removable': %i, 'locked': %i }",
1734 bs->device_name, bs->removable,
1739 QDict *bs_dict = qobject_to_qdict(bs_obj);
1741 obj = qobject_from_jsonf("{ 'file': %s, 'ro': %i, 'drv': %s, "
1742 "'encrypted': %i }",
1743 bs->filename, bs->read_only,
1744 bs->drv->format_name,
1745 bdrv_is_encrypted(bs));
1746 if (bs->backing_file[0] != '\0') {
1747 QDict *qdict = qobject_to_qdict(obj);
1748 qdict_put(qdict, "backing_file",
1749 qstring_from_str(bs->backing_file));
1752 qdict_put_obj(bs_dict, "inserted", obj);
1754 qlist_append_obj(bs_list, bs_obj);
1757 *ret_data = QOBJECT(bs_list);
1760 static void bdrv_stats_iter(QObject *data, void *opaque)
1763 Monitor *mon = opaque;
1765 qdict = qobject_to_qdict(data);
1766 monitor_printf(mon, "%s:", qdict_get_str(qdict, "device"));
1768 qdict = qobject_to_qdict(qdict_get(qdict, "stats"));
1769 monitor_printf(mon, " rd_bytes=%" PRId64
1770 " wr_bytes=%" PRId64
1771 " rd_operations=%" PRId64
1772 " wr_operations=%" PRId64
1774 qdict_get_int(qdict, "rd_bytes"),
1775 qdict_get_int(qdict, "wr_bytes"),
1776 qdict_get_int(qdict, "rd_operations"),
1777 qdict_get_int(qdict, "wr_operations"));
1780 void bdrv_stats_print(Monitor *mon, const QObject *data)
1782 qlist_iter(qobject_to_qlist(data), bdrv_stats_iter, mon);
1785 static QObject* bdrv_info_stats_bs(BlockDriverState *bs)
1790 res = qobject_from_jsonf("{ 'stats': {"
1791 "'rd_bytes': %" PRId64 ","
1792 "'wr_bytes': %" PRId64 ","
1793 "'rd_operations': %" PRId64 ","
1794 "'wr_operations': %" PRId64 ","
1795 "'wr_highest_offset': %" PRId64
1797 bs->rd_bytes, bs->wr_bytes,
1798 bs->rd_ops, bs->wr_ops,
1799 bs->wr_highest_sector *
1800 (uint64_t)BDRV_SECTOR_SIZE);
1801 dict = qobject_to_qdict(res);
1803 if (*bs->device_name) {
1804 qdict_put(dict, "device", qstring_from_str(bs->device_name));
1808 QObject *parent = bdrv_info_stats_bs(bs->file);
1809 qdict_put_obj(dict, "parent", parent);
1815 void bdrv_info_stats(Monitor *mon, QObject **ret_data)
1819 BlockDriverState *bs;
1821 devices = qlist_new();
1823 QTAILQ_FOREACH(bs, &bdrv_states, list) {
1824 obj = bdrv_info_stats_bs(bs);
1825 qlist_append_obj(devices, obj);
1828 *ret_data = QOBJECT(devices);
1831 const char *bdrv_get_encrypted_filename(BlockDriverState *bs)
1833 if (bs->backing_hd && bs->backing_hd->encrypted)
1834 return bs->backing_file;
1835 else if (bs->encrypted)
1836 return bs->filename;
1841 void bdrv_get_backing_filename(BlockDriverState *bs,
1842 char *filename, int filename_size)
1844 if (!bs->backing_file) {
1845 pstrcpy(filename, filename_size, "");
1847 pstrcpy(filename, filename_size, bs->backing_file);
1851 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1852 const uint8_t *buf, int nb_sectors)
1854 BlockDriver *drv = bs->drv;
1857 if (!drv->bdrv_write_compressed)
1859 if (bdrv_check_request(bs, sector_num, nb_sectors))
1862 if (bs->dirty_bitmap) {
1863 set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
1866 return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1869 int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
1871 BlockDriver *drv = bs->drv;
1874 if (!drv->bdrv_get_info)
1876 memset(bdi, 0, sizeof(*bdi));
1877 return drv->bdrv_get_info(bs, bdi);
1880 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1881 int64_t pos, int size)
1883 BlockDriver *drv = bs->drv;
1886 if (drv->bdrv_save_vmstate)
1887 return drv->bdrv_save_vmstate(bs, buf, pos, size);
1889 return bdrv_save_vmstate(bs->file, buf, pos, size);
1893 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1894 int64_t pos, int size)
1896 BlockDriver *drv = bs->drv;
1899 if (drv->bdrv_load_vmstate)
1900 return drv->bdrv_load_vmstate(bs, buf, pos, size);
1902 return bdrv_load_vmstate(bs->file, buf, pos, size);
1906 void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
1908 BlockDriver *drv = bs->drv;
1910 if (!drv || !drv->bdrv_debug_event) {
1914 return drv->bdrv_debug_event(bs, event);
1918 /**************************************************************/
1919 /* handling of snapshots */
1921 int bdrv_can_snapshot(BlockDriverState *bs)
1923 BlockDriver *drv = bs->drv;
1924 if (!drv || bdrv_is_removable(bs) || bdrv_is_read_only(bs)) {
1928 if (!drv->bdrv_snapshot_create) {
1929 if (bs->file != NULL) {
1930 return bdrv_can_snapshot(bs->file);
1938 int bdrv_is_snapshot(BlockDriverState *bs)
1940 return !!(bs->open_flags & BDRV_O_SNAPSHOT);
1943 BlockDriverState *bdrv_snapshots(void)
1945 BlockDriverState *bs;
1948 return bs_snapshots;
1952 while ((bs = bdrv_next(bs))) {
1953 if (bdrv_can_snapshot(bs)) {
1961 int bdrv_snapshot_create(BlockDriverState *bs,
1962 QEMUSnapshotInfo *sn_info)
1964 BlockDriver *drv = bs->drv;
1967 if (drv->bdrv_snapshot_create)
1968 return drv->bdrv_snapshot_create(bs, sn_info);
1970 return bdrv_snapshot_create(bs->file, sn_info);
1974 int bdrv_snapshot_goto(BlockDriverState *bs,
1975 const char *snapshot_id)
1977 BlockDriver *drv = bs->drv;
1982 if (drv->bdrv_snapshot_goto)
1983 return drv->bdrv_snapshot_goto(bs, snapshot_id);
1986 drv->bdrv_close(bs);
1987 ret = bdrv_snapshot_goto(bs->file, snapshot_id);
1988 open_ret = drv->bdrv_open(bs, bs->open_flags);
1990 bdrv_delete(bs->file);
2000 int bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
2002 BlockDriver *drv = bs->drv;
2005 if (drv->bdrv_snapshot_delete)
2006 return drv->bdrv_snapshot_delete(bs, snapshot_id);
2008 return bdrv_snapshot_delete(bs->file, snapshot_id);
2012 int bdrv_snapshot_list(BlockDriverState *bs,
2013 QEMUSnapshotInfo **psn_info)
2015 BlockDriver *drv = bs->drv;
2018 if (drv->bdrv_snapshot_list)
2019 return drv->bdrv_snapshot_list(bs, psn_info);
2021 return bdrv_snapshot_list(bs->file, psn_info);
2025 int bdrv_snapshot_load_tmp(BlockDriverState *bs,
2026 const char *snapshot_name)
2028 BlockDriver *drv = bs->drv;
2032 if (!bs->read_only) {
2035 if (drv->bdrv_snapshot_load_tmp) {
2036 return drv->bdrv_snapshot_load_tmp(bs, snapshot_name);
#define NB_SUFFIXES 4

/*
 * Format 'size' into 'buf' using binary-scaled suffixes (K/M/G/T).
 * Values <= 999 are printed verbatim; scaled values use one decimal
 * below 10x the unit and rounded integers below 1000x.  Returns 'buf'.
 */
char *get_human_readable_size(char *buf, int buf_size, int64_t size)
{
    /* deliberately not NUL-terminated; indexed, never printed as a string */
    static const char suffixes[NB_SUFFIXES] = "KMGT";
    int64_t base;
    int i;

    if (size <= 999) {
        snprintf(buf, buf_size, "%" PRId64, size);
    } else {
        base = 1024;
        for (i = 0; i < NB_SUFFIXES; i++) {
            if (size < (10 * base)) {
                snprintf(buf, buf_size, "%0.1f%c",
                         (double)size / base,
                         suffixes[i]);
                break;
            } else if (size < (1000 * base) || i == (NB_SUFFIXES - 1)) {
                /* round to nearest by adding half the unit before dividing */
                snprintf(buf, buf_size, "%" PRId64 "%c",
                         ((size + (base >> 1)) / base),
                         suffixes[i]);
                break;
            }
            base = base * 1024;
        }
    }
    return buf;
}
2071 char *bdrv_snapshot_dump(char *buf, int buf_size, QEMUSnapshotInfo *sn)
2073 char buf1[128], date_buf[128], clock_buf[128];
2083 snprintf(buf, buf_size,
2084 "%-10s%-20s%7s%20s%15s",
2085 "ID", "TAG", "VM SIZE", "DATE", "VM CLOCK");
2089 ptm = localtime(&ti);
2090 strftime(date_buf, sizeof(date_buf),
2091 "%Y-%m-%d %H:%M:%S", ptm);
2093 localtime_r(&ti, &tm);
2094 strftime(date_buf, sizeof(date_buf),
2095 "%Y-%m-%d %H:%M:%S", &tm);
2097 secs = sn->vm_clock_nsec / 1000000000;
2098 snprintf(clock_buf, sizeof(clock_buf),
2099 "%02d:%02d:%02d.%03d",
2101 (int)((secs / 60) % 60),
2103 (int)((sn->vm_clock_nsec / 1000000) % 1000));
2104 snprintf(buf, buf_size,
2105 "%-10s%-20s%7s%20s%15s",
2106 sn->id_str, sn->name,
2107 get_human_readable_size(buf1, sizeof(buf1), sn->vm_state_size),
2115 /**************************************************************/
2118 BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
2119 QEMUIOVector *qiov, int nb_sectors,
2120 BlockDriverCompletionFunc *cb, void *opaque)
2122 BlockDriver *drv = bs->drv;
2123 BlockDriverAIOCB *ret;
2125 trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
2129 if (bdrv_check_request(bs, sector_num, nb_sectors))
2132 ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
2136 /* Update stats even though technically transfer has not happened. */
2137 bs->rd_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2144 typedef struct BlockCompleteData {
2145 BlockDriverCompletionFunc *cb;
2147 BlockDriverState *bs;
2150 } BlockCompleteData;
2152 static void block_complete_cb(void *opaque, int ret)
2154 BlockCompleteData *b = opaque;
2156 if (b->bs->dirty_bitmap) {
2157 set_dirty_bitmap(b->bs, b->sector_num, b->nb_sectors, 1);
2159 b->cb(b->opaque, ret);
2163 static BlockCompleteData *blk_dirty_cb_alloc(BlockDriverState *bs,
2166 BlockDriverCompletionFunc *cb,
2169 BlockCompleteData *blkdata = qemu_mallocz(sizeof(BlockCompleteData));
2173 blkdata->opaque = opaque;
2174 blkdata->sector_num = sector_num;
2175 blkdata->nb_sectors = nb_sectors;
2180 BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
2181 QEMUIOVector *qiov, int nb_sectors,
2182 BlockDriverCompletionFunc *cb, void *opaque)
2184 BlockDriver *drv = bs->drv;
2185 BlockDriverAIOCB *ret;
2186 BlockCompleteData *blk_cb_data;
2188 trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
2194 if (bdrv_check_request(bs, sector_num, nb_sectors))
2197 if (bs->dirty_bitmap) {
2198 blk_cb_data = blk_dirty_cb_alloc(bs, sector_num, nb_sectors, cb,
2200 cb = &block_complete_cb;
2201 opaque = blk_cb_data;
2204 ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
2208 /* Update stats even though technically transfer has not happened. */
2209 bs->wr_bytes += (unsigned) nb_sectors * BDRV_SECTOR_SIZE;
2211 if (bs->wr_highest_sector < sector_num + nb_sectors - 1) {
2212 bs->wr_highest_sector = sector_num + nb_sectors - 1;
2220 typedef struct MultiwriteCB {
2225 BlockDriverCompletionFunc *cb;
2227 QEMUIOVector *free_qiov;
2232 static void multiwrite_user_cb(MultiwriteCB *mcb)
2236 for (i = 0; i < mcb->num_callbacks; i++) {
2237 mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
2238 if (mcb->callbacks[i].free_qiov) {
2239 qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
2241 qemu_free(mcb->callbacks[i].free_qiov);
2242 qemu_vfree(mcb->callbacks[i].free_buf);
2246 static void multiwrite_cb(void *opaque, int ret)
2248 MultiwriteCB *mcb = opaque;
2250 trace_multiwrite_cb(mcb, ret);
2252 if (ret < 0 && !mcb->error) {
2256 mcb->num_requests--;
2257 if (mcb->num_requests == 0) {
2258 multiwrite_user_cb(mcb);
2263 static int multiwrite_req_compare(const void *a, const void *b)
2265 const BlockRequest *req1 = a, *req2 = b;
2268 * Note that we can't simply subtract req2->sector from req1->sector
2269 * here as that could overflow the return value.
2271 if (req1->sector > req2->sector) {
2273 } else if (req1->sector < req2->sector) {
2281 * Takes a bunch of requests and tries to merge them. Returns the number of
2282 * requests that remain after merging.
2284 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
2285 int num_reqs, MultiwriteCB *mcb)
2289 // Sort requests by start sector
2290 qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
2292 // Check if adjacent requests touch the same clusters. If so, combine them,
2293 // filling up gaps with zero sectors.
2295 for (i = 1; i < num_reqs; i++) {
2297 int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
2299 // This handles the cases that are valid for all block drivers, namely
2300 // exactly sequential writes and overlapping writes.
2301 if (reqs[i].sector <= oldreq_last) {
2305 // The block driver may decide that it makes sense to combine requests
2306 // even if there is a gap of some sectors between them. In this case,
2307 // the gap is filled with zeros (therefore only applicable for yet
2308 // unused space in format like qcow2).
2309 if (!merge && bs->drv->bdrv_merge_requests) {
2310 merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
2313 if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 > IOV_MAX) {
2319 QEMUIOVector *qiov = qemu_mallocz(sizeof(*qiov));
2320 qemu_iovec_init(qiov,
2321 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
2323 // Add the first request to the merged one. If the requests are
2324 // overlapping, drop the last sectors of the first request.
2325 size = (reqs[i].sector - reqs[outidx].sector) << 9;
2326 qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
2328 // We might need to add some zeros between the two requests
2329 if (reqs[i].sector > oldreq_last) {
2330 size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
2331 uint8_t *buf = qemu_blockalign(bs, zero_bytes);
2332 memset(buf, 0, zero_bytes);
2333 qemu_iovec_add(qiov, buf, zero_bytes);
2334 mcb->callbacks[i].free_buf = buf;
2337 // Add the second request
2338 qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
2340 reqs[outidx].nb_sectors = qiov->size >> 9;
2341 reqs[outidx].qiov = qiov;
2343 mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
2346 reqs[outidx].sector = reqs[i].sector;
2347 reqs[outidx].nb_sectors = reqs[i].nb_sectors;
2348 reqs[outidx].qiov = reqs[i].qiov;
2356 * Submit multiple AIO write requests at once.
2358 * On success, the function returns 0 and all requests in the reqs array have
2359 * been submitted. In error case this function returns -1, and any of the
2360 * requests may or may not be submitted yet. In particular, this means that the
2361 * callback will be called for some of the requests, for others it won't. The
2362 * caller must check the error field of the BlockRequest to wait for the right
2363 * callbacks (if error != 0, no callback will be called).
2365 * The implementation may modify the contents of the reqs array, e.g. to merge
2366 * requests. However, the fields opaque and error are left unmodified as they
2367 * are used to signal failure for a single request to the caller.
2369 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
2371 BlockDriverAIOCB *acb;
2375 /* don't submit writes if we don't have a medium */
2376 if (bs->drv == NULL) {
2377 for (i = 0; i < num_reqs; i++) {
2378 reqs[i].error = -ENOMEDIUM;
2383 if (num_reqs == 0) {
2387 // Create MultiwriteCB structure
2388 mcb = qemu_mallocz(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
2389 mcb->num_requests = 0;
2390 mcb->num_callbacks = num_reqs;
2392 for (i = 0; i < num_reqs; i++) {
2393 mcb->callbacks[i].cb = reqs[i].cb;
2394 mcb->callbacks[i].opaque = reqs[i].opaque;
2397 // Check for mergable requests
2398 num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
2400 trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
2403 * Run the aio requests. As soon as one request can't be submitted
2404 * successfully, fail all requests that are not yet submitted (we must
2405 * return failure for all requests anyway)
2407 * num_requests cannot be set to the right value immediately: If
2408 * bdrv_aio_writev fails for some request, num_requests would be too high
2409 * and therefore multiwrite_cb() would never recognize the multiwrite
2410 * request as completed. We also cannot use the loop variable i to set it
2411 * when the first request fails because the callback may already have been
2412 * called for previously submitted requests. Thus, num_requests must be
2413 * incremented for each request that is submitted.
2415 * The problem that callbacks may be called early also means that we need
2416 * to take care that num_requests doesn't become 0 before all requests are
2417 * submitted - multiwrite_cb() would consider the multiwrite request
2418 * completed. A dummy request that is "completed" by a manual call to
2419 * multiwrite_cb() takes care of this.
2421 mcb->num_requests = 1;
2423 // Run the aio requests
2424 for (i = 0; i < num_reqs; i++) {
2425 mcb->num_requests++;
2426 acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
2427 reqs[i].nb_sectors, multiwrite_cb, mcb);
2430 // We can only fail the whole thing if no request has been
2431 // submitted yet. Otherwise we'll wait for the submitted AIOs to
2432 // complete and report the error in the callback.
2434 trace_bdrv_aio_multiwrite_earlyfail(mcb);
2437 trace_bdrv_aio_multiwrite_latefail(mcb, i);
2438 multiwrite_cb(mcb, -EIO);
2444 /* Complete the dummy request */
2445 multiwrite_cb(mcb, 0);
2450 for (i = 0; i < mcb->num_callbacks; i++) {
2451 reqs[i].error = -EIO;
2457 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2458 BlockDriverCompletionFunc *cb, void *opaque)
2460 BlockDriver *drv = bs->drv;
2462 trace_bdrv_aio_flush(bs, opaque);
2464 if (bs->open_flags & BDRV_O_NO_FLUSH) {
2465 return bdrv_aio_noop_em(bs, cb, opaque);
2470 return drv->bdrv_aio_flush(bs, cb, opaque);
2473 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
2475 acb->pool->cancel(acb);
2479 /**************************************************************/
2480 /* async block device emulation */
2482 typedef struct BlockDriverAIOCBSync {
2483 BlockDriverAIOCB common;
2486 /* vector translation state */
2490 } BlockDriverAIOCBSync;
2492 static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
2494 BlockDriverAIOCBSync *acb =
2495 container_of(blockacb, BlockDriverAIOCBSync, common);
2496 qemu_bh_delete(acb->bh);
2498 qemu_aio_release(acb);
2501 static AIOPool bdrv_em_aio_pool = {
2502 .aiocb_size = sizeof(BlockDriverAIOCBSync),
2503 .cancel = bdrv_aio_cancel_em,
2506 static void bdrv_aio_bh_cb(void *opaque)
2508 BlockDriverAIOCBSync *acb = opaque;
2511 qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
2512 qemu_vfree(acb->bounce);
2513 acb->common.cb(acb->common.opaque, acb->ret);
2514 qemu_bh_delete(acb->bh);
2516 qemu_aio_release(acb);
2519 static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2523 BlockDriverCompletionFunc *cb,
2528 BlockDriverAIOCBSync *acb;
2530 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2531 acb->is_write = is_write;
2533 acb->bounce = qemu_blockalign(bs, qiov->size);
2536 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2539 qemu_iovec_to_buffer(acb->qiov, acb->bounce);
2540 acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2542 acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2545 qemu_bh_schedule(acb->bh);
2547 return &acb->common;
2550 static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2551 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2552 BlockDriverCompletionFunc *cb, void *opaque)
2554 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2557 static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2558 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2559 BlockDriverCompletionFunc *cb, void *opaque)
2561 return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2564 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
2565 BlockDriverCompletionFunc *cb, void *opaque)
2567 BlockDriverAIOCBSync *acb;
2569 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2570 acb->is_write = 1; /* don't bounce in the completion hadler */
2576 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2579 qemu_bh_schedule(acb->bh);
2580 return &acb->common;
2583 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
2584 BlockDriverCompletionFunc *cb, void *opaque)
2586 BlockDriverAIOCBSync *acb;
2588 acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
2589 acb->is_write = 1; /* don't bounce in the completion handler */
2595 acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
2598 qemu_bh_schedule(acb->bh);
2599 return &acb->common;
2602 /**************************************************************/
2603 /* sync block device emulation */
2605 static void bdrv_rw_em_cb(void *opaque, int ret)
2607 *(int *)opaque = ret;
2610 #define NOT_DONE 0x7fffffff
2612 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
2613 uint8_t *buf, int nb_sectors)
2616 BlockDriverAIOCB *acb;
2620 async_context_push();
2622 async_ret = NOT_DONE;
2623 iov.iov_base = (void *)buf;
2624 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2625 qemu_iovec_init_external(&qiov, &iov, 1);
2626 acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
2627 bdrv_rw_em_cb, &async_ret);
2633 while (async_ret == NOT_DONE) {
2639 async_context_pop();
2643 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
2644 const uint8_t *buf, int nb_sectors)
2647 BlockDriverAIOCB *acb;
2651 async_context_push();
2653 async_ret = NOT_DONE;
2654 iov.iov_base = (void *)buf;
2655 iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;
2656 qemu_iovec_init_external(&qiov, &iov, 1);
2657 acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,
2658 bdrv_rw_em_cb, &async_ret);
2663 while (async_ret == NOT_DONE) {
2668 async_context_pop();
2672 void bdrv_init(void)
2674 module_call_init(MODULE_INIT_BLOCK);
2677 void bdrv_init_with_whitelist(void)
2679 use_bdrv_whitelist = 1;
2683 void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
2684 BlockDriverCompletionFunc *cb, void *opaque)
2686 BlockDriverAIOCB *acb;
2688 if (pool->free_aiocb) {
2689 acb = pool->free_aiocb;
2690 pool->free_aiocb = acb->next;
2692 acb = qemu_mallocz(pool->aiocb_size);
2697 acb->opaque = opaque;
2701 void qemu_aio_release(void *p)
2703 BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
2704 AIOPool *pool = acb->pool;
2705 acb->next = pool->free_aiocb;
2706 pool->free_aiocb = acb;
2709 /**************************************************************/
2710 /* removable device support */
2713 * Return TRUE if the media is present
2715 int bdrv_is_inserted(BlockDriverState *bs)
2717 BlockDriver *drv = bs->drv;
2721 if (!drv->bdrv_is_inserted)
2722 return !bs->tray_open;
2723 ret = drv->bdrv_is_inserted(bs);
2728 * Return TRUE if the media changed since the last call to this
2729 * function. It is currently only used for floppy disks
2731 int bdrv_media_changed(BlockDriverState *bs)
2733 BlockDriver *drv = bs->drv;
2736 if (!drv || !drv->bdrv_media_changed)
2739 ret = drv->bdrv_media_changed(bs);
2740 if (ret == -ENOTSUP)
2741 ret = bs->media_changed;
2742 bs->media_changed = 0;
2747 * If eject_flag is TRUE, eject the media. Otherwise, close the tray
2749 int bdrv_eject(BlockDriverState *bs, int eject_flag)
2751 BlockDriver *drv = bs->drv;
2758 if (!drv || !drv->bdrv_eject) {
2761 ret = drv->bdrv_eject(bs, eject_flag);
2763 if (ret == -ENOTSUP) {
2767 bs->tray_open = eject_flag;
2773 int bdrv_is_locked(BlockDriverState *bs)
2779 * Lock or unlock the media (if it is locked, the user won't be able
2780 * to eject it manually).
2782 void bdrv_set_locked(BlockDriverState *bs, int locked)
2784 BlockDriver *drv = bs->drv;
2786 trace_bdrv_set_locked(bs, locked);
2788 bs->locked = locked;
2789 if (drv && drv->bdrv_set_locked) {
2790 drv->bdrv_set_locked(bs, locked);
2794 /* needed for generic scsi interface */
2796 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2798 BlockDriver *drv = bs->drv;
2800 if (drv && drv->bdrv_ioctl)
2801 return drv->bdrv_ioctl(bs, req, buf);
2805 BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2806 unsigned long int req, void *buf,
2807 BlockDriverCompletionFunc *cb, void *opaque)
2809 BlockDriver *drv = bs->drv;
2811 if (drv && drv->bdrv_aio_ioctl)
2812 return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
2818 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2820 return qemu_memalign((bs && bs->buffer_alignment) ? bs->buffer_alignment : 512, size);
2823 void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
2825 int64_t bitmap_size;
2827 bs->dirty_count = 0;
2829 if (!bs->dirty_bitmap) {
2830 bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
2831 BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
2832 bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;
2834 bs->dirty_bitmap = qemu_mallocz(bitmap_size);
2837 if (bs->dirty_bitmap) {
2838 qemu_free(bs->dirty_bitmap);
2839 bs->dirty_bitmap = NULL;
2844 int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
2846 int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
2848 if (bs->dirty_bitmap &&
2849 (sector << BDRV_SECTOR_BITS) < bdrv_getlength(bs)) {
2850 return !!(bs->dirty_bitmap[chunk / (sizeof(unsigned long) * 8)] &
2851 (1UL << (chunk % (sizeof(unsigned long) * 8))));
2857 void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
2860 set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
2863 int64_t bdrv_get_dirty_count(BlockDriverState *bs)
2865 return bs->dirty_count;
2868 void bdrv_set_in_use(BlockDriverState *bs, int in_use)
2870 assert(bs->in_use != in_use);
2871 bs->in_use = in_use;
2874 int bdrv_in_use(BlockDriverState *bs)
2879 int bdrv_img_create(const char *filename, const char *fmt,
2880 const char *base_filename, const char *base_fmt,
2881 char *options, uint64_t img_size, int flags)
2883 QEMUOptionParameter *param = NULL, *create_options = NULL;
2884 QEMUOptionParameter *backing_fmt, *backing_file, *size;
2885 BlockDriverState *bs = NULL;
2886 BlockDriver *drv, *proto_drv;
2887 BlockDriver *backing_drv = NULL;
2890 /* Find driver and parse its options */
2891 drv = bdrv_find_format(fmt);
2893 error_report("Unknown file format '%s'", fmt);
2898 proto_drv = bdrv_find_protocol(filename);
2900 error_report("Unknown protocol '%s'", filename);
2905 create_options = append_option_parameters(create_options,
2906 drv->create_options);
2907 create_options = append_option_parameters(create_options,
2908 proto_drv->create_options);
2910 /* Create parameter list with default values */
2911 param = parse_option_parameters("", create_options, param);
2913 set_option_parameter_int(param, BLOCK_OPT_SIZE, img_size);
2915 /* Parse -o options */
2917 param = parse_option_parameters(options, create_options, param);
2918 if (param == NULL) {
2919 error_report("Invalid options for file format '%s'.", fmt);
2925 if (base_filename) {
2926 if (set_option_parameter(param, BLOCK_OPT_BACKING_FILE,
2928 error_report("Backing file not supported for file format '%s'",
2936 if (set_option_parameter(param, BLOCK_OPT_BACKING_FMT, base_fmt)) {
2937 error_report("Backing file format not supported for file "
2938 "format '%s'", fmt);
2944 backing_file = get_option_parameter(param, BLOCK_OPT_BACKING_FILE);
2945 if (backing_file && backing_file->value.s) {
2946 if (!strcmp(filename, backing_file->value.s)) {
2947 error_report("Error: Trying to create an image with the "
2948 "same filename as the backing file");
2954 backing_fmt = get_option_parameter(param, BLOCK_OPT_BACKING_FMT);
2955 if (backing_fmt && backing_fmt->value.s) {
2956 backing_drv = bdrv_find_format(backing_fmt->value.s);
2958 error_report("Unknown backing file format '%s'",
2959 backing_fmt->value.s);
2965 // The size for the image must always be specified, with one exception:
2966 // If we are using a backing file, we can obtain the size from there
2967 size = get_option_parameter(param, BLOCK_OPT_SIZE);
2968 if (size && size->value.n == -1) {
2969 if (backing_file && backing_file->value.s) {
2975 ret = bdrv_open(bs, backing_file->value.s, flags, backing_drv);
2977 error_report("Could not open '%s'", backing_file->value.s);
2980 bdrv_get_geometry(bs, &size);
2983 snprintf(buf, sizeof(buf), "%" PRId64, size);
2984 set_option_parameter(param, BLOCK_OPT_SIZE, buf);
2986 error_report("Image creation needs a size parameter");
2992 printf("Formatting '%s', fmt=%s ", filename, fmt);
2993 print_option_parameters(param);
2996 ret = bdrv_create(drv, filename, param);
2999 if (ret == -ENOTSUP) {
3000 error_report("Formatting or formatting option not supported for "
3001 "file format '%s'", fmt);
3002 } else if (ret == -EFBIG) {
3003 error_report("The image size is too large for file format '%s'",
3006 error_report("%s: error while creating %s: %s", filename, fmt,
3012 free_option_parameters(create_options);
3013 free_option_parameters(param);