* THE SOFTWARE.
*/
#include "config-host.h"
-#ifdef HOST_BSD
-/* include native header before sys-queue.h */
-#include <sys/queue.h>
-#endif
-
#include "qemu-common.h"
#include "monitor.h"
#include "block_int.h"
+#include "module.h"
-#ifdef HOST_BSD
+#ifdef CONFIG_BSD
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
+#include <sys/queue.h>
#ifndef __DragonFly__
#include <sys/disk.h>
#endif
#define SECTOR_BITS 9
#define SECTOR_SIZE (1 << SECTOR_BITS)
+#define SECTORS_PER_DIRTY_CHUNK 8
-static AIOPool vectored_aio_pool;
-
-typedef struct BlockDriverAIOCBSync {
- BlockDriverAIOCB common;
- QEMUBH *bh;
- int ret;
-} BlockDriverAIOCBSync;
-
-static BlockDriverAIOCB *bdrv_aio_read_em(BlockDriverState *bs,
- int64_t sector_num, uint8_t *buf, int nb_sectors,
+static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque);
+static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque);
-static BlockDriverAIOCB *bdrv_aio_write_em(BlockDriverState *bs,
- int64_t sector_num, const uint8_t *buf, int nb_sectors,
+static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque);
-static void bdrv_aio_cancel_em(BlockDriverAIOCB *acb);
static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors);
static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
static BlockDriver *first_drv;
+/* If non-zero, use only whitelisted block drivers */
+static int use_bdrv_whitelist;
+
int path_is_absolute(const char *path)
{
const char *p;
}
}
-
-static void bdrv_register(BlockDriver *bdrv)
+void bdrv_register(BlockDriver *bdrv)
{
- if (!bdrv->bdrv_aio_read) {
+ if (!bdrv->bdrv_aio_readv) {
/* add AIO emulation layer */
- bdrv->bdrv_aio_read = bdrv_aio_read_em;
- bdrv->bdrv_aio_write = bdrv_aio_write_em;
- bdrv->bdrv_aio_cancel = bdrv_aio_cancel_em;
- bdrv->aiocb_size = sizeof(BlockDriverAIOCBSync);
+ bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
+ bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
} else if (!bdrv->bdrv_read) {
/* add synchronous IO emulation layer */
bdrv->bdrv_read = bdrv_read_em;
bdrv->bdrv_write = bdrv_write_em;
}
- aio_pool_init(&bdrv->aio_pool, bdrv->aiocb_size, bdrv->bdrv_aio_cancel);
+
+ if (!bdrv->bdrv_aio_flush)
+ bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
+
bdrv->next = first_drv;
first_drv = bdrv;
}
return NULL;
}
-int bdrv_create2(BlockDriver *drv,
- const char *filename, int64_t size_in_sectors,
- const char *backing_file, const char *backing_format,
- int flags)
+static int bdrv_is_whitelisted(BlockDriver *drv)
{
- if (drv->bdrv_create2)
- return drv->bdrv_create2(filename, size_in_sectors, backing_file,
- backing_format, flags);
- if (drv->bdrv_create)
- return drv->bdrv_create(filename, size_in_sectors, backing_file,
- flags);
- return -ENOTSUP;
+ static const char *whitelist[] = {
+ CONFIG_BDRV_WHITELIST
+ };
+ const char **p;
+
+ if (!whitelist[0])
+ return 1; /* no whitelist, anything goes */
+
+ for (p = whitelist; *p; p++) {
+ if (!strcmp(drv->format_name, *p)) {
+ return 1;
+ }
+ }
+ return 0;
}
-int bdrv_create(BlockDriver *drv,
- const char *filename, int64_t size_in_sectors,
- const char *backing_file, int flags)
+BlockDriver *bdrv_find_whitelisted_format(const char *format_name)
+{
+ BlockDriver *drv = bdrv_find_format(format_name);
+ return drv && bdrv_is_whitelisted(drv) ? drv : NULL;
+}
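+
+/*
+ * For illustration: in a build configured with, say, "qcow2" and "raw"
+ * whitelisted (assuming CONFIG_BDRV_WHITELIST expands to a
+ * NULL-terminated list of those names),
+ * bdrv_find_whitelisted_format("qcow2") returns the qcow2 driver while
+ * bdrv_find_whitelisted_format("vmdk") returns NULL even though the
+ * vmdk driver is registered.
+ */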
+
+int bdrv_create(BlockDriver *drv, const char *filename,
+ QEMUOptionParameter *options)
{
if (!drv->bdrv_create)
return -ENOTSUP;
- return drv->bdrv_create(filename, size_in_sectors, backing_file, flags);
+
+ return drv->bdrv_create(filename, options);
}
#ifdef _WIN32
filename[1] == ':');
}
-static int is_windows_drive(const char *filename)
+int is_windows_drive(const char *filename)
{
if (is_windows_drive_prefix(filename) &&
filename[2] == '\0')
#ifdef _WIN32
if (is_windows_drive(filename) ||
is_windows_drive_prefix(filename))
- return &bdrv_raw;
+ return bdrv_find_format("raw");
#endif
p = strchr(filename, ':');
if (!p)
- return &bdrv_raw;
+ return bdrv_find_format("raw");
len = p - filename;
if (len > sizeof(protocol) - 1)
len = sizeof(protocol) - 1;
return NULL;
}
-/* XXX: force raw format if block or character device ? It would
- simplify the BSD case */
+/*
+ * Detect host devices. By convention, /dev/cdrom[N] is always
+ * recognized as a host CDROM.
+ */
+static BlockDriver *find_hdev_driver(const char *filename)
+{
+ int score_max = 0, score;
+ BlockDriver *drv = NULL, *d;
+
+ for (d = first_drv; d; d = d->next) {
+ if (d->bdrv_probe_device) {
+ score = d->bdrv_probe_device(filename);
+ if (score > score_max) {
+ score_max = score;
+ drv = d;
+ }
+ }
+ }
+
+ return drv;
+}
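+
+/*
+ * For example (hypothetical scores): for "/dev/cdrom" a host CD-ROM
+ * driver's bdrv_probe_device() might return 100 while a generic host
+ * device driver returns 50; the CD-ROM driver wins. If no driver
+ * reports a positive score, NULL is returned and the caller falls
+ * back to probing image formats.
+ */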
+
static BlockDriver *find_image_format(const char *filename)
{
int ret, score, score_max;
uint8_t buf[2048];
BlockDriverState *bs;
- /* detect host devices. By convention, /dev/cdrom[N] is always
- recognized as a host CDROM */
- if (strstart(filename, "/dev/cdrom", NULL))
- return &bdrv_host_device;
-#ifdef _WIN32
- if (is_windows_drive(filename))
- return &bdrv_host_device;
-#else
- {
- struct stat st;
- if (stat(filename, &st) >= 0 &&
- (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode))) {
- return &bdrv_host_device;
- }
- }
-#endif
-
drv = find_protocol(filename);
/* no need to test disk image formats for vvfat */
- if (drv == &bdrv_vvfat)
+ if (drv && strcmp(drv->format_name, "vvfat") == 0)
return drv;
ret = bdrv_file_open(&bs, filename, BDRV_O_RDONLY);
int bdrv_open2(BlockDriverState *bs, const char *filename, int flags,
BlockDriver *drv)
{
- int ret, open_flags;
+ int ret, open_flags, try_rw;
char tmp_filename[PATH_MAX];
char backing_filename[PATH_MAX];
- bs->read_only = 0;
bs->is_temporary = 0;
bs->encrypted = 0;
bs->valid_key = 0;
+ /* buffer_alignment defaults to 512; drivers can change this value */
+ bs->buffer_alignment = 512;
if (flags & BDRV_O_SNAPSHOT) {
BlockDriverState *bs1;
int64_t total_size;
int is_protocol = 0;
+ BlockDriver *bdrv_qcow2;
+ QEMUOptionParameter *options;
/* if snapshot, we create a temporary backing file and open it
instead of opening 'filename' directly */
else
realpath(filename, backing_filename);
- ret = bdrv_create2(&bdrv_qcow2, tmp_filename,
- total_size, backing_filename,
- (drv ? drv->format_name : NULL), 0);
+ bdrv_qcow2 = bdrv_find_format("qcow2");
+ options = parse_option_parameters("", bdrv_qcow2->create_options, NULL);
+
+ set_option_parameter_int(options, BLOCK_OPT_SIZE, total_size * 512);
+ set_option_parameter(options, BLOCK_OPT_BACKING_FILE, backing_filename);
+ if (drv) {
+ set_option_parameter(options, BLOCK_OPT_BACKING_FMT,
+ drv->format_name);
+ }
+
+ ret = bdrv_create(bdrv_qcow2, tmp_filename, options);
if (ret < 0) {
return ret;
}
+
filename = tmp_filename;
- drv = &bdrv_qcow2;
+ drv = bdrv_qcow2;
bs->is_temporary = 1;
}
if (flags & BDRV_O_FILE) {
drv = find_protocol(filename);
} else if (!drv) {
- drv = find_image_format(filename);
+ drv = find_hdev_driver(filename);
+ if (!drv) {
+ drv = find_image_format(filename);
+ }
}
if (!drv) {
ret = -ENOENT;
}
bs->drv = drv;
bs->opaque = qemu_mallocz(drv->instance_size);
+
+ /*
+ * Yes, BDRV_O_NOCACHE aka O_DIRECT means we have to present a
+ * write cache to the guest. We do need the fdatasync to flush
+ * out transactions for block allocations, and we may have a
+ * volatile write cache in our backing device to deal with.
+ */
+ if (flags & (BDRV_O_CACHE_WB|BDRV_O_NOCACHE))
+ bs->enable_write_cache = 1;
+
/* Note: for compatibility, we open disk image files as RDWR, and
RDONLY as fallback */
+ try_rw = !bs->read_only || bs->is_temporary;
if (!(flags & BDRV_O_FILE))
- open_flags = BDRV_O_RDWR | (flags & BDRV_O_CACHE_MASK);
+ open_flags = (try_rw ? BDRV_O_RDWR : 0) |
+ (flags & (BDRV_O_CACHE_MASK|BDRV_O_NATIVE_AIO));
else
open_flags = flags & ~(BDRV_O_FILE | BDRV_O_SNAPSHOT);
- ret = drv->bdrv_open(bs, filename, open_flags);
+ if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv))
+ ret = -ENOTSUP;
+ else
+ ret = drv->bdrv_open(bs, filename, open_flags);
if ((ret == -EACCES || ret == -EPERM) && !(flags & BDRV_O_FILE)) {
ret = drv->bdrv_open(bs, filename, open_flags & ~BDRV_O_RDWR);
bs->read_only = 1;
/* if there is a backing file, use it */
BlockDriver *back_drv = NULL;
bs->backing_hd = bdrv_new("");
+ /* pass on read_only property to the backing_hd */
+ bs->backing_hd->read_only = bs->read_only;
path_combine(backing_filename, sizeof(backing_filename),
filename, bs->backing_file);
if (bs->backing_format[0] != '\0')
qemu_free(bs);
}
+/*
+ * Run consistency checks on an image
+ *
+ * Returns the number of errors or -errno when an internal error occurs
+ */
+int bdrv_check(BlockDriverState *bs)
+{
+ if (bs->drv->bdrv_check == NULL) {
+ return -ENOTSUP;
+ }
+
+ return bs->drv->bdrv_check(bs);
+}
+
/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
len = bdrv_getlength(bs);
- if ((offset + size) > len)
+ if (offset < 0)
+ return -EIO;
+
+ if ((offset > len) || (len - offset < size))
return -EIO;
return 0;
return drv->bdrv_read(bs, sector_num, buf, nb_sectors);
}
+static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
+ int nb_sectors, int dirty)
+{
+ int64_t start, end;
+ start = sector_num / SECTORS_PER_DIRTY_CHUNK;
+ end = (sector_num + nb_sectors - 1) / SECTORS_PER_DIRTY_CHUNK;
+
+ for (; start <= end; start++) {
+ bs->dirty_bitmap[start] = dirty;
+ }
+}
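+
+/*
+ * Worked example: with 512-byte sectors and SECTORS_PER_DIRTY_CHUNK = 8,
+ * one chunk covers 4 KiB. A write of 8 sectors starting at sector 5
+ * spans sectors 5..12, so start = 5 / 8 = 0 and end = (5 + 8 - 1) / 8 = 1,
+ * marking chunks 0 and 1. An aligned write of sectors 0..7 marks
+ * chunk 0 only.
+ */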
+
/* Return < 0 if error. Important errors are:
-EIO generic I/O error (may happen for all errors)
-ENOMEDIUM No media inserted.
return -EACCES;
if (bdrv_check_request(bs, sector_num, nb_sectors))
return -EIO;
-
+
+ if (bs->dirty_tracking) {
+ set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
+ }
+
return drv->bdrv_write(bs, sector_num, buf, nb_sectors);
}
return -ENOMEDIUM;
if (!drv->bdrv_truncate)
return -ENOTSUP;
+ if (bs->read_only)
+ return -EACCES;
return drv->bdrv_truncate(bs, offset);
}
return bs->read_only;
}
+int bdrv_set_read_only(BlockDriverState *bs, int read_only)
+{
+ int ret = bs->read_only;
+ bs->read_only = read_only;
+ return ret;
+}
+
int bdrv_is_sg(BlockDriverState *bs)
{
return bs->sg;
}
+int bdrv_enable_write_cache(BlockDriverState *bs)
+{
+ return bs->enable_write_cache;
+}
+
/* XXX: no longer used */
void bdrv_set_change_cb(BlockDriverState *bs,
void (*change_cb)(void *opaque), void *opaque)
return -ENOMEDIUM;
if (!drv->bdrv_write_compressed)
return -ENOTSUP;
+ if (bdrv_check_request(bs, sector_num, nb_sectors))
+ return -EIO;
+
+ if (bs->dirty_tracking) {
+ set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
+ }
+
return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
}
return drv->bdrv_get_info(bs, bdi);
}
-int bdrv_put_buffer(BlockDriverState *bs, const uint8_t *buf, int64_t pos, int size)
+int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
+ int64_t pos, int size)
{
BlockDriver *drv = bs->drv;
if (!drv)
return -ENOMEDIUM;
- if (!drv->bdrv_put_buffer)
+ if (!drv->bdrv_save_vmstate)
return -ENOTSUP;
- return drv->bdrv_put_buffer(bs, buf, pos, size);
+ return drv->bdrv_save_vmstate(bs, buf, pos, size);
}
-int bdrv_get_buffer(BlockDriverState *bs, uint8_t *buf, int64_t pos, int size)
+int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
+ int64_t pos, int size)
{
BlockDriver *drv = bs->drv;
if (!drv)
return -ENOMEDIUM;
- if (!drv->bdrv_get_buffer)
+ if (!drv->bdrv_load_vmstate)
return -ENOTSUP;
- return drv->bdrv_get_buffer(bs, buf, pos, size);
+ return drv->bdrv_load_vmstate(bs, buf, pos, size);
}
/**************************************************************/
/**************************************************************/
/* async I/Os */
-typedef struct VectorTranslationAIOCB {
- BlockDriverAIOCB common;
- QEMUIOVector *iov;
- uint8_t *bounce;
- int is_write;
- BlockDriverAIOCB *aiocb;
-} VectorTranslationAIOCB;
-
-static void bdrv_aio_cancel_vector(BlockDriverAIOCB *_acb)
-{
- VectorTranslationAIOCB *acb
- = container_of(_acb, VectorTranslationAIOCB, common);
-
- bdrv_aio_cancel(acb->aiocb);
-}
-
-static void bdrv_aio_rw_vector_cb(void *opaque, int ret)
-{
- VectorTranslationAIOCB *s = (VectorTranslationAIOCB *)opaque;
-
- if (!s->is_write) {
- qemu_iovec_from_buffer(s->iov, s->bounce, s->iov->size);
- }
- qemu_vfree(s->bounce);
- s->common.cb(s->common.opaque, ret);
- qemu_aio_release(s);
-}
-
-static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
- int64_t sector_num,
- QEMUIOVector *iov,
- int nb_sectors,
- BlockDriverCompletionFunc *cb,
- void *opaque,
- int is_write)
-
-{
- VectorTranslationAIOCB *s = qemu_aio_get_pool(&vectored_aio_pool, bs,
- cb, opaque);
-
- s->iov = iov;
- s->bounce = qemu_memalign(512, nb_sectors * 512);
- s->is_write = is_write;
- if (is_write) {
- qemu_iovec_to_buffer(s->iov, s->bounce);
- s->aiocb = bdrv_aio_write(bs, sector_num, s->bounce, nb_sectors,
- bdrv_aio_rw_vector_cb, s);
- } else {
- s->aiocb = bdrv_aio_read(bs, sector_num, s->bounce, nb_sectors,
- bdrv_aio_rw_vector_cb, s);
- }
- if (!s->aiocb) {
- qemu_vfree(s->bounce);
- qemu_aio_release(s);
- return NULL;
- }
- return &s->common;
-}
-
BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
+ QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque)
-{
- if (bdrv_check_request(bs, sector_num, nb_sectors))
- return NULL;
-
- return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
- cb, opaque, 0);
-}
-
-BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
- QEMUIOVector *iov, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
-{
- if (bdrv_check_request(bs, sector_num, nb_sectors))
- return NULL;
-
- return bdrv_aio_rw_vector(bs, sector_num, iov, nb_sectors,
- cb, opaque, 1);
-}
-
-BlockDriverAIOCB *bdrv_aio_read(BlockDriverState *bs, int64_t sector_num,
- uint8_t *buf, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
{
BlockDriver *drv = bs->drv;
BlockDriverAIOCB *ret;
if (bdrv_check_request(bs, sector_num, nb_sectors))
return NULL;
- ret = drv->bdrv_aio_read(bs, sector_num, buf, nb_sectors, cb, opaque);
+ ret = drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
+ cb, opaque);
if (ret) {
/* Update stats even though technically transfer has not happened. */
return ret;
}
-BlockDriverAIOCB *bdrv_aio_write(BlockDriverState *bs, int64_t sector_num,
- const uint8_t *buf, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
+ QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
{
BlockDriver *drv = bs->drv;
BlockDriverAIOCB *ret;
if (bdrv_check_request(bs, sector_num, nb_sectors))
return NULL;
- ret = drv->bdrv_aio_write(bs, sector_num, buf, nb_sectors, cb, opaque);
+ if (bs->dirty_tracking) {
+ set_dirty_bitmap(bs, sector_num, nb_sectors, 1);
+ }
+
+ ret = drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
+ cb, opaque);
if (ret) {
/* Update stats even though technically transfer has not happened. */
return ret;
}
+
+typedef struct MultiwriteCB {
+ int error;
+ int num_requests;
+ int num_callbacks;
+ struct {
+ BlockDriverCompletionFunc *cb;
+ void *opaque;
+ QEMUIOVector *free_qiov;
+ void *free_buf;
+ } callbacks[];
+} MultiwriteCB;
+
+static void multiwrite_user_cb(MultiwriteCB *mcb)
+{
+ int i;
+
+ for (i = 0; i < mcb->num_callbacks; i++) {
+ mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
+ qemu_free(mcb->callbacks[i].free_qiov);
+ qemu_free(mcb->callbacks[i].free_buf);
+ }
+}
+
+static void multiwrite_cb(void *opaque, int ret)
+{
+ MultiwriteCB *mcb = opaque;
+
+ /* report only the first error; later failures just drop the count */
+ if (ret < 0 && !mcb->error) {
+ mcb->error = ret;
+ multiwrite_user_cb(mcb);
+ }
+
+ mcb->num_requests--;
+ if (mcb->num_requests == 0) {
+ if (mcb->error == 0) {
+ multiwrite_user_cb(mcb);
+ }
+ qemu_free(mcb);
+ }
+}
+
+static int multiwrite_req_compare(const void *a, const void *b)
+{
+ const BlockRequest *req1 = a, *req2 = b;
+
+ /* compare without truncating the int64_t difference to int */
+ return (req1->sector > req2->sector) - (req1->sector < req2->sector);
+}
+
+/*
+ * Takes a bunch of requests and tries to merge them. Returns the number of
+ * requests that remain after merging.
+ */
+static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
+ int num_reqs, MultiwriteCB *mcb)
+{
+ int i, outidx;
+
+ // Sort requests by start sector
+ qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
+
+ // Check if adjacent requests touch the same clusters. If so, combine them,
+ // filling up gaps with zero sectors.
+ outidx = 0;
+ for (i = 1; i < num_reqs; i++) {
+ int merge = 0;
+ int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
+
+ // This handles the cases that are valid for all block drivers, namely
+ // exactly sequential writes and overlapping writes.
+ if (reqs[i].sector <= oldreq_last) {
+ merge = 1;
+ }
+
+ // The block driver may decide that it makes sense to combine requests
+ // even if there is a gap of some sectors between them. In this case,
+ // the gap is filled with zeros (therefore only applicable for yet
+ // unused space in formats like qcow2).
+ if (!merge && bs->drv->bdrv_merge_requests) {
+ merge = bs->drv->bdrv_merge_requests(bs, &reqs[outidx], &reqs[i]);
+ }
+
+ if (merge) {
+ size_t size;
+ QEMUIOVector *qiov = qemu_mallocz(sizeof(*qiov));
+ qemu_iovec_init(qiov,
+ reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
+
+ // Add the first request to the merged one. If the requests are
+ // overlapping, drop the last sectors of the first request.
+ size = (reqs[i].sector - reqs[outidx].sector) << 9;
+ qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
+
+ // We might need to add some zeros between the two requests
+ if (reqs[i].sector > oldreq_last) {
+ size_t zero_bytes = (reqs[i].sector - oldreq_last) << 9;
+ uint8_t *buf = qemu_blockalign(bs, zero_bytes);
+ memset(buf, 0, zero_bytes);
+ qemu_iovec_add(qiov, buf, zero_bytes);
+ mcb->callbacks[i].free_buf = buf;
+ }
+
+ // Add the second request
+ qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
+
+ reqs[outidx].nb_sectors += reqs[i].nb_sectors;
+ reqs[outidx].qiov = qiov;
+
+ mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
+ } else {
+ outidx++;
+ reqs[outidx].sector = reqs[i].sector;
+ reqs[outidx].nb_sectors = reqs[i].nb_sectors;
+ reqs[outidx].qiov = reqs[i].qiov;
+ }
+ }
+
+ return outidx + 1;
+}
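+
+/*
+ * Merge example: a request for sectors 0..7 followed by one for
+ * sectors 8..15 collapses into a single 16-sector request whose qiov
+ * chains both original vectors. With a gap (say 0..7 and 9..16) the
+ * two are combined only if the driver's bdrv_merge_requests() hook
+ * agrees, and the skipped sector 8 is filled with zeros.
+ */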
+
+/*
+ * Submit multiple AIO write requests at once.
+ *
+ * On success, the function returns 0 and all requests in the reqs array have
+ * been submitted. In the error case this function returns -1, and any of the
+ * requests may or may not have been submitted yet. In particular, this means
+ * that the callback will be called for some of the requests but not for
+ * others. The
+ * caller must check the error field of the BlockRequest to wait for the right
+ * callbacks (if error != 0, no callback will be called).
+ *
+ * The implementation may modify the contents of the reqs array, e.g. to merge
+ * requests. However, the fields opaque and error are left unmodified as they
+ * are used to signal failure for a single request to the caller.
+ */
+int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
+{
+ BlockDriverAIOCB *acb;
+ MultiwriteCB *mcb;
+ int i;
+
+ if (num_reqs == 0) {
+ return 0;
+ }
+
+ // Create MultiwriteCB structure
+ mcb = qemu_mallocz(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
+ mcb->num_requests = 0;
+ mcb->num_callbacks = num_reqs;
+
+ for (i = 0; i < num_reqs; i++) {
+ mcb->callbacks[i].cb = reqs[i].cb;
+ mcb->callbacks[i].opaque = reqs[i].opaque;
+ }
+
+ // Check for mergeable requests
+ num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
+
+ // Run the aio requests
+ for (i = 0; i < num_reqs; i++) {
+ acb = bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
+ reqs[i].nb_sectors, multiwrite_cb, mcb);
+
+ if (acb == NULL) {
+ // We can only fail the whole thing if no request has been
+ // submitted yet. Otherwise we'll wait for the submitted AIOs to
+ // complete and report the error in the callback.
+ if (mcb->num_requests == 0) {
+ reqs[i].error = -EIO;
+ goto fail;
+ } else {
+ mcb->error = -EIO;
+ break;
+ }
+ } else {
+ mcb->num_requests++;
+ }
+ }
+
+ return 0;
+
+fail:
+ qemu_free(mcb);
+ return -1;
+}
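+
+/*
+ * Caller sketch (hypothetical names; the qiovs are assumed to be
+ * initialized and the error fields zeroed):
+ *
+ *     BlockRequest reqs[2] = {
+ *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
+ *           .cb = my_write_cb, .opaque = req0 },
+ *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
+ *           .cb = my_write_cb, .opaque = req1 },
+ *     };
+ *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
+ *         handle reqs[i].error here; callbacks fire only for
+ *         requests that were actually submitted (error == 0)
+ *     }
+ */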
+
+BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ BlockDriver *drv = bs->drv;
+
+ if (!drv)
+ return NULL;
+
+ /*
+ * Note that unlike bdrv_flush the driver is responsible for flushing a
+ * backing image if it exists.
+ */
+ return drv->bdrv_aio_flush(bs, cb, opaque);
+}
+
void bdrv_aio_cancel(BlockDriverAIOCB *acb)
{
acb->pool->cancel(acb);
/**************************************************************/
/* async block device emulation */
+typedef struct BlockDriverAIOCBSync {
+ BlockDriverAIOCB common;
+ QEMUBH *bh;
+ int ret;
+ /* vector translation state */
+ QEMUIOVector *qiov;
+ uint8_t *bounce;
+ int is_write;
+} BlockDriverAIOCBSync;
+
+static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
+{
+ BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;
+ qemu_bh_delete(acb->bh);
+ acb->bh = NULL;
+ qemu_aio_release(acb);
+}
+
+static AIOPool bdrv_em_aio_pool = {
+ .aiocb_size = sizeof(BlockDriverAIOCBSync),
+ .cancel = bdrv_aio_cancel_em,
+};
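+
+/*
+ * ACBs for the emulation layer come from this pool: qemu_aio_get()
+ * sizes the allocation from aiocb_size, and bdrv_aio_cancel(), which
+ * calls acb->pool->cancel(acb), therefore lands in bdrv_aio_cancel_em()
+ * for these requests.
+ */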
+
static void bdrv_aio_bh_cb(void *opaque)
{
BlockDriverAIOCBSync *acb = opaque;
+
+ if (!acb->is_write)
+ qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+ qemu_vfree(acb->bounce);
acb->common.cb(acb->common.opaque, acb->ret);
+ qemu_bh_delete(acb->bh);
+ acb->bh = NULL;
qemu_aio_release(acb);
}
-static BlockDriverAIOCB *bdrv_aio_read_em(BlockDriverState *bs,
- int64_t sector_num, uint8_t *buf, int nb_sectors,
- BlockDriverCompletionFunc *cb, void *opaque)
+static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
+ int64_t sector_num,
+ QEMUIOVector *qiov,
+ int nb_sectors,
+ BlockDriverCompletionFunc *cb,
+ void *opaque,
+ int is_write)
+
{
BlockDriverAIOCBSync *acb;
- int ret;
- acb = qemu_aio_get(bs, cb, opaque);
+ acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
+ acb->is_write = is_write;
+ acb->qiov = qiov;
+ acb->bounce = qemu_blockalign(bs, qiov->size);
+
if (!acb->bh)
acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
- ret = bdrv_read(bs, sector_num, buf, nb_sectors);
- acb->ret = ret;
+
+ if (is_write) {
+ qemu_iovec_to_buffer(acb->qiov, acb->bounce);
+ acb->ret = bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
+ } else {
+ acb->ret = bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
+ }
+
qemu_bh_schedule(acb->bh);
+
return &acb->common;
}
-static BlockDriverAIOCB *bdrv_aio_write_em(BlockDriverState *bs,
- int64_t sector_num, const uint8_t *buf, int nb_sectors,
+static BlockDriverAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
+}
+
+static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
+ int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
+}
+
+static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque)
{
BlockDriverAIOCBSync *acb;
- int ret;
- acb = qemu_aio_get(bs, cb, opaque);
+ acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
+ acb->is_write = 1; /* don't bounce in the completion handler */
+ acb->qiov = NULL;
+ acb->bounce = NULL;
+ acb->ret = 0;
+
if (!acb->bh)
acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
- ret = bdrv_write(bs, sector_num, buf, nb_sectors);
- acb->ret = ret;
+
+ bdrv_flush(bs);
qemu_bh_schedule(acb->bh);
return &acb->common;
}
-static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
-{
- BlockDriverAIOCBSync *acb = (BlockDriverAIOCBSync *)blockacb;
- qemu_bh_cancel(acb->bh);
- qemu_aio_release(acb);
-}
-
/**************************************************************/
/* sync block device emulation */
{
int async_ret;
BlockDriverAIOCB *acb;
+ struct iovec iov;
+ QEMUIOVector qiov;
+
+ async_context_push();
async_ret = NOT_DONE;
- acb = bdrv_aio_read(bs, sector_num, buf, nb_sectors,
- bdrv_rw_em_cb, &async_ret);
- if (acb == NULL)
- return -1;
+ iov.iov_base = (void *)buf;
+ iov.iov_len = nb_sectors * 512;
+ qemu_iovec_init_external(&qiov, &iov, 1);
+ acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
+ bdrv_rw_em_cb, &async_ret);
+ if (acb == NULL) {
+ async_ret = -1;
+ goto fail;
+ }
while (async_ret == NOT_DONE) {
qemu_aio_wait();
}
+
+fail:
+ async_context_pop();
return async_ret;
}
{
int async_ret;
BlockDriverAIOCB *acb;
+ struct iovec iov;
+ QEMUIOVector qiov;
+
+ async_context_push();
async_ret = NOT_DONE;
- acb = bdrv_aio_write(bs, sector_num, buf, nb_sectors,
- bdrv_rw_em_cb, &async_ret);
- if (acb == NULL)
- return -1;
+ iov.iov_base = (void *)buf;
+ iov.iov_len = nb_sectors * 512;
+ qemu_iovec_init_external(&qiov, &iov, 1);
+ acb = bdrv_aio_writev(bs, sector_num, &qiov, nb_sectors,
+ bdrv_rw_em_cb, &async_ret);
+ if (acb == NULL) {
+ async_ret = -1;
+ goto fail;
+ }
while (async_ret == NOT_DONE) {
qemu_aio_wait();
}
+
+fail:
+ async_context_pop();
return async_ret;
}
void bdrv_init(void)
{
- aio_pool_init(&vectored_aio_pool, sizeof(VectorTranslationAIOCB),
- bdrv_aio_cancel_vector);
-
- bdrv_register(&bdrv_raw);
- bdrv_register(&bdrv_host_device);
-#ifndef _WIN32
- bdrv_register(&bdrv_cow);
-#endif
- bdrv_register(&bdrv_qcow);
- bdrv_register(&bdrv_vmdk);
- bdrv_register(&bdrv_cloop);
- bdrv_register(&bdrv_dmg);
- bdrv_register(&bdrv_bochs);
- bdrv_register(&bdrv_vpc);
- bdrv_register(&bdrv_vvfat);
- bdrv_register(&bdrv_qcow2);
- bdrv_register(&bdrv_parallels);
- bdrv_register(&bdrv_nbd);
+ module_call_init(MODULE_INIT_BLOCK);
}
-void aio_pool_init(AIOPool *pool, int aiocb_size,
- void (*cancel)(BlockDriverAIOCB *acb))
+void bdrv_init_with_whitelist(void)
{
- pool->aiocb_size = aiocb_size;
- pool->cancel = cancel;
- pool->free_aiocb = NULL;
+ use_bdrv_whitelist = 1;
+ bdrv_init();
}
-void *qemu_aio_get_pool(AIOPool *pool, BlockDriverState *bs,
- BlockDriverCompletionFunc *cb, void *opaque)
+void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
+ BlockDriverCompletionFunc *cb, void *opaque)
{
BlockDriverAIOCB *acb;
return acb;
}
-void *qemu_aio_get(BlockDriverState *bs, BlockDriverCompletionFunc *cb,
- void *opaque)
-{
- return qemu_aio_get_pool(&bs->drv->aio_pool, bs, cb, opaque);
-}
-
void qemu_aio_release(void *p)
{
BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
/**
* If eject_flag is TRUE, eject the media. Otherwise, close the tray
*/
-void bdrv_eject(BlockDriverState *bs, int eject_flag)
+int bdrv_eject(BlockDriverState *bs, int eject_flag)
{
BlockDriver *drv = bs->drv;
int ret;
+ if (bs->locked) {
+ return -EBUSY;
+ }
+
if (!drv || !drv->bdrv_eject) {
ret = -ENOTSUP;
} else {
if (ret == -ENOTSUP) {
if (eject_flag)
bdrv_close(bs);
+ ret = 0;
}
+
+ return ret;
}
int bdrv_is_locked(BlockDriverState *bs)
return drv->bdrv_aio_ioctl(bs, req, buf, cb, opaque);
return NULL;
}
+
+void *qemu_blockalign(BlockDriverState *bs, size_t size)
+{
+ return qemu_memalign((bs && bs->buffer_alignment) ?
+ bs->buffer_alignment : 512, size);
+}
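+
+/*
+ * For example, a driver opening its file with O_DIRECT might set
+ * bs->buffer_alignment to the device's logical sector size (say 4096);
+ * buffers from qemu_blockalign() then meet the kernel's direct-I/O
+ * alignment requirements, while the 512-byte default keeps other
+ * drivers unchanged.
+ */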
+
+void bdrv_set_dirty_tracking(BlockDriverState *bs, int enable)
+{
+ int64_t bitmap_size;
+ if (enable) {
+ if (bs->dirty_tracking == 0) {
+ /* one dirty-status byte per chunk, rounded up */
+ bitmap_size = bdrv_getlength(bs) >> SECTOR_BITS;
+ bitmap_size /= SECTORS_PER_DIRTY_CHUNK;
+ bitmap_size++;
+
+ bs->dirty_bitmap = qemu_mallocz(bitmap_size);
+
+ bs->dirty_tracking = enable;
+ }
+ } else {
+ if (bs->dirty_tracking != 0) {
+ qemu_free(bs->dirty_bitmap);
+ bs->dirty_tracking = enable;
+ }
+ }
+}
+
+int bdrv_get_dirty(BlockDriverState *bs, int64_t sector)
+{
+ int64_t chunk = sector / (int64_t)SECTORS_PER_DIRTY_CHUNK;
+
+ if (bs->dirty_bitmap != NULL &&
+ (sector << SECTOR_BITS) < bdrv_getlength(bs)) {
+ return bs->dirty_bitmap[chunk];
+ } else {
+ return 0;
+ }
+}
+
+void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector,
+ int nr_sectors)
+{
+ set_dirty_bitmap(bs, cur_sector, nr_sectors, 0);
+}
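+
+/*
+ * A consumer such as block migration would typically scan the device
+ * in chunk-sized steps (a sketch, stepping by
+ * bdrv_get_sectors_per_chunk()):
+ *
+ *     for (sector = 0; sector < total; sector += chunk_sectors) {
+ *         if (bdrv_get_dirty(bs, sector)) {
+ *             copy the chunk out, then
+ *             bdrv_reset_dirty(bs, sector, chunk_sectors);
+ *         }
+ *     }
+ */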
+
+int bdrv_get_sectors_per_chunk(void)
+{
+ /* the chunk size must be a power of two */
+ return SECTORS_PER_DIRTY_CHUNK;
+}