#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qemu/module.h"
+#include "qemu/bswap.h"
#include "migration/migration.h"
#include "qemu/coroutine.h"
#include "qemu/cutils.h"
-
-#if defined(CONFIG_UUID)
-#include <uuid/uuid.h>
-#else
-/* TODO: move uuid emulation to some central place in QEMU. */
-#include "sysemu/sysemu.h" /* UUID_FMT */
-typedef unsigned char uuid_t[16];
-#endif
+#include "qemu/uuid.h"
/* Code configuration options. */
#define VDI_DISK_SIZE_MAX ((uint64_t)VDI_BLOCKS_IN_IMAGE_MAX * \
(uint64_t)DEFAULT_CLUSTER_SIZE)
-#if !defined(CONFIG_UUID)
-static inline void uuid_generate(uuid_t out)
-{
- memset(out, 0, sizeof(uuid_t));
-}
-
-static inline int uuid_is_null(const uuid_t uu)
-{
- uuid_t null_uuid = { 0 };
- return memcmp(uu, null_uuid, sizeof(uuid_t)) == 0;
-}
-
-# if defined(CONFIG_VDI_DEBUG)
-static inline void uuid_unparse(const uuid_t uu, char *out)
-{
- snprintf(out, 37, UUID_FMT,
- uu[0], uu[1], uu[2], uu[3], uu[4], uu[5], uu[6], uu[7],
- uu[8], uu[9], uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]);
-}
-# endif
-#endif
-
typedef struct {
char text[0x40];
uint32_t signature;
uint32_t block_extra; /* unused here */
uint32_t blocks_in_image;
uint32_t blocks_allocated;
- uuid_t uuid_image;
- uuid_t uuid_last_snap;
- uuid_t uuid_link;
- uuid_t uuid_parent;
+ QemuUUID uuid_image;
+ QemuUUID uuid_last_snap;
+ QemuUUID uuid_link;
+ QemuUUID uuid_parent;
uint64_t unused2[7];
} QEMU_PACKED VdiHeader;
Error *migration_blocker;
} BDRVVdiState;
-/* Change UUID from little endian (IPRT = VirtualBox format) to big endian
- * format (network byte order, standard, see RFC 4122) and vice versa.
- */
-static void uuid_convert(uuid_t uuid)
-{
- bswap32s((uint32_t *)&uuid[0]);
- bswap16s((uint16_t *)&uuid[4]);
- bswap16s((uint16_t *)&uuid[6]);
-}
-
static void vdi_header_to_cpu(VdiHeader *header)
{
le32_to_cpus(&header->signature);
le32_to_cpus(&header->block_extra);
le32_to_cpus(&header->blocks_in_image);
le32_to_cpus(&header->blocks_allocated);
- uuid_convert(header->uuid_image);
- uuid_convert(header->uuid_last_snap);
- uuid_convert(header->uuid_link);
- uuid_convert(header->uuid_parent);
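+ /* On disk, VDI stores UUIDs little endian (IPRT, i.e. VirtualBox,
+ * format); swap them to the big-endian RFC 4122 layout used by
+ * QemuUUID. */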
+ qemu_uuid_bswap(&header->uuid_image);
+ qemu_uuid_bswap(&header->uuid_last_snap);
+ qemu_uuid_bswap(&header->uuid_link);
+ qemu_uuid_bswap(&header->uuid_parent);
}
static void vdi_header_to_le(VdiHeader *header)
cpu_to_le32s(&header->block_extra);
cpu_to_le32s(&header->blocks_in_image);
cpu_to_le32s(&header->blocks_allocated);
- uuid_convert(header->uuid_image);
- uuid_convert(header->uuid_last_snap);
- uuid_convert(header->uuid_link);
- uuid_convert(header->uuid_parent);
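+ /* Swap the QemuUUIDs back to the little-endian on-disk format;
+ * qemu_uuid_bswap() is its own inverse. */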
+ qemu_uuid_bswap(&header->uuid_image);
+ qemu_uuid_bswap(&header->uuid_last_snap);
+ qemu_uuid_bswap(&header->uuid_link);
+ qemu_uuid_bswap(&header->uuid_parent);
}
#if defined(CONFIG_VDI_DEBUG)
logout("\n");
- ret = bdrv_read(bs->file->bs, 0, (uint8_t *)&header, 1);
+ ret = bdrv_read(bs->file, 0, (uint8_t *)&header, 1);
if (ret < 0) {
goto fail;
}
(uint64_t)header.blocks_in_image * header.block_size);
ret = -ENOTSUP;
goto fail;
- } else if (!uuid_is_null(header.uuid_link)) {
+ } else if (!qemu_uuid_is_null(&header.uuid_link)) {
error_setg(errp, "unsupported VDI image (non-NULL link UUID)");
ret = -ENOTSUP;
goto fail;
- } else if (!uuid_is_null(header.uuid_parent)) {
+ } else if (!qemu_uuid_is_null(&header.uuid_parent)) {
error_setg(errp, "unsupported VDI image (non-NULL parent UUID)");
ret = -ENOTSUP;
goto fail;
goto fail;
}
- ret = bdrv_read(bs->file->bs, s->bmap_sector, (uint8_t *)s->bmap,
+ ret = bdrv_read(bs->file, s->bmap_sector, (uint8_t *)s->bmap,
bmap_size);
if (ret < 0) {
goto fail_free_bmap;
return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
}
-static int vdi_co_read(BlockDriverState *bs,
- int64_t sector_num, uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVdiState *s = bs->opaque;
+ QEMUIOVector local_qiov;
uint32_t bmap_entry;
uint32_t block_index;
- uint32_t sector_in_block;
- uint32_t n_sectors;
+ uint32_t offset_in_block;
+ uint32_t n_bytes;
+ uint64_t bytes_done = 0;
int ret = 0;
logout("\n");
- while (ret >= 0 && nb_sectors > 0) {
- block_index = sector_num / s->block_sectors;
- sector_in_block = sector_num % s->block_sectors;
- n_sectors = s->block_sectors - sector_in_block;
- if (n_sectors > nb_sectors) {
- n_sectors = nb_sectors;
- }
+ qemu_iovec_init(&local_qiov, qiov->niov);
- logout("will read %u sectors starting at sector %" PRIu64 "\n",
- n_sectors, sector_num);
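+ /* Split the request at VDI block boundaries; each loop iteration
+ * covers at most one block. */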
+ while (ret >= 0 && bytes > 0) {
+ block_index = offset / s->block_size;
+ offset_in_block = offset % s->block_size;
+ n_bytes = MIN(bytes, s->block_size - offset_in_block);
+
+ logout("will read %u bytes starting at offset %" PRIu64 "\n",
+ n_bytes, offset);
/* prepare next AIO request */
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Block not allocated, return zeros, no need to wait. */
- memset(buf, 0, n_sectors * SECTOR_SIZE);
+ qemu_iovec_memset(qiov, bytes_done, 0, n_bytes);
ret = 0;
} else {
- uint64_t offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors +
- sector_in_block;
- ret = bdrv_read(bs->file->bs, offset, buf, n_sectors);
+ uint64_t data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size +
+ offset_in_block;
+
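+ /* Point local_qiov at the slice of the guest's vector that maps
+ * to this block. */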
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_preadv(bs->file, data_offset, n_bytes,
+ &local_qiov, 0);
}
- logout("%u sectors read\n", n_sectors);
+ logout("%u bytes read\n", n_bytes);
- nb_sectors -= n_sectors;
- sector_num += n_sectors;
- buf += n_sectors * SECTOR_SIZE;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
}
+ qemu_iovec_destroy(&local_qiov);
+
return ret;
}
-static int vdi_co_write(BlockDriverState *bs,
- int64_t sector_num, const uint8_t *buf, int nb_sectors)
+static int coroutine_fn
+vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
+ QEMUIOVector *qiov, int flags)
{
BDRVVdiState *s = bs->opaque;
+ QEMUIOVector local_qiov;
uint32_t bmap_entry;
uint32_t block_index;
- uint32_t sector_in_block;
- uint32_t n_sectors;
+ uint32_t offset_in_block;
+ uint32_t n_bytes;
uint32_t bmap_first = VDI_UNALLOCATED;
uint32_t bmap_last = VDI_UNALLOCATED;
uint8_t *block = NULL;
+ uint64_t bytes_done = 0;
int ret = 0;
logout("\n");
- while (ret >= 0 && nb_sectors > 0) {
- block_index = sector_num / s->block_sectors;
- sector_in_block = sector_num % s->block_sectors;
- n_sectors = s->block_sectors - sector_in_block;
- if (n_sectors > nb_sectors) {
- n_sectors = nb_sectors;
- }
+ qemu_iovec_init(&local_qiov, qiov->niov);
+
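+ /* As in vdi_co_preadv(), walk the request one VDI block at a time. */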
+ while (ret >= 0 && bytes > 0) {
+ block_index = offset / s->block_size;
+ offset_in_block = offset % s->block_size;
+ n_bytes = MIN(bytes, s->block_size - offset_in_block);
- logout("will write %u sectors starting at sector %" PRIu64 "\n",
- n_sectors, sector_num);
+ logout("will write %u bytes starting at offset %" PRIu64 "\n",
+ n_bytes, offset);
/* prepare next AIO request */
bmap_entry = le32_to_cpu(s->bmap[block_index]);
if (!VDI_IS_ALLOCATED(bmap_entry)) {
/* Allocate new block and write to it. */
- uint64_t offset;
+ uint64_t data_offset;
bmap_entry = s->header.blocks_allocated;
s->bmap[block_index] = cpu_to_le32(bmap_entry);
s->header.blocks_allocated++;
- offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors;
+ data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size;
if (block == NULL) {
block = g_malloc(s->block_size);
bmap_first = block_index;
}
bmap_last = block_index;
/* Copy data to be written to new block and zero unused parts. */
- memset(block, 0, sector_in_block * SECTOR_SIZE);
- memcpy(block + sector_in_block * SECTOR_SIZE,
- buf, n_sectors * SECTOR_SIZE);
- memset(block + (sector_in_block + n_sectors) * SECTOR_SIZE, 0,
- (s->block_sectors - n_sectors - sector_in_block) * SECTOR_SIZE);
+ memset(block, 0, offset_in_block);
+ qemu_iovec_to_buf(qiov, bytes_done, block + offset_in_block,
+ n_bytes);
+ memset(block + offset_in_block + n_bytes, 0,
+ s->block_size - n_bytes - offset_in_block);
/* Note that this coroutine does not yield anywhere from reading the
 * bmap entry until here, so in regards to all the coroutines trying
 * to write to this cluster, the one doing the allocation will
 * always be the first to try to acquire the lock.
 * Therefore, it is also the first that actually acquires the lock and
 * thus the padded cluster is written before the other coroutines can
 * write to the affected area. */
qemu_co_mutex_lock(&s->write_lock);
- ret = bdrv_write(bs->file->bs, offset, block, s->block_sectors);
+ ret = bdrv_pwrite(bs->file, data_offset, block, s->block_size);
qemu_co_mutex_unlock(&s->write_lock);
} else {
- uint64_t offset = s->header.offset_data / SECTOR_SIZE +
- (uint64_t)bmap_entry * s->block_sectors +
- sector_in_block;
+ uint64_t data_offset = s->header.offset_data +
+ (uint64_t)bmap_entry * s->block_size +
+ offset_in_block;
qemu_co_mutex_lock(&s->write_lock);
/* This lock is only used to make sure the following write operation
 * is executed after the write issued by the coroutine allocating
 * this cluster, therefore we do not need to keep it locked.
 * As stated above, the allocating coroutine will always try to lock
 * the mutex before all the other concurrent accesses to that
 * cluster, therefore at this point we can be absolutely certain
 * that that write operation has returned (there may be other writes
 * in flight, but they do not concern this very operation). */
qemu_co_mutex_unlock(&s->write_lock);
- ret = bdrv_write(bs->file->bs, offset, buf, n_sectors);
+
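+ /* Write only the slice of the guest's vector that falls into
+ * this block. */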
+ qemu_iovec_reset(&local_qiov);
+ qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
+
+ ret = bdrv_co_pwritev(bs->file, data_offset, n_bytes,
+ &local_qiov, 0);
}
- nb_sectors -= n_sectors;
- sector_num += n_sectors;
- buf += n_sectors * SECTOR_SIZE;
+ bytes -= n_bytes;
+ offset += n_bytes;
+ bytes_done += n_bytes;
- logout("%u sectors written\n", n_sectors);
+ logout("%u bytes written\n", n_bytes);
}
+ qemu_iovec_destroy(&local_qiov);
+
logout("finished data write\n");
if (ret < 0) {
return ret;
VdiHeader *header = (VdiHeader *) block;
uint8_t *base;
uint64_t offset;
+ uint32_t n_sectors;
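+ /* The header and block map updates below still go through the
+ * sector-based bdrv_write() interface. */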
logout("now writing modified header\n");
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
- ret = bdrv_write(bs->file->bs, 0, block, 1);
+ ret = bdrv_write(bs->file, 0, block, 1);
g_free(block);
block = NULL;
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
- ret = bdrv_write(bs->file->bs, offset, base, n_sectors);
+ ret = bdrv_write(bs->file, offset, base, n_sectors);
}
return ret;
if (image_type == VDI_TYPE_STATIC) {
header.blocks_allocated = blocks;
}
- uuid_generate(header.uuid_image);
- uuid_generate(header.uuid_last_snap);
+ qemu_uuid_generate(&header.uuid_image);
+ qemu_uuid_generate(&header.uuid_last_snap);
/* There is no need to set header.uuid_link or header.uuid_parent here. */
#if defined(CONFIG_VDI_DEBUG)
vdi_header_print(&header);
#endif
vdi_header_to_le(&header);
- ret = blk_pwrite(blk, offset, &header, sizeof(header));
+ ret = blk_pwrite(blk, offset, &header, sizeof(header), 0);
if (ret < 0) {
error_setg(errp, "Error writing header to %s", filename);
goto exit;
bmap[i] = VDI_UNALLOCATED;
}
}
- ret = blk_pwrite(blk, offset, bmap, bmap_size);
+ ret = blk_pwrite(blk, offset, bmap, bmap_size, 0);
if (ret < 0) {
error_setg(errp, "Error writing bmap to %s", filename);
goto exit;
.bdrv_co_get_block_status = vdi_co_get_block_status,
.bdrv_make_empty = vdi_make_empty,
- .bdrv_read = vdi_co_read,
+ .bdrv_co_preadv = vdi_co_preadv,
#if defined(CONFIG_VDI_WRITE)
- .bdrv_write = vdi_co_write,
+ .bdrv_co_pwritev = vdi_co_pwritev,
#endif
.bdrv_get_info = vdi_get_info,