inactive_header->log_guid = *log_guid;
}
- ret = vhdx_write_header(bs->file, inactive_header, header_offset, true);
+ ret = vhdx_write_header(bs->file->bs, inactive_header, header_offset, true);
if (ret < 0) {
goto exit;
}
/* We have to read the whole VHDX_HEADER_SIZE instead of
* sizeof(VHDXHeader), because the checksum is over the whole
* region */
- ret = bdrv_pread(bs->file, VHDX_HEADER1_OFFSET, buffer, VHDX_HEADER_SIZE);
+ ret = bdrv_pread(bs->file->bs, VHDX_HEADER1_OFFSET, buffer,
+ VHDX_HEADER_SIZE);
if (ret < 0) {
goto fail;
}
}
}
- ret = bdrv_pread(bs->file, VHDX_HEADER2_OFFSET, buffer, VHDX_HEADER_SIZE);
+ ret = bdrv_pread(bs->file->bs, VHDX_HEADER2_OFFSET, buffer,
+ VHDX_HEADER_SIZE);
if (ret < 0) {
goto fail;
}
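
The comment above is the reason both header copies are read in full: the VHDX header checksum is a CRC-32C taken over the entire VHDX_HEADER_SIZE region with the 4-byte checksum field counted as zero, not just over sizeof(VHDXHeader). A minimal sketch of such a check, assuming QEMU's crc32c() from "qemu/crc32c.h" and le32_to_cpu() from "qemu/bswap.h"; the seed and byte order follow my reading of the driver, so treat the details as an assumption rather than as the driver's actual helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>
    #include "qemu/bswap.h"   /* le32_to_cpu() */
    #include "qemu/crc32c.h"  /* crc32c() */

    /* Validate a checksummed VHDX region: the stored CRC was computed with its
     * own 4-byte field zeroed, so save it, zero it, hash, then restore. */
    static bool vhdx_region_checksum_ok(uint8_t *buf, size_t size, int crc_offset)
    {
        uint32_t stored, computed;

        memcpy(&stored, buf + crc_offset, sizeof(stored));
        memset(buf + crc_offset, 0, sizeof(stored));
        computed = crc32c(0xffffffff, buf, size);
        memcpy(buf + crc_offset, &stored, sizeof(stored));  /* leave buf intact */

        return le32_to_cpu(stored) == computed;
    }
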
* whole block */
buffer = qemu_blockalign(bs, VHDX_HEADER_BLOCK_SIZE);
- ret = bdrv_pread(bs->file, VHDX_REGION_TABLE_OFFSET, buffer,
+ ret = bdrv_pread(bs->file->bs, VHDX_REGION_TABLE_OFFSET, buffer,
VHDX_HEADER_BLOCK_SIZE);
if (ret < 0) {
goto fail;
buffer = qemu_blockalign(bs, VHDX_METADATA_TABLE_MAX_SIZE);
- ret = bdrv_pread(bs->file, s->metadata_rt.file_offset, buffer,
+ ret = bdrv_pread(bs->file->bs, s->metadata_rt.file_offset, buffer,
VHDX_METADATA_TABLE_MAX_SIZE);
if (ret < 0) {
goto exit;
goto exit;
}
- ret = bdrv_pread(bs->file,
+ ret = bdrv_pread(bs->file->bs,
s->metadata_entries.file_parameters_entry.offset
+ s->metadata_rt.file_offset,
&s->params,
/* determine virtual disk size, logical sector size,
* and phys sector size */
- ret = bdrv_pread(bs->file,
+ ret = bdrv_pread(bs->file->bs,
s->metadata_entries.virtual_disk_size_entry.offset
+ s->metadata_rt.file_offset,
&s->virtual_disk_size,
if (ret < 0) {
goto exit;
}
- ret = bdrv_pread(bs->file,
+ ret = bdrv_pread(bs->file->bs,
s->metadata_entries.logical_sector_size_entry.offset
+ s->metadata_rt.file_offset,
&s->logical_sector_size,
if (ret < 0) {
goto exit;
}
- ret = bdrv_pread(bs->file,
+ ret = bdrv_pread(bs->file->bs,
s->metadata_entries.phys_sector_size_entry.offset
+ s->metadata_rt.file_offset,
&s->physical_sector_size,
QLIST_INIT(&s->regions);
/* validate the file signature */
- ret = bdrv_pread(bs->file, 0, &signature, sizeof(uint64_t));
+ ret = bdrv_pread(bs->file->bs, 0, &signature, sizeof(uint64_t));
if (ret < 0) {
goto fail;
}
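
The check that follows the read above (elided here) compares those eight bytes against the ASCII signature "vhdxfile" that every VHDX image starts with. Roughly, and only as a sketch of the intent rather than a quote of the driver:

    /* signature is the uint64_t filled by the bdrv_pread() above */
    if (memcmp(&signature, "vhdxfile", 8) != 0) {
        ret = -EINVAL;   /* not a VHDX image */
        goto fail;
    }
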
}
/* s->bat is freed in vhdx_close() */
- s->bat = qemu_try_blockalign(bs->file, s->bat_rt.length);
+ s->bat = qemu_try_blockalign(bs->file->bs, s->bat_rt.length);
if (s->bat == NULL) {
ret = -ENOMEM;
goto fail;
}
- ret = bdrv_pread(bs->file, s->bat_offset, s->bat, s->bat_rt.length);
+ ret = bdrv_pread(bs->file->bs, s->bat_offset, s->bat, s->bat_rt.length);
if (ret < 0) {
goto fail;
}
/* TODO: differencing files */
/* Disable migration when VHDX images are used */
- error_set(&s->migration_blocker,
- QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
- "vhdx", bdrv_get_device_name(bs), "live migration");
+ error_setg(&s->migration_blocker, "The vhdx format used by node '%s' "
+ "does not support live migration",
+ bdrv_get_device_or_node_name(bs));
migrate_add_blocker(s->migration_blocker);
return 0;
/* check the payload block state */
switch (s->bat[sinfo.bat_idx] & VHDX_BAT_STATE_BIT_MASK) {
case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
- case PAYLOAD_BLOCK_UNDEFINED: /* fall through */
- case PAYLOAD_BLOCK_UNMAPPED: /* fall through */
+ case PAYLOAD_BLOCK_UNDEFINED:
+ case PAYLOAD_BLOCK_UNMAPPED:
+ case PAYLOAD_BLOCK_UNMAPPED_v095:
case PAYLOAD_BLOCK_ZERO:
/* return zero */
qemu_iovec_memset(&hd_qiov, 0, 0, sinfo.bytes_avail);
break;
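
Both unmapped states land in the read-as-zero bucket because images written against the 0.95 draft of the VHDX spec encode "unmapped" with a different numeric value than the 1.00 spec does, and the driver wants to read either vintage. The constants involved are defined in block/vhdx.h; the values below are recalled from the spec drafts, so check them against the tree rather than taking them as authoritative:

    #define PAYLOAD_BLOCK_NOT_PRESENT       0
    #define PAYLOAD_BLOCK_UNDEFINED         1
    #define PAYLOAD_BLOCK_ZERO              2
    #define PAYLOAD_BLOCK_UNMAPPED          3   /* VHDX 1.00 */
    #define PAYLOAD_BLOCK_UNMAPPED_v095     5   /* value used by the 0.95 draft */
    #define PAYLOAD_BLOCK_FULLY_PRESENT     6
    #define PAYLOAD_BLOCK_PARTIALLY_PRESENT 7
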
case PAYLOAD_BLOCK_FULLY_PRESENT:
qemu_co_mutex_unlock(&s->lock);
- ret = bdrv_co_readv(bs->file,
+ ret = bdrv_co_readv(bs->file->bs,
sinfo.file_offset >> BDRV_SECTOR_BITS,
sinfo.sectors_avail, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
static int vhdx_allocate_block(BlockDriverState *bs, BDRVVHDXState *s,
uint64_t *new_offset)
{
- *new_offset = bdrv_getlength(bs->file);
+ *new_offset = bdrv_getlength(bs->file->bs);
/* per the spec, the address for a block is in units of 1MB */
*new_offset = ROUND_UP(*new_offset, 1024 * 1024);
- return bdrv_truncate(bs->file, *new_offset + s->block_size);
+ return bdrv_truncate(bs->file->bs, *new_offset + s->block_size);
}
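
To make the rounding concrete (hypothetical numbers): if bdrv_getlength() reports 52,430,848 bytes (50 MiB plus 2 KiB), ROUND_UP(..., 1024 * 1024) places the new block at the next 1 MiB boundary, 53,477,376 bytes (51 MiB), and the file is then grown to that offset plus one block_size. Keeping the offset 1 MiB-aligned is what lets it be stored directly in the BAT entry, as shown in the next hunk.
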
/*
{
/* The BAT entry is a uint64, with 44 bits for the file offset in units of
* 1MB, and 3 bits for the block state. */
- s->bat[sinfo->bat_idx] = sinfo->file_offset;
+ if ((state == PAYLOAD_BLOCK_ZERO) ||
+ (state == PAYLOAD_BLOCK_UNDEFINED) ||
+ (state == PAYLOAD_BLOCK_NOT_PRESENT) ||
+ (state == PAYLOAD_BLOCK_UNMAPPED)) {
+ s->bat[sinfo->bat_idx] = 0; /* For PAYLOAD_BLOCK_ZERO, the
+ FileOffsetMB field is denoted as
+ 'reserved' in the v1.0 spec. If it is
+ non-zero, MS Hyper-V will fail to read
+ the disk image */
+ } else {
+ s->bat[sinfo->bat_idx] = sinfo->file_offset;
+ }
s->bat[sinfo->bat_idx] |= state & VHDX_BAT_STATE_BIT_MASK;
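
Unpacking the comment above: a BAT entry keeps the 3-bit block state in its lowest bits and, because every payload block is allocated on a 1 MiB boundary, the raw byte offset of the block has its low 20 bits clear and can be OR-ed in as-is, which is exactly what the assignment does. A sketch of the encoding with illustrative mask names (the driver's own macros live in block/vhdx.h, e.g. the VHDX_BAT_STATE_BIT_MASK used above):

    #include <stdint.h>

    #define BAT_STATE_MASK     0x7ULL                  /* bits 0-2: block state */
    #define BAT_FILE_OFF_MASK  0xFFFFFFFFFFF00000ULL   /* bits 20-63: FileOffsetMB,
                                                          i.e. a 1 MiB-aligned byte offset */

    static inline uint64_t bat_entry_pack(uint64_t file_offset, unsigned state)
    {
        /* file_offset must be 1 MiB-aligned (low 20 bits zero) */
        return (file_offset & BAT_FILE_OFF_MASK) | (state & BAT_STATE_MASK);
    }

    static inline uint64_t bat_entry_file_offset(uint64_t entry)
    {
        return entry & BAT_FILE_OFF_MASK;
    }

    static inline unsigned bat_entry_state(uint64_t entry)
    {
        return (unsigned)(entry & BAT_STATE_MASK);
    }
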
/* Queue another write of zero buffers if the underlying file
* does not zero-fill on file extension */
- if (bdrv_has_zero_init(bs->file) == 0) {
+ if (bdrv_has_zero_init(bs->file->bs) == 0) {
use_zero_buffers = true;
/* zero fill the front, if any */
iov1.iov_base = qemu_blockalign(bs, iov1.iov_len);
memset(iov1.iov_base, 0, iov1.iov_len);
qemu_iovec_concat_iov(&hd_qiov, &iov1, 1, 0,
- sinfo.block_offset);
+ iov1.iov_len);
sectors_to_write += iov1.iov_len >> BDRV_SECTOR_BITS;
}
iov2.iov_base = qemu_blockalign(bs, iov2.iov_len);
memset(iov2.iov_base, 0, iov2.iov_len);
qemu_iovec_concat_iov(&hd_qiov, &iov2, 1, 0,
- sinfo.block_offset);
+ iov2.iov_len);
sectors_to_write += iov2.iov_len >> BDRV_SECTOR_BITS;
}
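
For context on the two fixes above: a write that covers only part of a block is widened to a full-block write by concatenating zero padding around the guest data, so each qemu_iovec_concat_iov() call must pass that pad's own iov_len; sinfo.block_offset only happens to be correct for the front pad. As I read the surrounding function, the assembled I/O vector covers the block like this (descriptive diagram, not code from the patch):

    /*
     *  block start                                                   block end
     *  |<- sinfo.block_offset ->|<-- guest data (bytes_avail) -->|<-- tail -->|
     *  [      zeros, iov1       ][      from caller's qiov       ][zeros, iov2]
     *
     *  iov1.iov_len = sinfo.block_offset
     *  iov2.iov_len = s->block_size - (sinfo.block_offset + sinfo.bytes_avail)
     */
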
}
-
/* fall through */
case PAYLOAD_BLOCK_NOT_PRESENT: /* fall through */
- case PAYLOAD_BLOCK_UNMAPPED: /* fall through */
- case PAYLOAD_BLOCK_UNDEFINED: /* fall through */
+ case PAYLOAD_BLOCK_UNMAPPED:
+ case PAYLOAD_BLOCK_UNMAPPED_v095:
+ case PAYLOAD_BLOCK_UNDEFINED:
bat_prior_offset = sinfo.file_offset;
ret = vhdx_allocate_block(bs, s, &sinfo.file_offset);
if (ret < 0) {
}
/* block exists, so we can just overwrite it */
qemu_co_mutex_unlock(&s->lock);
- ret = bdrv_co_writev(bs->file,
+ ret = bdrv_co_writev(bs->file->bs,
sinfo.file_offset >> BDRV_SECTOR_BITS,
sectors_to_write, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
uint32_t offset = 0;
void *buffer = NULL;
void *entry_buffer;
- VHDXMetadataTableHeader *md_table;;
+ VHDXMetadataTableHeader *md_table;
VHDXMetadataTableEntry *md_table_entry;
/* Metadata entries */
log_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_LOG_SIZE, 0);
block_size = qemu_opt_get_size_del(opts, VHDX_BLOCK_OPT_BLOCK_SIZE, 0);
type = qemu_opt_get_del(opts, BLOCK_OPT_SUBFMT);
- use_zero_blocks = qemu_opt_get_bool_del(opts, VHDX_BLOCK_OPT_ZERO, false);
+ use_zero_blocks = qemu_opt_get_bool_del(opts, VHDX_BLOCK_OPT_ZERO, true);
if (image_size > VHDX_MAX_IMAGE_SIZE) {
error_setg_errno(errp, EINVAL, "Image size too large; max of 64TB");
bs = NULL;
ret = bdrv_open(&bs, filename, NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
- NULL, &local_err);
+ &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
goto exit;
{
.name = VHDX_BLOCK_OPT_ZERO,
.type = QEMU_OPT_BOOL,
- .help = "Force use of payload blocks of type 'ZERO'. Non-standard."
+ .help = "Force use of payload blocks of type 'ZERO'. "\
+ "Non-standard, but default. Do not set to 'off' when "\
+ "using 'qemu-img convert' with subformat=dynamic."
},
{ NULL }
}
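
A usage note on the new default: if I recall the option-name macros in block/vhdx.h correctly (an assumption worth double-checking), VHDX_BLOCK_OPT_ZERO expands to "block_state_zero", so an explicit opt-out at create time is spelled -o block_state_zero=off, as in qemu-img create -f vhdx -o subformat=dynamic,block_state_zero=off disk.vhdx 10G; per the help text above, that setting should not be combined with 'qemu-img convert' and subformat=dynamic.
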
.bdrv_create = vhdx_create,
.bdrv_get_info = vhdx_get_info,
.bdrv_check = vhdx_check,
+ .bdrv_has_zero_init = bdrv_has_zero_init_1,
.create_opts = &vhdx_create_opts,
};