uint32_t l2_cache_counts[L2_CACHE_SIZE];
int64_t cluster_sectors;
+ int64_t next_cluster_sector;
char *type;
} VmdkExtent;
} BDRVVmdkState;
typedef struct VmdkMetaData {
- uint32_t offset;
unsigned int l1_index;
unsigned int l2_index;
unsigned int l2_offset;
return;
}
s->num_extents--;
- s->extents = g_realloc(s->extents, s->num_extents * sizeof(VmdkExtent));
+ s->extents = g_renew(VmdkExtent, s->extents, s->num_extents);
}
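
Note: g_renew(Type, mem, count) is the type-safe spelling of
g_realloc(mem, count * sizeof(Type)): the element size is derived from the
type, and GLib's _n allocators check the multiplication for overflow. A rough
standalone sketch (plain GLib, names hypothetical):

    #include <glib.h>

    typedef struct { int x; } Elem;

    int main(void)
    {
        Elem *v = g_new0(Elem, 4);   /* zero-initialized array of 4 */
        v = g_renew(Elem, v, 8);     /* grow to 8 elements, type-checked */
        g_free(v);
        return 0;
    }
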
static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent)
{
VmdkExtent *extent;
BDRVVmdkState *s = bs->opaque;
+ int64_t nb_sectors;
if (cluster_sectors > 0x200000) {
        /* 0x200000 sectors * 512 bytes = 1 GB; a cluster that large is unrealistic */
return -EFBIG;
}
- s->extents = g_realloc(s->extents,
- (s->num_extents + 1) * sizeof(VmdkExtent));
+ nb_sectors = bdrv_nb_sectors(file);
+ if (nb_sectors < 0) {
+ return nb_sectors;
+ }
+
+ s->extents = g_renew(VmdkExtent, s->extents, s->num_extents + 1);
extent = &s->extents[s->num_extents];
s->num_extents++;
extent->l1_entry_sectors = l2_size * cluster_sectors;
extent->l2_size = l2_size;
extent->cluster_sectors = flat ? sectors : cluster_sectors;
+ extent->next_cluster_sector = ROUND_UP(nb_sectors, cluster_sectors);
if (s->num_extents > 1) {
extent->end_sector = (*(extent - 1)).end_sector + extent->sectors;
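
Note: next_cluster_sector caches the first free grain in the extent file,
replacing a per-allocation bdrv_getlength() call (see get_cluster_offset()
below). It is rounded up because the file may end mid-cluster: headers and
tables need not be grain-aligned. A toy calculation with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* division-based round-up; QEMU's ROUND_UP assumes a power-of-two divisor */
    #define ROUND_UP_DIV(n, d) (((n) + (d) - 1) / (d) * (d))

    int main(void)
    {
        int64_t nb_sectors = 1000;     /* hypothetical extent file length */
        int64_t cluster_sectors = 128; /* grain size in sectors */
        /* the first allocation will start at sector 1024 */
        printf("%lld\n", (long long)ROUND_UP_DIV(nb_sectors, cluster_sectors));
        return 0;
    }
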
/* read the L1 table */
l1_size = extent->l1_size * sizeof(uint32_t);
- extent->l1_table = g_malloc(l1_size);
+ extent->l1_table = g_try_malloc(l1_size);
+ if (l1_size && extent->l1_table == NULL) {
+ return -ENOMEM;
+ }
+
ret = bdrv_pread(extent->file,
extent->l1_table_offset,
extent->l1_table,
}
if (extent->l1_backup_table_offset) {
- extent->l1_backup_table = g_malloc(l1_size);
+ extent->l1_backup_table = g_try_malloc(l1_size);
+ if (l1_size && extent->l1_backup_table == NULL) {
+ ret = -ENOMEM;
+ goto fail_l1;
+ }
ret = bdrv_pread(extent->file,
extent->l1_backup_table_offset,
extent->l1_backup_table,
}
extent->l2_cache =
- g_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
+ g_new(uint32_t, extent->l2_size * L2_CACHE_SIZE);
return 0;
fail_l1b:
g_free(extent->l1_backup_table);
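
Note: l1_size is computed from on-disk header fields, i.e. untrusted input, so
an allocation failure must be handled gracefully instead of aborting the whole
process the way g_malloc() would. g_try_malloc(0) legitimately returns NULL,
hence the extra `l1_size &&` in the checks above. The pattern, as a sketch:

    #include <glib.h>
    #include <errno.h>

    /* allocate a buffer whose size came from untrusted image metadata */
    static int alloc_table(gsize size, void **out)
    {
        void *buf = g_try_malloc(size);
        if (size && buf == NULL) {   /* NULL for size == 0 is not an error */
            return -ENOMEM;
        }
        *out = buf;
        return 0;
    }
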
if (le32_to_cpu(header.flags) & VMDK4_FLAG_RGD) {
l1_backup_offset = le64_to_cpu(header.rgd_offset) << 9;
}
- if (bdrv_getlength(file) <
- le64_to_cpu(header.grain_offset) * BDRV_SECTOR_SIZE) {
+ if (bdrv_nb_sectors(file) < le64_to_cpu(header.grain_offset)) {
error_setg(errp, "File truncated, expecting at least %" PRId64 " bytes",
(int64_t)(le64_to_cpu(header.grain_offset)
* BDRV_SECTOR_SIZE));
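
Note: bdrv_nb_sectors(), also used in vmdk_add_extent() above, returns the
image size in sectors, so the truncation check can compare sector counts
directly instead of multiplying grain_offset out to bytes first. A sketch of
why the two forms agree (not part of the patch):

    #include <stdint.h>
    #include <stdbool.h>

    #define BDRV_SECTOR_SIZE 512

    /* equivalent when the multiplication does not overflow ... */
    static bool truncated_bytes(int64_t len_bytes, uint64_t grain_offset)
    {
        return len_bytes < (int64_t)(grain_offset * BDRV_SECTOR_SIZE);
    }

    /* ... while the sector-unit form cannot overflow at all */
    static bool truncated_sectors(int64_t nb_sectors, uint64_t grain_offset)
    {
        return nb_sectors < (int64_t)grain_offset;
    }
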
ret = vmdk_add_extent(bs, extent_file, true, sectors,
0, 0, 0, 0, 0, &extent, errp);
if (ret < 0) {
+ bdrv_unref(extent_file);
return ret;
}
extent->flat_start_offset = flat_offset << 9;
} else {
ret = vmdk_open_sparse(bs, extent_file, bs->open_flags, buf, errp);
}
+ g_free(buf);
if (ret) {
- g_free(buf);
bdrv_unref(extent_file);
return ret;
}
extent = &s->extents[s->num_extents - 1];
} else {
error_setg(errp, "Unsupported extent type '%s'", type);
+ bdrv_unref(extent_file);
return -ENOTSUP;
}
extent->type = g_strdup(type);
}
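
Note: the added bdrv_unref() calls plug reference leaks: bdrv_open() of an
extent file takes a reference that every error path must drop. Moving
g_free(buf) above the error check serves a similar purpose, freeing the header
buffer on success as well as on failure. The ownership rule these hunks
enforce, roughly (use_extent is a hypothetical stand-in):

    ret = use_extent(extent_file);
    if (ret < 0) {
        bdrv_unref(extent_file);   /* drop the reference taken at open */
        return ret;
    }
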
-static int vmdk_refresh_limits(BlockDriverState *bs)
+static void vmdk_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVVmdkState *s = bs->opaque;
int i;
s->extents[i].cluster_sectors);
}
}
-
- return 0;
}
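
Note: this follows the block-layer API change that made the
.bdrv_refresh_limits callback return void and report problems through an
Error **. vmdk cannot fail here, so the body simply loses its `return 0`. The
convention, as a rough sketch with hypothetical names:

    static void foo_refresh_limits(BlockDriverState *bs, Error **errp)
    {
        if (limits_unusable(bs)) {   /* hypothetical check */
            error_setg(errp, "cannot derive limits");
            return;
        }
        /* otherwise fill in bs->bl fields and just fall off the end */
    }
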
+/**
+ * get_whole_cluster
+ *
+ * Copy the cluster of the backing file that covers @sector_num, or write
+ * zeroes if there is no backing file, to the cluster at @cluster_sector_num.
+ *
+ * If @skip_start_sector < @skip_end_sector, the relative range
+ * [@skip_start_sector, @skip_end_sector) is neither copied nor zeroed; it is
+ * left for the caller to fill with the user data from the request.
+ */
static int get_whole_cluster(BlockDriverState *bs,
- VmdkExtent *extent,
- uint64_t cluster_offset,
- uint64_t offset,
- bool allocate)
+ VmdkExtent *extent,
+ uint64_t cluster_sector_num,
+ uint64_t sector_num,
+ uint64_t skip_start_sector,
+ uint64_t skip_end_sector)
{
int ret = VMDK_OK;
- uint8_t *whole_grain = NULL;
+ int64_t cluster_bytes;
+ uint8_t *whole_grain;
+
+ /* For COW, align request sector_num to cluster start */
+ sector_num = QEMU_ALIGN_DOWN(sector_num, extent->cluster_sectors);
+ cluster_bytes = extent->cluster_sectors << BDRV_SECTOR_BITS;
+ whole_grain = qemu_blockalign(bs, cluster_bytes);
+
+ if (!bs->backing_hd) {
+ memset(whole_grain, 0, skip_start_sector << BDRV_SECTOR_BITS);
+ memset(whole_grain + (skip_end_sector << BDRV_SECTOR_BITS), 0,
+ cluster_bytes - (skip_end_sector << BDRV_SECTOR_BITS));
+ }
+ assert(skip_end_sector <= extent->cluster_sectors);
/* We get here on the first write to a grain (cluster) that does not exist
 * yet: try to read it from the parent image, if there is one. */
- if (bs->backing_hd) {
- whole_grain =
- qemu_blockalign(bs, extent->cluster_sectors << BDRV_SECTOR_BITS);
- if (!vmdk_is_cid_valid(bs)) {
- ret = VMDK_ERROR;
- goto exit;
- }
+ if (bs->backing_hd && !vmdk_is_cid_valid(bs)) {
+ ret = VMDK_ERROR;
+ goto exit;
+ }
- /* floor offset to cluster */
- offset -= offset % (extent->cluster_sectors * 512);
- ret = bdrv_read(bs->backing_hd, offset >> 9, whole_grain,
- extent->cluster_sectors);
+ /* Read backing data before skip range */
+ if (skip_start_sector > 0) {
+ if (bs->backing_hd) {
+ ret = bdrv_read(bs->backing_hd, sector_num,
+ whole_grain, skip_start_sector);
+ if (ret < 0) {
+ ret = VMDK_ERROR;
+ goto exit;
+ }
+ }
+ ret = bdrv_write(extent->file, cluster_sector_num, whole_grain,
+ skip_start_sector);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
-
- /* Write grain only into the active image */
- ret = bdrv_write(extent->file, cluster_offset, whole_grain,
- extent->cluster_sectors);
+ }
+ /* Read backing data after skip range */
+ if (skip_end_sector < extent->cluster_sectors) {
+ if (bs->backing_hd) {
+ ret = bdrv_read(bs->backing_hd, sector_num + skip_end_sector,
+ whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
+ extent->cluster_sectors - skip_end_sector);
+ if (ret < 0) {
+ ret = VMDK_ERROR;
+ goto exit;
+ }
+ }
+ ret = bdrv_write(extent->file, cluster_sector_num + skip_end_sector,
+ whole_grain + (skip_end_sector << BDRV_SECTOR_BITS),
+ extent->cluster_sectors - skip_end_sector);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
+
exit:
qemu_vfree(whole_grain);
return ret;
}
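
Note: a worked example of the skip range, with made-up numbers. Suppose
cluster_sectors is 128 and the guest writes sectors [4, 12) of a grain that is
not yet allocated: get_whole_cluster() fills [0, 4) and [12, 128) from the
backing file (or with zeroes when there is none), and the caller then writes
[4, 12) with the request's own data, so no sector of the grain is written
twice. Standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t cluster_sectors = 128;          /* hypothetical grain size */
        uint64_t skip_start = 4, skip_end = 12;  /* covered by the request */

        printf("COW head: sectors [0, %llu)\n",
               (unsigned long long)skip_start);
        printf("COW tail: sectors [%llu, %llu)\n",
               (unsigned long long)skip_end,
               (unsigned long long)cluster_sectors);
        return 0;
    }
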
-static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
+static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
+ uint32_t offset)
{
- uint32_t offset;
- QEMU_BUILD_BUG_ON(sizeof(offset) != sizeof(m_data->offset));
- offset = cpu_to_le32(m_data->offset);
+ offset = cpu_to_le32(offset);
/* update L2 table */
if (bdrv_pwrite_sync(
extent->file,
((int64_t)m_data->l2_offset * 512)
- + (m_data->l2_index * sizeof(m_data->offset)),
+ + (m_data->l2_index * sizeof(offset)),
&offset, sizeof(offset)) < 0) {
return VMDK_ERROR;
}
if (bdrv_pwrite_sync(
extent->file,
((int64_t)m_data->l2_offset * 512)
- + (m_data->l2_index * sizeof(m_data->offset)),
+ + (m_data->l2_index * sizeof(offset)),
&offset, sizeof(offset)) < 0) {
return VMDK_ERROR;
}
return VMDK_OK;
}
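
Note: with the offset field removed from VmdkMetaData, the two call sites
later in this patch pass the value to write directly:

    /* zeroed-grain path */
    vmdk_L2update(extent, &m_data, VMDK_GTE_ZEROED);

    /* normal allocation path */
    vmdk_L2update(extent, &m_data, cluster_offset >> BDRV_SECTOR_BITS);
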
+/**
+ * get_cluster_offset
+ *
+ * Look up the cluster offset in the extent file by sector number, and store it
+ * in @cluster_offset.
+ *
+ * For flat extents, the start offset as parsed from the description file is
+ * returned.
+ *
+ * For sparse extents, look up in the L1 and L2 tables. If @allocate is true,
+ * return an offset for a new cluster and update the L2 cache. If there is a
+ * backing file, COW is done before returning; otherwise, zeroes are written to
+ * the allocated cluster. Both COW and zero writing skip the sector range
+ * [@skip_start_sector, @skip_end_sector) passed in by the caller, because the
+ * caller has new data to write there.
+ *
+ * Returns: VMDK_OK if the cluster exists and is mapped in the image.
+ *          VMDK_UNALLOC if the cluster is not mapped and @allocate is false.
+ *          VMDK_ERROR on failure.
+ */
static int get_cluster_offset(BlockDriverState *bs,
- VmdkExtent *extent,
- VmdkMetaData *m_data,
- uint64_t offset,
- int allocate,
- uint64_t *cluster_offset)
+ VmdkExtent *extent,
+ VmdkMetaData *m_data,
+ uint64_t offset,
+ bool allocate,
+ uint64_t *cluster_offset,
+ uint64_t skip_start_sector,
+ uint64_t skip_end_sector)
{
unsigned int l1_index, l2_offset, l2_index;
int min_index, i, j;
uint32_t min_count, *l2_table;
bool zeroed = false;
+ int64_t ret;
+ int64_t cluster_sector;
if (m_data) {
m_data->valid = 0;
extent->l2_cache_counts[min_index] = 1;
found:
l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
- *cluster_offset = le32_to_cpu(l2_table[l2_index]);
+ cluster_sector = le32_to_cpu(l2_table[l2_index]);
if (m_data) {
m_data->valid = 1;
m_data->l1_index = l1_index;
m_data->l2_index = l2_index;
- m_data->offset = *cluster_offset;
m_data->l2_offset = l2_offset;
m_data->l2_cache_entry = &l2_table[l2_index];
}
- if (extent->has_zero_grain && *cluster_offset == VMDK_GTE_ZEROED) {
+ if (extent->has_zero_grain && cluster_sector == VMDK_GTE_ZEROED) {
zeroed = true;
}
- if (!*cluster_offset || zeroed) {
+ if (!cluster_sector || zeroed) {
if (!allocate) {
return zeroed ? VMDK_ZEROED : VMDK_UNALLOC;
}
- /* Avoid the L2 tables update for the images that have snapshots. */
- *cluster_offset = bdrv_getlength(extent->file);
- if (!extent->compressed) {
- bdrv_truncate(
- extent->file,
- *cluster_offset + (extent->cluster_sectors << 9)
- );
- }
-
- *cluster_offset >>= 9;
- l2_table[l2_index] = cpu_to_le32(*cluster_offset);
+ cluster_sector = extent->next_cluster_sector;
+ extent->next_cluster_sector += extent->cluster_sectors;
/* First of all we write the grain itself, to avoid a race condition
 * that could corrupt the image. Such corruption can happen when the
 * host disk runs out of space or the VM is shut down uncleanly.
 */
- if (get_whole_cluster(
- bs, extent, *cluster_offset, offset, allocate) == -1) {
- return VMDK_ERROR;
- }
-
- if (m_data) {
- m_data->offset = *cluster_offset;
+ ret = get_whole_cluster(bs, extent,
+ cluster_sector,
+ offset >> BDRV_SECTOR_BITS,
+ skip_start_sector, skip_end_sector);
+ if (ret) {
+ return ret;
}
}
- *cluster_offset <<= 9;
+ *cluster_offset = cluster_sector << BDRV_SECTOR_BITS;
return VMDK_OK;
}
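
Note: this hunk is the core of the optimization. The old code asked the host
file for its length on every allocation and then grew it with bdrv_truncate();
the new code bumps a cached counter and lets the grain write itself extend the
file. Side by side, roughly:

    /* before: two calls into the file layer per allocated grain
     * (compressed extents skipped the truncate) */
    cluster_sector = bdrv_getlength(extent->file) >> BDRV_SECTOR_BITS;
    bdrv_truncate(extent->file,
                  (cluster_sector + extent->cluster_sectors) << BDRV_SECTOR_BITS);

    /* after: a pure in-memory bump; writing the grain grows the file */
    cluster_sector = extent->next_cluster_sector;
    extent->next_cluster_sector += extent->cluster_sectors;
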
}
qemu_co_mutex_lock(&s->lock);
ret = get_cluster_offset(bs, extent, NULL,
- sector_num * 512, 0, &offset);
+ sector_num * 512, false, &offset,
+ 0, 0);
qemu_co_mutex_unlock(&s->lock);
switch (ret) {
if (!extent) {
return -EIO;
}
- ret = get_cluster_offset(
- bs, extent, NULL,
- sector_num << 9, 0, &cluster_offset);
+ ret = get_cluster_offset(bs, extent, NULL,
+ sector_num << 9, false, &cluster_offset,
+ 0, 0);
extent_begin_sector = extent->end_sector - extent->sectors;
extent_relative_sector_num = sector_num - extent_begin_sector;
index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
if (!extent) {
return -EIO;
}
- ret = get_cluster_offset(
- bs,
- extent,
- &m_data,
- sector_num << 9, !extent->compressed,
- &cluster_offset);
+ extent_begin_sector = extent->end_sector - extent->sectors;
+ extent_relative_sector_num = sector_num - extent_begin_sector;
+ index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
+ n = extent->cluster_sectors - index_in_cluster;
+ if (n > nb_sectors) {
+ n = nb_sectors;
+ }
+ ret = get_cluster_offset(bs, extent, &m_data, sector_num << 9,
+ !(extent->compressed || zeroed),
+ &cluster_offset,
+ index_in_cluster, index_in_cluster + n);
if (extent->compressed) {
if (ret == VMDK_OK) {
/* Refuse write to allocated cluster for streamOptimized */
return -EIO;
} else {
/* allocate */
- ret = get_cluster_offset(
- bs,
- extent,
- &m_data,
- sector_num << 9, 1,
- &cluster_offset);
+ ret = get_cluster_offset(bs, extent, &m_data, sector_num << 9,
+ true, &cluster_offset, 0, 0);
}
}
if (ret == VMDK_ERROR) {
return -EINVAL;
}
- extent_begin_sector = extent->end_sector - extent->sectors;
- extent_relative_sector_num = sector_num - extent_begin_sector;
- index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
- n = extent->cluster_sectors - index_in_cluster;
- if (n > nb_sectors) {
- n = nb_sectors;
- }
if (zeroed) {
/* Do zeroed write, buf is ignored */
if (extent->has_zero_grain &&
n >= extent->cluster_sectors) {
n = extent->cluster_sectors;
if (!zero_dry_run) {
- m_data.offset = VMDK_GTE_ZEROED;
/* update L2 tables */
- if (vmdk_L2update(extent, &m_data) != VMDK_OK) {
+ if (vmdk_L2update(extent, &m_data, VMDK_GTE_ZEROED)
+ != VMDK_OK) {
return -EIO;
}
}
}
if (m_data.valid) {
/* update L2 tables */
- if (vmdk_L2update(extent, &m_data) != VMDK_OK) {
+ if (vmdk_L2update(extent, &m_data,
+ cluster_offset >> BDRV_SECTOR_BITS)
+ != VMDK_OK) {
return -EIO;
}
}
static int vmdk_create_extent(const char *filename, int64_t filesize,
bool flat, bool compress, bool zeroed_grain,
- Error **errp)
+ QemuOpts *opts, Error **errp)
{
int ret, i;
BlockDriverState *bs = NULL;
uint32_t *gd_buf = NULL;
int gd_buf_size;
- ret = bdrv_create_file(filename, NULL, &local_err);
+ ret = bdrv_create_file(filename, opts, &local_err);
if (ret < 0) {
error_propagate(errp, local_err);
goto exit;
goto exit;
}
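
Note: vmdk_create_extent() now receives the creation QemuOpts and forwards
them to bdrv_create_file(), so options understood by the protocol layer apply
to each extent file as well. The updated call, from later in this patch:

    if (vmdk_create_extent(ext_filename, size,
                           flat, compress, zeroed_grain, opts, errp)) {
        ret = -EINVAL;
        goto exit;
    }
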
/* Read out options */
- total_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
+ total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
+ BDRV_SECTOR_SIZE);
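
Note: VMDK stores image sizes in 512-byte sectors, so a byte count that is not
sector-aligned is rounded up rather than silently truncated. With made-up
numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define BDRV_SECTOR_SIZE 512

    int main(void)
    {
        uint64_t requested = 1000000;  /* bytes, hypothetical */
        uint64_t rounded = (requested + BDRV_SECTOR_SIZE - 1)
                           / BDRV_SECTOR_SIZE * BDRV_SECTOR_SIZE;
        printf("%llu\n", (unsigned long long)rounded);  /* prints 1000448 */
        return 0;
    }
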
adapter_type = qemu_opt_get_del(opts, BLOCK_OPT_ADAPTER_TYPE);
backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
if (qemu_opt_get_bool_del(opts, BLOCK_OPT_COMPAT6, false)) {
path, desc_filename);
if (vmdk_create_extent(ext_filename, size,
- flat, compress, zeroed_grain, errp)) {
+ flat, compress, zeroed_grain, opts, errp)) {
ret = -EINVAL;
goto exit;
}
BDRVVmdkState *s = bs->opaque;
VmdkExtent *extent = NULL;
int64_t sector_num = 0;
- int64_t total_sectors = bdrv_getlength(bs) / BDRV_SECTOR_SIZE;
+ int64_t total_sectors = bdrv_nb_sectors(bs);
int ret;
uint64_t cluster_offset;
}
ret = get_cluster_offset(bs, extent, NULL,
sector_num << BDRV_SECTOR_BITS,
- 0, &cluster_offset);
+ false, &cluster_offset, 0, 0);
if (ret == VMDK_ERROR) {
fprintf(stderr,
"ERROR: could not get cluster_offset for sector %"