2 * Block driver for the VMDK format
4 * Copyright (c) 2004 Fabrice Bellard
5 * Copyright (c) 2005 Filip Navara
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 #include "qemu-common.h"
27 #include "block/block_int.h"
28 #include "qemu/module.h"
29 #include "migration/migration.h"
32 #define VMDK3_MAGIC (('C' << 24) | ('O' << 16) | ('W' << 8) | 'D')
33 #define VMDK4_MAGIC (('K' << 24) | ('D' << 16) | ('M' << 8) | 'V')
34 #define VMDK4_COMPRESSION_DEFLATE 1
35 #define VMDK4_FLAG_RGD (1 << 1)
36 #define VMDK4_FLAG_COMPRESS (1 << 16)
37 #define VMDK4_FLAG_MARKER (1 << 17)
38 #define VMDK4_GD_AT_END 0xffffffffffffffffULL
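/* A gd_offset equal to VMDK4_GD_AT_END (all ones) means the grain directory
 * location is only recorded in the footer at the end of the file; see the
 * footer handling in vmdk_open_vmdk4(). */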
41 /* VMDK internal error codes */
43 #define VMDK_ERROR (-1)
44 /* Cluster not allocated */
45 #define VMDK_UNALLOC (-2)
46 #define VMDK_ZEROED (-3)
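/* These VMDK_* codes are returned by get_cluster_offset() and related
 * helpers; success is reported as 0 (VMDK_OK). */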
51 uint32_t disk_sectors;
53 uint32_t l1dir_offset;
55 uint32_t file_sectors;
58 uint32_t sectors_per_track;
68 int32_t num_gtes_per_gte;
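/* num_gtes_per_gte above is the number of grain table entries per grain
 * table, despite what the field name suggests */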
74 uint16_t compressAlgorithm;
75 } QEMU_PACKED VMDK4Header;
77 #define L2_CACHE_SIZE 16
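/* Each extent keeps up to L2_CACHE_SIZE L2 (grain) tables in memory; the
 * l2_cache_offsets/l2_cache_counts arrays implement a simple least-used
 * replacement policy (see get_cluster_offset). */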
79 typedef struct VmdkExtent {
80 BlockDriverState *file;
86 int64_t flat_start_offset;
87 int64_t l1_table_offset;
88 int64_t l1_backup_table_offset;
90 uint32_t *l1_backup_table;
92 uint32_t l1_entry_sectors;
96 uint32_t l2_cache_offsets[L2_CACHE_SIZE];
97 uint32_t l2_cache_counts[L2_CACHE_SIZE];
99 unsigned int cluster_sectors;
102 typedef struct BDRVVmdkState {
108 /* Extent array with num_extents entries, sorted by address in ascending order */
110 Error *migration_blocker;
113 typedef struct VmdkMetaData {
115 unsigned int l1_index;
116 unsigned int l2_index;
117 unsigned int l2_offset;
121 typedef struct VmdkGrainMarker {
128 MARKER_END_OF_STREAM = 0,
129 MARKER_GRAIN_TABLE = 1,
130 MARKER_GRAIN_DIRECTORY = 2,
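/* Markers delimit the data of stream-optimized (compressed) extents; they
 * are present when VMDK4_FLAG_MARKER is set and are handled in
 * vmdk_open_vmdk4() and vmdk_read_extent(). */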
134 static int vmdk_probe(const uint8_t *buf, int buf_size, const char *filename)
141 magic = be32_to_cpu(*(uint32_t *)buf);
142 if (magic == VMDK3_MAGIC ||
143 magic == VMDK4_MAGIC) {
146 const char *p = (const char *)buf;
147 const char *end = p + buf_size;
150 /* skip comment line */
151 while (p < end && *p != '\n') {
158 while (p < end && *p == ' ') {
162 /* skip '\r' if Windows line endings are used. */
162 if (p < end && *p == '\r') {
165 /* only accept blank lines before 'version=' line */
166 if (p == end || *p != '\n') {
172 if (end - p >= strlen("version=X\n")) {
173 if (strncmp("version=1\n", p, strlen("version=1\n")) == 0 ||
174 strncmp("version=2\n", p, strlen("version=2\n")) == 0) {
178 if (end - p >= strlen("version=X\r\n")) {
179 if (strncmp("version=1\r\n", p, strlen("version=1\r\n")) == 0 ||
180 strncmp("version=2\r\n", p, strlen("version=2\r\n")) == 0) {
192 #define SECTOR_SIZE 512
193 #define DESC_SIZE (20 * SECTOR_SIZE) /* 20 sectors of 512 bytes each */
194 #define BUF_SIZE 4096
195 #define HEADER_SIZE 512 /* first sector of 512 bytes */
197 static void vmdk_free_extents(BlockDriverState *bs)
200 BDRVVmdkState *s = bs->opaque;
203 for (i = 0; i < s->num_extents; i++) {
207 g_free(e->l1_backup_table);
208 if (e->file != bs->file) {
209 bdrv_delete(e->file);
215 static void vmdk_free_last_extent(BlockDriverState *bs)
217 BDRVVmdkState *s = bs->opaque;
219 if (s->num_extents == 0) {
223 s->extents = g_realloc(s->extents, s->num_extents * sizeof(VmdkExtent));
226 static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent)
228 char desc[DESC_SIZE];
229 uint32_t cid = 0xffffffff;
230 const char *p_name, *cid_str;
232 BDRVVmdkState *s = bs->opaque;
235 ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE);
241 cid_str = "parentCID";
242 cid_str_size = sizeof("parentCID");
245 cid_str_size = sizeof("CID");
248 desc[DESC_SIZE - 1] = '\0';
249 p_name = strstr(desc, cid_str);
250 if (p_name != NULL) {
251 p_name += cid_str_size;
252 sscanf(p_name, "%x", &cid);
258 static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
260 char desc[DESC_SIZE], tmp_desc[DESC_SIZE];
261 char *p_name, *tmp_str;
262 BDRVVmdkState *s = bs->opaque;
265 ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE);
270 desc[DESC_SIZE - 1] = '\0';
271 tmp_str = strstr(desc, "parentCID");
272 if (tmp_str == NULL) {
276 pstrcpy(tmp_desc, sizeof(tmp_desc), tmp_str);
277 p_name = strstr(desc, "CID");
278 if (p_name != NULL) {
279 p_name += sizeof("CID");
280 snprintf(p_name, sizeof(desc) - (p_name - desc), "%x\n", cid);
281 pstrcat(desc, sizeof(desc), tmp_desc);
284 ret = bdrv_pwrite_sync(bs->file, s->desc_offset, desc, DESC_SIZE);
292 static int vmdk_is_cid_valid(BlockDriverState *bs)
295 BDRVVmdkState *s = bs->opaque;
296 BlockDriverState *p_bs = bs->backing_hd;
300 cur_pcid = vmdk_read_cid(p_bs, 0);
301 if (s->parent_cid != cur_pcid) {
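/* the parent image was modified after this image was created (its CID
 * changed), so the backing file contents can no longer be trusted */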
311 /* Queue extents, if any, for reopen() */
312 static int vmdk_reopen_prepare(BDRVReopenState *state,
313 BlockReopenQueue *queue, Error **errp)
320 assert(state != NULL);
321 assert(state->bs != NULL);
324 error_set(errp, ERROR_CLASS_GENERIC_ERROR,
325 "No reopen queue for VMDK extents");
329 s = state->bs->opaque;
333 for (i = 0; i < s->num_extents; i++) {
335 if (e->file != state->bs->file) {
336 bdrv_reopen_queue(queue, e->file, state->flags);
345 static int vmdk_parent_open(BlockDriverState *bs)
348 char desc[DESC_SIZE + 1];
349 BDRVVmdkState *s = bs->opaque;
352 desc[DESC_SIZE] = '\0';
353 ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE);
358 p_name = strstr(desc, "parentFileNameHint");
359 if (p_name != NULL) {
362 p_name += sizeof("parentFileNameHint") + 1;
363 end_name = strchr(p_name, '\"');
364 if (end_name == NULL) {
367 if ((end_name - p_name) > sizeof(bs->backing_file) - 1) {
371 pstrcpy(bs->backing_file, end_name - p_name + 1, p_name);
377 /* Create and append an extent to the extent array. Return the address of the
378 * added VmdkExtent, or NULL if allocation failed. */
379 static VmdkExtent *vmdk_add_extent(BlockDriverState *bs,
380 BlockDriverState *file, bool flat, int64_t sectors,
381 int64_t l1_offset, int64_t l1_backup_offset,
383 int l2_size, unsigned int cluster_sectors)
386 BDRVVmdkState *s = bs->opaque;
388 s->extents = g_realloc(s->extents,
389 (s->num_extents + 1) * sizeof(VmdkExtent));
390 extent = &s->extents[s->num_extents];
393 memset(extent, 0, sizeof(VmdkExtent));
396 extent->sectors = sectors;
397 extent->l1_table_offset = l1_offset;
398 extent->l1_backup_table_offset = l1_backup_offset;
399 extent->l1_size = l1_size;
400 extent->l1_entry_sectors = l2_size * cluster_sectors;
401 extent->l2_size = l2_size;
402 extent->cluster_sectors = cluster_sectors;
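/* Example: with the defaults written by vmdk_create_extent() below
 * (granularity = 128 sectors, 512 grain table entries per table), each L1
 * entry covers l1_entry_sectors = 512 * 128 = 65536 sectors, i.e. 32 MiB. */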
404 if (s->num_extents > 1) {
405 extent->end_sector = (*(extent - 1)).end_sector + extent->sectors;
407 extent->end_sector = extent->sectors;
409 bs->total_sectors = extent->end_sector;
413 static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent)
418 /* read the L1 table */
419 l1_size = extent->l1_size * sizeof(uint32_t);
420 extent->l1_table = g_malloc(l1_size);
421 ret = bdrv_pread(extent->file,
422 extent->l1_table_offset,
428 for (i = 0; i < extent->l1_size; i++) {
429 le32_to_cpus(&extent->l1_table[i]);
432 if (extent->l1_backup_table_offset) {
433 extent->l1_backup_table = g_malloc(l1_size);
434 ret = bdrv_pread(extent->file,
435 extent->l1_backup_table_offset,
436 extent->l1_backup_table,
441 for (i = 0; i < extent->l1_size; i++) {
442 le32_to_cpus(&extent->l1_backup_table[i]);
447 g_malloc(extent->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
450 g_free(extent->l1_backup_table);
452 g_free(extent->l1_table);
456 static int vmdk_open_vmdk3(BlockDriverState *bs,
457 BlockDriverState *file,
465 ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header));
469 extent = vmdk_add_extent(bs,
471 le32_to_cpu(header.disk_sectors),
472 le32_to_cpu(header.l1dir_offset) << 9,
474 le32_to_cpu(header.granularity));
475 ret = vmdk_init_tables(bs, extent);
477 /* free extent allocated by vmdk_add_extent */
478 vmdk_free_last_extent(bs);
483 static int vmdk_open_desc_file(BlockDriverState *bs, int flags,
484 int64_t desc_offset);
486 static int vmdk_open_vmdk4(BlockDriverState *bs,
487 BlockDriverState *file,
492 uint32_t l1_size, l1_entry_sectors;
495 int64_t l1_backup_offset = 0;
497 ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header));
501 if (header.capacity == 0 && header.desc_offset) {
502 return vmdk_open_desc_file(bs, flags, header.desc_offset << 9);
505 if (le64_to_cpu(header.gd_offset) == VMDK4_GD_AT_END) {
507 * The footer takes precedence over the header, so read it in. The
508 * footer starts at offset -1024 from the end: One sector for the
509 * footer, and another one for the end-of-stream marker.
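* The last three sectors of the file are thus: footer marker, footer (a
* copy of the header), and end-of-stream marker, which is why the read
* below starts 1536 bytes before the end of the file.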
516 uint8_t pad[512 - 16];
517 } QEMU_PACKED footer_marker;
521 uint8_t pad[512 - 4 - sizeof(VMDK4Header)];
527 uint8_t pad[512 - 16];
528 } QEMU_PACKED eos_marker;
529 } QEMU_PACKED footer;
531 ret = bdrv_pread(file,
532 bs->file->total_sectors * 512 - 1536,
533 &footer, sizeof(footer));
538 /* Some sanity checks for the footer */
539 if (be32_to_cpu(footer.magic) != VMDK4_MAGIC ||
540 le32_to_cpu(footer.footer_marker.size) != 0 ||
541 le32_to_cpu(footer.footer_marker.type) != MARKER_FOOTER ||
542 le64_to_cpu(footer.eos_marker.val) != 0 ||
543 le32_to_cpu(footer.eos_marker.size) != 0 ||
544 le32_to_cpu(footer.eos_marker.type) != MARKER_END_OF_STREAM)
549 header = footer.header;
552 l1_entry_sectors = le32_to_cpu(header.num_gtes_per_gte)
553 * le64_to_cpu(header.granularity);
554 if (l1_entry_sectors == 0) {
557 l1_size = (le64_to_cpu(header.capacity) + l1_entry_sectors - 1)
559 if (le32_to_cpu(header.flags) & VMDK4_FLAG_RGD) {
560 l1_backup_offset = le64_to_cpu(header.rgd_offset) << 9;
562 extent = vmdk_add_extent(bs, file, false,
563 le64_to_cpu(header.capacity),
564 le64_to_cpu(header.gd_offset) << 9,
567 le32_to_cpu(header.num_gtes_per_gte),
568 le64_to_cpu(header.granularity));
570 le16_to_cpu(header.compressAlgorithm) == VMDK4_COMPRESSION_DEFLATE;
571 extent->has_marker = le32_to_cpu(header.flags) & VMDK4_FLAG_MARKER;
572 ret = vmdk_init_tables(bs, extent);
574 /* free extent allocated by vmdk_add_extent */
575 vmdk_free_last_extent(bs);
580 /* Find the value of an option in the descriptor file */
581 static int vmdk_parse_description(const char *desc, const char *opt_name,
582 char *buf, int buf_size)
584 char *opt_pos, *opt_end;
585 const char *end = desc + strlen(desc);
587 opt_pos = strstr(desc, opt_name);
591 /* Skip "=\"" following opt_name */
592 opt_pos += strlen(opt_name) + 2;
593 if (opt_pos >= end) {
597 while (opt_end < end && *opt_end != '"') {
600 if (opt_end == end || buf_size < opt_end - opt_pos + 1) {
603 pstrcpy(buf, opt_end - opt_pos + 1, opt_pos);
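/* Example: with desc containing createType="twoGbMaxExtentSparse" and
 * opt_name "createType", buf receives twoGbMaxExtentSparse. */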
607 /* Open an extent file and append it to the extent array of bs */
608 static int vmdk_open_sparse(BlockDriverState *bs,
609 BlockDriverState *file,
614 if (bdrv_pread(file, 0, &magic, sizeof(magic)) != sizeof(magic)) {
618 magic = be32_to_cpu(magic);
621 return vmdk_open_vmdk3(bs, file, flags);
624 return vmdk_open_vmdk4(bs, file, flags);
632 static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
633 const char *desc_file_path)
639 const char *p = desc;
642 char extent_path[PATH_MAX];
643 BlockDriverState *extent_file;
646 /* parse extent line:
647 * RW [size in sectors] FLAT "file-name.vmdk" OFFSET
649 * RW [size in sectors] SPARSE "file-name.vmdk"
652 ret = sscanf(p, "%10s %" SCNd64 " %10s \"%511[^\n\r\"]\" %" SCNd64,
653 access, &sectors, type, fname, &flat_offset);
654 if (ret < 4 || strcmp(access, "RW")) {
656 } else if (!strcmp(type, "FLAT")) {
657 if (ret != 5 || flat_offset < 0) {
660 } else if (ret != 4) {
665 (strcmp(type, "FLAT") && strcmp(type, "SPARSE")) ||
666 (strcmp(access, "RW"))) {
670 path_combine(extent_path, sizeof(extent_path),
671 desc_file_path, fname);
672 ret = bdrv_file_open(&extent_file, extent_path, NULL, bs->open_flags);
677 /* save to extents array */
678 if (!strcmp(type, "FLAT")) {
682 extent = vmdk_add_extent(bs, extent_file, true, sectors,
683 0, 0, 0, 0, sectors);
684 extent->flat_start_offset = flat_offset << 9;
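/* A flat extent maps its sector range directly onto the extent file,
 * starting at flat_start_offset; get_cluster_offset() simply returns
 * that offset for flat extents. */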
685 } else if (!strcmp(type, "SPARSE")) {
687 ret = vmdk_open_sparse(bs, extent_file, bs->open_flags);
689 bdrv_delete(extent_file);
694 "VMDK: Not supported extent type \"%s\""".\n", type);
698 /* move to next line */
699 while (*p && *p != '\n') {
707 static int vmdk_open_desc_file(BlockDriverState *bs, int flags,
713 BDRVVmdkState *s = bs->opaque;
715 ret = bdrv_pread(bs->file, desc_offset, buf, sizeof(buf));
720 if (vmdk_parse_description(buf, "createType", ct, sizeof(ct))) {
723 if (strcmp(ct, "monolithicFlat") &&
724 strcmp(ct, "twoGbMaxExtentSparse") &&
725 strcmp(ct, "twoGbMaxExtentFlat")) {
727 "VMDK: Not supported image type \"%s\""".\n", ct);
731 return vmdk_parse_extents(buf, bs, bs->file->filename);
734 static int vmdk_open(BlockDriverState *bs, QDict *options, int flags)
737 BDRVVmdkState *s = bs->opaque;
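/* An image is either a binary sparse extent (detected by its magic) or a
 * plain-text descriptor file: try the sparse header first and fall back
 * to parsing the descriptor. */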
739 if (vmdk_open_sparse(bs, bs->file, flags) == 0) {
740 s->desc_offset = 0x200;
742 ret = vmdk_open_desc_file(bs, flags, 0);
747 /* try to open the parent image, if one exists */
748 ret = vmdk_parent_open(bs);
752 s->parent_cid = vmdk_read_cid(bs, 1);
753 qemu_co_mutex_init(&s->lock);
755 /* Disable migration when VMDK images are used */
756 error_set(&s->migration_blocker,
757 QERR_BLOCK_FORMAT_FEATURE_NOT_SUPPORTED,
758 "vmdk", bs->device_name, "live migration");
759 migrate_add_blocker(s->migration_blocker);
764 vmdk_free_extents(bs);
768 static int get_whole_cluster(BlockDriverState *bs,
770 uint64_t cluster_offset,
774 /* e.g. 128 sectors * 512 bytes each = 64 KB grain size */
775 uint8_t whole_grain[extent->cluster_sectors * 512];
777 /* We get here on the first write to a grain (cluster) that is not yet
778 * allocated: try to read the old data from the parent image, if one exists. */
779 if (bs->backing_hd) {
782 if (!vmdk_is_cid_valid(bs)) {
786 /* floor offset to cluster */
787 offset -= offset % (extent->cluster_sectors * 512);
788 ret = bdrv_read(bs->backing_hd, offset >> 9, whole_grain,
789 extent->cluster_sectors);
794 /* Write grain only into the active image */
795 ret = bdrv_write(extent->file, cluster_offset, whole_grain,
796 extent->cluster_sectors);
804 static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data)
806 /* update L2 table */
807 if (bdrv_pwrite_sync(
809 ((int64_t)m_data->l2_offset * 512)
810 + (m_data->l2_index * sizeof(m_data->offset)),
812 sizeof(m_data->offset)
816 /* update backup L2 table */
817 if (extent->l1_backup_table_offset != 0) {
818 m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
819 if (bdrv_pwrite_sync(
821 ((int64_t)m_data->l2_offset * 512)
822 + (m_data->l2_index * sizeof(m_data->offset)),
823 &(m_data->offset), sizeof(m_data->offset)
832 static int get_cluster_offset(BlockDriverState *bs,
834 VmdkMetaData *m_data,
837 uint64_t *cluster_offset)
839 unsigned int l1_index, l2_offset, l2_index;
841 uint32_t min_count, *l2_table, tmp = 0;
847 *cluster_offset = extent->flat_start_offset;
851 offset -= (extent->end_sector - extent->sectors) * SECTOR_SIZE;
852 l1_index = (offset >> 9) / extent->l1_entry_sectors;
853 if (l1_index >= extent->l1_size) {
856 l2_offset = extent->l1_table[l1_index];
860 for (i = 0; i < L2_CACHE_SIZE; i++) {
861 if (l2_offset == extent->l2_cache_offsets[i]) {
862 /* increment the hit count */
863 if (++extent->l2_cache_counts[i] == 0xffffffff) {
864 for (j = 0; j < L2_CACHE_SIZE; j++) {
865 extent->l2_cache_counts[j] >>= 1;
868 l2_table = extent->l2_cache + (i * extent->l2_size);
872 /* not found: load a new entry into the least used cache slot */
874 min_count = 0xffffffff;
875 for (i = 0; i < L2_CACHE_SIZE; i++) {
876 if (extent->l2_cache_counts[i] < min_count) {
877 min_count = extent->l2_cache_counts[i];
881 l2_table = extent->l2_cache + (min_index * extent->l2_size);
884 (int64_t)l2_offset * 512,
886 extent->l2_size * sizeof(uint32_t)
887 ) != extent->l2_size * sizeof(uint32_t)) {
891 extent->l2_cache_offsets[min_index] = l2_offset;
892 extent->l2_cache_counts[min_index] = 1;
894 l2_index = ((offset >> 9) / extent->cluster_sectors) % extent->l2_size;
895 *cluster_offset = le32_to_cpu(l2_table[l2_index]);
897 if (!*cluster_offset) {
902 /* Avoid updating the L2 tables for images that have snapshots. */
903 *cluster_offset = bdrv_getlength(extent->file);
904 if (!extent->compressed) {
907 *cluster_offset + (extent->cluster_sectors << 9)
911 *cluster_offset >>= 9;
912 tmp = cpu_to_le32(*cluster_offset);
913 l2_table[l2_index] = tmp;
915 /* First of all we write the grain itself, to avoid a race condition
916 * that may corrupt the image.
917 * This problem may occur because of insufficient space on the host disk
918 * or an inappropriate VM shutdown.
920 if (get_whole_cluster(
921 bs, extent, *cluster_offset, offset, allocate) == -1) {
926 m_data->offset = tmp;
927 m_data->l1_index = l1_index;
928 m_data->l2_index = l2_index;
929 m_data->l2_offset = l2_offset;
933 *cluster_offset <<= 9;
937 static VmdkExtent *find_extent(BDRVVmdkState *s,
938 int64_t sector_num, VmdkExtent *start_hint)
940 VmdkExtent *extent = start_hint;
943 extent = &s->extents[0];
945 while (extent < &s->extents[s->num_extents]) {
946 if (sector_num < extent->end_sector) {
954 static int coroutine_fn vmdk_co_is_allocated(BlockDriverState *bs,
955 int64_t sector_num, int nb_sectors, int *pnum)
957 BDRVVmdkState *s = bs->opaque;
958 int64_t index_in_cluster, n, ret;
962 extent = find_extent(s, sector_num, NULL);
966 qemu_co_mutex_lock(&s->lock);
967 ret = get_cluster_offset(bs, extent, NULL,
968 sector_num * 512, 0, &offset);
969 qemu_co_mutex_unlock(&s->lock);
970 /* get_cluster_offset returning 0 means success */
973 index_in_cluster = sector_num % extent->cluster_sectors;
974 n = extent->cluster_sectors - index_in_cluster;
975 if (n > nb_sectors) {
982 static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
983 int64_t offset_in_cluster, const uint8_t *buf,
984 int nb_sectors, int64_t sector_num)
987 VmdkGrainMarker *data = NULL;
989 const uint8_t *write_buf = buf;
990 int write_len = nb_sectors * 512;
992 if (extent->compressed) {
993 if (!extent->has_marker) {
997 buf_len = (extent->cluster_sectors << 9) * 2;
998 data = g_malloc(buf_len + sizeof(VmdkGrainMarker));
999 if (compress(data->data, &buf_len, buf, nb_sectors << 9) != Z_OK ||
1004 data->lba = sector_num;
1005 data->size = buf_len;
1006 write_buf = (uint8_t *)data;
1007 write_len = buf_len + sizeof(VmdkGrainMarker);
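/* On disk, a compressed grain is a VmdkGrainMarker (LBA and byte size of
 * the deflate stream) immediately followed by the compressed data;
 * vmdk_read_extent() parses the same layout on the read side. */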
1009 ret = bdrv_pwrite(extent->file,
1010 cluster_offset + offset_in_cluster,
1013 if (ret != write_len) {
1014 ret = ret < 0 ? ret : -EIO;
1023 static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
1024 int64_t offset_in_cluster, uint8_t *buf,
1028 int cluster_bytes, buf_bytes;
1029 uint8_t *cluster_buf, *compressed_data;
1030 uint8_t *uncomp_buf;
1032 VmdkGrainMarker *marker;
1036 if (!extent->compressed) {
1037 ret = bdrv_pread(extent->file,
1038 cluster_offset + offset_in_cluster,
1039 buf, nb_sectors * 512);
1040 if (ret == nb_sectors * 512) {
1046 cluster_bytes = extent->cluster_sectors * 512;
1047 /* Read two clusters in case GrainMarker + compressed data > one cluster */
1048 buf_bytes = cluster_bytes * 2;
1049 cluster_buf = g_malloc(buf_bytes);
1050 uncomp_buf = g_malloc(cluster_bytes);
1051 ret = bdrv_pread(extent->file,
1053 cluster_buf, buf_bytes);
1057 compressed_data = cluster_buf;
1058 buf_len = cluster_bytes;
1059 data_len = cluster_bytes;
1060 if (extent->has_marker) {
1061 marker = (VmdkGrainMarker *)cluster_buf;
1062 compressed_data = marker->data;
1063 data_len = le32_to_cpu(marker->size);
1065 if (!data_len || data_len > buf_bytes) {
1069 ret = uncompress(uncomp_buf, &buf_len, compressed_data, data_len);
1075 if (offset_in_cluster < 0 ||
1076 offset_in_cluster + nb_sectors * 512 > buf_len) {
1080 memcpy(buf, uncomp_buf + offset_in_cluster, nb_sectors * 512);
1085 g_free(cluster_buf);
1089 static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
1090 uint8_t *buf, int nb_sectors)
1092 BDRVVmdkState *s = bs->opaque;
1094 uint64_t n, index_in_cluster;
1095 uint64_t extent_begin_sector, extent_relative_sector_num;
1096 VmdkExtent *extent = NULL;
1097 uint64_t cluster_offset;
1099 while (nb_sectors > 0) {
1100 extent = find_extent(s, sector_num, extent);
1104 ret = get_cluster_offset(
1106 sector_num << 9, 0, &cluster_offset);
1107 extent_begin_sector = extent->end_sector - extent->sectors;
1108 extent_relative_sector_num = sector_num - extent_begin_sector;
1109 index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
1110 n = extent->cluster_sectors - index_in_cluster;
1111 if (n > nb_sectors) {
1115 /* if not allocated, try to read from the parent image, if one exists */
1116 if (bs->backing_hd) {
1117 if (!vmdk_is_cid_valid(bs)) {
1120 ret = bdrv_read(bs->backing_hd, sector_num, buf, n);
1125 memset(buf, 0, 512 * n);
1128 ret = vmdk_read_extent(extent,
1129 cluster_offset, index_in_cluster * 512,
1142 static coroutine_fn int vmdk_co_read(BlockDriverState *bs, int64_t sector_num,
1143 uint8_t *buf, int nb_sectors)
1146 BDRVVmdkState *s = bs->opaque;
1147 qemu_co_mutex_lock(&s->lock);
1148 ret = vmdk_read(bs, sector_num, buf, nb_sectors);
1149 qemu_co_mutex_unlock(&s->lock);
1153 static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
1154 const uint8_t *buf, int nb_sectors)
1156 BDRVVmdkState *s = bs->opaque;
1157 VmdkExtent *extent = NULL;
1159 int64_t index_in_cluster;
1160 uint64_t extent_begin_sector, extent_relative_sector_num;
1161 uint64_t cluster_offset;
1162 VmdkMetaData m_data;
1164 if (sector_num > bs->total_sectors) {
1166 "(VMDK) Wrong offset: sector_num=0x%" PRIx64
1167 " total_sectors=0x%" PRIx64 "\n",
1168 sector_num, bs->total_sectors);
1172 while (nb_sectors > 0) {
1173 extent = find_extent(s, sector_num, extent);
1177 ret = get_cluster_offset(
1181 sector_num << 9, !extent->compressed,
1183 if (extent->compressed) {
1184 if (ret == VMDK_OK) {
1185 /* Refuse to write to an already allocated cluster for streamOptimized */
1187 "VMDK: can't write to allocated cluster"
1188 " for streamOptimized\n");
1192 ret = get_cluster_offset(
1203 extent_begin_sector = extent->end_sector - extent->sectors;
1204 extent_relative_sector_num = sector_num - extent_begin_sector;
1205 index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
1206 n = extent->cluster_sectors - index_in_cluster;
1207 if (n > nb_sectors) {
1211 ret = vmdk_write_extent(extent,
1212 cluster_offset, index_in_cluster * 512,
1213 buf, n, sector_num);
1218 /* update L2 tables */
1219 if (vmdk_L2update(extent, &m_data) == -1) {
1227 /* update CID on the first write after the virtual disk is opened */
1229 if (!s->cid_updated) {
1230 ret = vmdk_write_cid(bs, time(NULL));
1234 s->cid_updated = true;
1240 static coroutine_fn int vmdk_co_write(BlockDriverState *bs, int64_t sector_num,
1241 const uint8_t *buf, int nb_sectors)
1244 BDRVVmdkState *s = bs->opaque;
1245 qemu_co_mutex_lock(&s->lock);
1246 ret = vmdk_write(bs, sector_num, buf, nb_sectors);
1247 qemu_co_mutex_unlock(&s->lock);
1252 static int vmdk_create_extent(const char *filename, int64_t filesize,
1253 bool flat, bool compress)
1258 uint32_t tmp, magic, grains, gd_size, gt_size, gt_count;
1260 fd = qemu_open(filename,
1261 O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
1267 ret = ftruncate(fd, filesize);
1273 magic = cpu_to_be32(VMDK4_MAGIC);
1274 memset(&header, 0, sizeof(header));
1277 3 | (compress ? VMDK4_FLAG_COMPRESS | VMDK4_FLAG_MARKER : 0);
1278 header.compressAlgorithm = compress ? VMDK4_COMPRESSION_DEFLATE : 0;
1279 header.capacity = filesize / 512;
1280 header.granularity = 128;
1281 header.num_gtes_per_gte = 512;
1283 grains = (filesize / 512 + header.granularity - 1) / header.granularity;
1284 gt_size = ((header.num_gtes_per_gte * sizeof(uint32_t)) + 511) >> 9;
1286 (grains + header.num_gtes_per_gte - 1) / header.num_gtes_per_gte;
1287 gd_size = (gt_count * sizeof(uint32_t) + 511) >> 9;
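/* Worked example for a 1 GiB extent: grains = 2097152 / 128 = 16384,
 * gt_size = (512 * 4 + 511) >> 9 = 4 sectors per grain table,
 * gt_count = 16384 / 512 = 32 grain tables,
 * gd_size = (32 * 4 + 511) >> 9 = 1 sector for the grain directory. */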
1289 header.desc_offset = 1;
1290 header.desc_size = 20;
1291 header.rgd_offset = header.desc_offset + header.desc_size;
1292 header.gd_offset = header.rgd_offset + gd_size + (gt_size * gt_count);
1293 header.grain_offset =
1294 ((header.gd_offset + gd_size + (gt_size * gt_count) +
1295 header.granularity - 1) / header.granularity) *
1297 /* swap endianness for all header fields */
1298 header.version = cpu_to_le32(header.version);
1299 header.flags = cpu_to_le32(header.flags);
1300 header.capacity = cpu_to_le64(header.capacity);
1301 header.granularity = cpu_to_le64(header.granularity);
1302 header.num_gtes_per_gte = cpu_to_le32(header.num_gtes_per_gte);
1303 header.desc_offset = cpu_to_le64(header.desc_offset);
1304 header.desc_size = cpu_to_le64(header.desc_size);
1305 header.rgd_offset = cpu_to_le64(header.rgd_offset);
1306 header.gd_offset = cpu_to_le64(header.gd_offset);
1307 header.grain_offset = cpu_to_le64(header.grain_offset);
1308 header.compressAlgorithm = cpu_to_le16(header.compressAlgorithm);
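/* The check_bytes below are the "\n \r\n" sequence the VMDK format stores
 * so that corruption by text-mode file transfers (line-ending translation)
 * can be detected. */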
1310 header.check_bytes[0] = 0xa;
1311 header.check_bytes[1] = 0x20;
1312 header.check_bytes[2] = 0xd;
1313 header.check_bytes[3] = 0xa;
1315 /* write all the data */
1316 ret = qemu_write_full(fd, &magic, sizeof(magic));
1317 if (ret != sizeof(magic)) {
1321 ret = qemu_write_full(fd, &header, sizeof(header));
1322 if (ret != sizeof(header)) {
1327 ret = ftruncate(fd, le64_to_cpu(header.grain_offset) << 9);
1333 /* write grain directory */
1334 lseek(fd, le64_to_cpu(header.rgd_offset) << 9, SEEK_SET);
1335 for (i = 0, tmp = le64_to_cpu(header.rgd_offset) + gd_size;
1336 i < gt_count; i++, tmp += gt_size) {
1337 ret = qemu_write_full(fd, &tmp, sizeof(tmp));
1338 if (ret != sizeof(tmp)) {
1344 /* write backup grain directory */
1345 lseek(fd, le64_to_cpu(header.gd_offset) << 9, SEEK_SET);
1346 for (i = 0, tmp = le64_to_cpu(header.gd_offset) + gd_size;
1347 i < gt_count; i++, tmp += gt_size) {
1348 ret = qemu_write_full(fd, &tmp, sizeof(tmp));
1349 if (ret != sizeof(tmp)) {
1361 static int filename_decompose(const char *filename, char *path, char *prefix,
1362 char *postfix, size_t buf_len)
1366 if (filename == NULL || !strlen(filename)) {
1367 fprintf(stderr, "VMDK: no filename provided.\n");
1370 p = strrchr(filename, '/');
1372 p = strrchr(filename, '\\');
1375 p = strrchr(filename, ':');
1379 if (p - filename >= buf_len) {
1382 pstrcpy(path, p - filename + 1, filename);
1387 q = strrchr(p, '.');
1389 pstrcpy(prefix, buf_len, p);
1392 if (q - p >= buf_len) {
1395 pstrcpy(prefix, q - p + 1, p);
1396 pstrcpy(postfix, buf_len, q);
1401 static int relative_path(char *dest, int dest_size,
1402 const char *base, const char *target)
1408 const char *sep = "\\";
1410 const char *sep = "/";
1413 if (!(dest && base && target)) {
1416 if (path_is_absolute(target)) {
1417 pstrcpy(dest, dest_size, target);
1420 while (base[i] == target[i]) {
1433 pstrcat(dest, dest_size, "..");
1434 pstrcat(dest, dest_size, sep);
1436 pstrcat(dest, dest_size, q);
1440 static int vmdk_create(const char *filename, QEMUOptionParameter *options)
1443 char desc[BUF_SIZE];
1444 int64_t total_size = 0, filesize;
1445 const char *adapter_type = NULL;
1446 const char *backing_file = NULL;
1447 const char *fmt = NULL;
1450 bool flat, split, compress;
1451 char ext_desc_lines[BUF_SIZE] = "";
1452 char path[PATH_MAX], prefix[PATH_MAX], postfix[PATH_MAX];
1453 const int64_t split_size = 0x80000000; /* VMDK has constant split size */
1454 const char *desc_extent_line;
1455 char parent_desc_line[BUF_SIZE] = "";
1456 uint32_t parent_cid = 0xffffffff;
1457 uint32_t number_heads = 16;
1458 const char desc_template[] =
1459 "# Disk DescriptorFile\n"
1463 "createType=\"%s\"\n"
1466 "# Extent description\n"
1469 "# The Disk Data Base\n"
1472 "ddb.virtualHWVersion = \"%d\"\n"
1473 "ddb.geometry.cylinders = \"%" PRId64 "\"\n"
1474 "ddb.geometry.heads = \"%d\"\n"
1475 "ddb.geometry.sectors = \"63\"\n"
1476 "ddb.adapterType = \"%s\"\n";
1478 if (filename_decompose(filename, path, prefix, postfix, PATH_MAX)) {
1481 /* Read out options */
1482 while (options && options->name) {
1483 if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
1484 total_size = options->value.n;
1485 } else if (!strcmp(options->name, BLOCK_OPT_ADAPTER_TYPE)) {
1486 adapter_type = options->value.s;
1487 } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
1488 backing_file = options->value.s;
1489 } else if (!strcmp(options->name, BLOCK_OPT_COMPAT6)) {
1490 flags |= options->value.n ? BLOCK_FLAG_COMPAT6 : 0;
1491 } else if (!strcmp(options->name, BLOCK_OPT_SUBFMT)) {
1492 fmt = options->value.s;
1496 if (!adapter_type) {
1497 adapter_type = "ide";
1498 } else if (strcmp(adapter_type, "ide") &&
1499 strcmp(adapter_type, "buslogic") &&
1500 strcmp(adapter_type, "lsilogic") &&
1501 strcmp(adapter_type, "legacyESX")) {
1502 fprintf(stderr, "VMDK: Unknown adapter type: '%s'.\n", adapter_type);
1505 if (strcmp(adapter_type, "ide") != 0) {
1506 /* this is the number of heads VMware uses when creating, exporting,
1507 etc. VMDK files with a non-ide adapter type */
1511 /* Default format to monolithicSparse */
1512 fmt = "monolithicSparse";
1513 } else if (strcmp(fmt, "monolithicFlat") &&
1514 strcmp(fmt, "monolithicSparse") &&
1515 strcmp(fmt, "twoGbMaxExtentSparse") &&
1516 strcmp(fmt, "twoGbMaxExtentFlat") &&
1517 strcmp(fmt, "streamOptimized")) {
1518 fprintf(stderr, "VMDK: Unknown subformat: %s\n", fmt);
1521 split = !(strcmp(fmt, "twoGbMaxExtentFlat") &&
1522 strcmp(fmt, "twoGbMaxExtentSparse"));
1523 flat = !(strcmp(fmt, "monolithicFlat") &&
1524 strcmp(fmt, "twoGbMaxExtentFlat"));
1525 compress = !strcmp(fmt, "streamOptimized");
1527 desc_extent_line = "RW %lld FLAT \"%s\" 0\n";
1529 desc_extent_line = "RW %lld SPARSE \"%s\"\n";
1531 if (flat && backing_file) {
1532 /* a backing file is not supported for flat images */
1536 char parent_filename[PATH_MAX];
1537 BlockDriverState *bs = bdrv_new("");
1538 ret = bdrv_open(bs, backing_file, NULL, 0, NULL);
1543 if (strcmp(bs->drv->format_name, "vmdk")) {
1547 parent_cid = vmdk_read_cid(bs, 0);
1549 relative_path(parent_filename, sizeof(parent_filename),
1550 filename, backing_file);
1551 snprintf(parent_desc_line, sizeof(parent_desc_line),
1552 "parentFileNameHint=\"%s\"", parent_filename);
1555 /* Create extents */
1556 filesize = total_size;
1557 while (filesize > 0) {
1558 char desc_line[BUF_SIZE];
1559 char ext_filename[PATH_MAX];
1560 char desc_filename[PATH_MAX];
1561 int64_t size = filesize;
1563 if (split && size > split_size) {
1567 snprintf(desc_filename, sizeof(desc_filename), "%s-%c%03d%s",
1568 prefix, flat ? 'f' : 's', ++idx, postfix);
1570 snprintf(desc_filename, sizeof(desc_filename), "%s-flat%s",
1573 snprintf(desc_filename, sizeof(desc_filename), "%s%s",
1576 snprintf(ext_filename, sizeof(ext_filename), "%s%s",
1577 path, desc_filename);
1579 if (vmdk_create_extent(ext_filename, size, flat, compress)) {
1584 /* Format description line */
1585 snprintf(desc_line, sizeof(desc_line),
1586 desc_extent_line, size / 512, desc_filename);
1587 pstrcat(ext_desc_lines, sizeof(ext_desc_lines), desc_line);
1589 /* generate descriptor file */
1590 snprintf(desc, sizeof(desc), desc_template,
1591 (unsigned int)time(NULL),
1596 (flags & BLOCK_FLAG_COMPAT6 ? 6 : 4),
1597 total_size / (int64_t)(63 * number_heads * 512), number_heads,
1599 if (split || flat) {
1600 fd = qemu_open(filename,
1601 O_WRONLY | O_CREAT | O_TRUNC | O_BINARY | O_LARGEFILE,
1604 fd = qemu_open(filename,
1605 O_WRONLY | O_BINARY | O_LARGEFILE,
1611 /* the descriptor offset = 0x200 */
1612 if (!split && !flat && 0x200 != lseek(fd, 0x200, SEEK_SET)) {
1616 ret = qemu_write_full(fd, desc, strlen(desc));
1617 if (ret != strlen(desc)) {
1627 static void vmdk_close(BlockDriverState *bs)
1629 BDRVVmdkState *s = bs->opaque;
1631 vmdk_free_extents(bs);
1633 migrate_del_blocker(s->migration_blocker);
1634 error_free(s->migration_blocker);
1637 static coroutine_fn int vmdk_co_flush(BlockDriverState *bs)
1639 BDRVVmdkState *s = bs->opaque;
1643 for (i = 0; i < s->num_extents; i++) {
1644 err = bdrv_co_flush(s->extents[i].file);
1652 static int64_t vmdk_get_allocated_file_size(BlockDriverState *bs)
1657 BDRVVmdkState *s = bs->opaque;
1659 ret = bdrv_get_allocated_file_size(bs->file);
1663 for (i = 0; i < s->num_extents; i++) {
1664 if (s->extents[i].file == bs->file) {
1667 r = bdrv_get_allocated_file_size(s->extents[i].file);
1676 static QEMUOptionParameter vmdk_create_options[] = {
1678 .name = BLOCK_OPT_SIZE,
1680 .help = "Virtual disk size"
1683 .name = BLOCK_OPT_ADAPTER_TYPE,
1685 .help = "Virtual adapter type, can be one of "
1686 "ide (default), lsilogic, buslogic or legacyESX"
1689 .name = BLOCK_OPT_BACKING_FILE,
1691 .help = "File name of a base image"
1694 .name = BLOCK_OPT_COMPAT6,
1696 .help = "VMDK version 6 image"
1699 .name = BLOCK_OPT_SUBFMT,
1702 "VMDK flat extent format, can be one of "
1703 "{monolithicSparse (default) | monolithicFlat | twoGbMaxExtentSparse | twoGbMaxExtentFlat | streamOptimized} "
1708 static BlockDriver bdrv_vmdk = {
1709 .format_name = "vmdk",
1710 .instance_size = sizeof(BDRVVmdkState),
1711 .bdrv_probe = vmdk_probe,
1712 .bdrv_open = vmdk_open,
1713 .bdrv_reopen_prepare = vmdk_reopen_prepare,
1714 .bdrv_read = vmdk_co_read,
1715 .bdrv_write = vmdk_co_write,
1716 .bdrv_close = vmdk_close,
1717 .bdrv_create = vmdk_create,
1718 .bdrv_co_flush_to_disk = vmdk_co_flush,
1719 .bdrv_co_is_allocated = vmdk_co_is_allocated,
1720 .bdrv_get_allocated_file_size = vmdk_get_allocated_file_size,
1722 .create_options = vmdk_create_options,
1725 static void bdrv_vmdk_init(void)
1727 bdrv_register(&bdrv_vmdk);
1730 block_init(bdrv_vmdk_init);