/*
 * Block driver for the VMDK format
 *
 * Copyright (c) 2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "vl.h"
#include "block_int.h"

/* XXX: this code is untested */
/* XXX: add write support */
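
/* On-disk magic numbers: a VMDK 3 ("COWD") sparse extent begins with the
   ASCII bytes "COWD", a VMDK 4 hosted sparse extent with "KDMV". */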
#define VMDK3_MAGIC (('C' << 24) | ('O' << 16) | ('W' << 8) | 'D')
#define VMDK4_MAGIC (('K' << 24) | ('D' << 16) | ('M' << 8) | 'V')

/* On-disk header of a VMDK 3 (COWD) sparse extent.  All fields are
   little-endian; sizes and offsets are in 512-byte sectors. */
typedef struct {
    uint32_t version;
    uint32_t flags;
    uint32_t disk_sectors;
    uint32_t granularity;
    uint32_t l1dir_offset;
    uint32_t l1dir_size;
    uint32_t file_sectors;
    uint32_t cylinders;
    uint32_t heads;
    uint32_t sectors_per_track;
} VMDK3Header;

/* On-disk header of a VMDK 4 (KDMV) hosted sparse extent. */
typedef struct {
    uint32_t version;
    uint32_t flags;
    int64_t capacity;
    int64_t granularity;
    int64_t desc_offset;
    int64_t desc_size;
    int32_t num_gtes_per_gte;
    int64_t rgd_offset;
    int64_t gd_offset;
    int64_t grain_offset;
    char filler[1];
    char check_bytes[4];
} __attribute__((packed)) VMDK4Header;
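
/* number of L2 (grain) tables kept in memory at the same time */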
#define L2_CACHE_SIZE 16
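
/* A sparse extent maps the virtual disk through two levels of tables:
   the L1 directory, read once at open time, points to L2 grain tables,
   and each L2 entry holds the file sector of one data cluster ("grain").
   Only a few L2 tables are kept in memory, in a small cache that evicts
   the least-used entry. */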
typedef struct BDRVVmdkState {
    int fd;
    int64_t l1_table_offset;
    uint32_t *l1_table;
    unsigned int l1_size;
    uint32_t l1_entry_sectors;

    unsigned int l2_size;
    uint32_t *l2_cache;
    uint32_t l2_cache_offsets[L2_CACHE_SIZE];
    uint32_t l2_cache_counts[L2_CACHE_SIZE];

    unsigned int cluster_sectors;
} BDRVVmdkState;
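
/* Format probe used for auto-detection: return a high score (100) when
   the buffer starts with one of the VMDK magic numbers, 0 otherwise. */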
static int vmdk_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    uint32_t magic;

    if (buf_size < 4)
        return 0;
    magic = be32_to_cpu(*(uint32_t *)buf);
    if (magic == VMDK3_MAGIC ||
        magic == VMDK4_MAGIC)
        return 100;
    else
        return 0;
}
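
/* Open an image: parse the COWD or KDMV header, load the complete L1
   directory into memory and set up an empty L2 table cache. */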
static int vmdk_open(BlockDriverState *bs, const char *filename)
{
    BDRVVmdkState *s = bs->opaque;
    int fd, i;
    uint32_t magic;
    int l1_size;

    fd = open(filename, O_RDONLY | O_BINARY | O_LARGEFILE);
    if (fd < 0)
        return -1;
    if (read(fd, &magic, sizeof(magic)) != sizeof(magic))
        goto fail;
    magic = be32_to_cpu(magic);
    if (magic == VMDK3_MAGIC) {
        VMDK3Header header;

        if (read(fd, &header, sizeof(header)) !=
            sizeof(header))
            goto fail;
        s->cluster_sectors = le32_to_cpu(header.granularity);
        s->l2_size = 1 << 9;
        s->l1_size = 1 << 6;
        bs->total_sectors = le32_to_cpu(header.disk_sectors);
        s->l1_table_offset = le32_to_cpu(header.l1dir_offset) * 512;
        s->l1_entry_sectors = s->l2_size * s->cluster_sectors;
    } else if (magic == VMDK4_MAGIC) {
        VMDK4Header header;

        if (read(fd, &header, sizeof(header)) != sizeof(header))
            goto fail;
        bs->total_sectors = le64_to_cpu(header.capacity);
        s->cluster_sectors = le64_to_cpu(header.granularity);
        s->l2_size = le32_to_cpu(header.num_gtes_per_gte);
        s->l1_entry_sectors = s->l2_size * s->cluster_sectors;
        if (s->l1_entry_sectors <= 0)
            goto fail;
        s->l1_size = (bs->total_sectors + s->l1_entry_sectors - 1)
            / s->l1_entry_sectors;
        s->l1_table_offset = le64_to_cpu(header.rgd_offset) * 512;
    } else {
        goto fail;
    }

    /* read the L1 table */
    l1_size = s->l1_size * sizeof(uint32_t);
    s->l1_table = qemu_malloc(l1_size);
    if (!s->l1_table)
        goto fail;
    if (lseek(fd, s->l1_table_offset, SEEK_SET) == -1)
        goto fail;
    if (read(fd, s->l1_table, l1_size) != l1_size)
        goto fail;
    for(i = 0; i < s->l1_size; i++) {
        le32_to_cpus(&s->l1_table[i]);
    }

    s->l2_cache = qemu_malloc(s->l2_size * L2_CACHE_SIZE * sizeof(uint32_t));
    if (!s->l2_cache)
        goto fail;
    s->fd = fd;
    /* XXX: currently only read only */
    return 0;
 fail:
    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    close(fd);
    return -1;
}
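
/* Translate a byte offset in the virtual disk into the byte offset of its
   grain in the image file, or 0 if the grain is not allocated.  The L1
   directory gives the sector of the L2 grain table, which is looked up in
   (or loaded into) the cache, and the L2 entry gives the grain's location.
   For example, with the usual granularity of 128 sectors and 512 entries
   per grain table, one L1 entry covers 128 * 512 sectors = 32 MB of
   virtual disk. */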
static uint64_t get_cluster_offset(BlockDriverState *bs,
                                   uint64_t offset)
{
    BDRVVmdkState *s = bs->opaque;
    unsigned int l1_index, l2_offset, l2_index;
    int min_index, i, j;
    uint32_t min_count, *l2_table;
    uint64_t cluster_offset;

    l1_index = (offset >> 9) / s->l1_entry_sectors;
    if (l1_index >= s->l1_size)
        return 0;
    l2_offset = s->l1_table[l1_index];
    if (!l2_offset)
        return 0;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (l2_offset == s->l2_cache_offsets[i]) {
            /* increment the hit count */
            if (++s->l2_cache_counts[i] == 0xffffffff) {
                for(j = 0; j < L2_CACHE_SIZE; j++) {
                    s->l2_cache_counts[j] >>= 1;
                }
            }
            l2_table = s->l2_cache + (i * s->l2_size);
            goto found;
        }
    }
    /* not found: load a new entry in the least used one */
    min_index = 0;
    min_count = 0xffffffff;
    for(i = 0; i < L2_CACHE_SIZE; i++) {
        if (s->l2_cache_counts[i] < min_count) {
            min_count = s->l2_cache_counts[i];
            min_index = i;
        }
    }
    l2_table = s->l2_cache + (min_index * s->l2_size);
    lseek(s->fd, (int64_t)l2_offset * 512, SEEK_SET);
    if (read(s->fd, l2_table, s->l2_size * sizeof(uint32_t)) !=
        s->l2_size * sizeof(uint32_t))
        return 0;
    s->l2_cache_offsets[min_index] = l2_offset;
    s->l2_cache_counts[min_index] = 1;
 found:
    l2_index = ((offset >> 9) / s->cluster_sectors) % s->l2_size;
    cluster_offset = le32_to_cpu(l2_table[l2_index]);
    cluster_offset <<= 9;
    return cluster_offset;
}
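
/* Report whether the cluster containing sector_num is allocated and store
   in *pnum the number of contiguous sectors (at most nb_sectors) for which
   the answer holds. */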
static int vmdk_is_allocated(BlockDriverState *bs, int64_t sector_num,
                             int nb_sectors, int *pnum)
{
    BDRVVmdkState *s = bs->opaque;
    int index_in_cluster, n;
    uint64_t cluster_offset;

    cluster_offset = get_cluster_offset(bs, sector_num << 9);
    index_in_cluster = sector_num % s->cluster_sectors;
    n = s->cluster_sectors - index_in_cluster;
    if (n > nb_sectors)
        n = nb_sectors;
    *pnum = n;
    return (cluster_offset != 0);
}
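
/* Read nb_sectors starting at sector_num, one cluster at a time;
   unallocated clusters read back as zeroes. */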
static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
                     uint8_t *buf, int nb_sectors)
{
    BDRVVmdkState *s = bs->opaque;
    int ret, index_in_cluster, n;
    uint64_t cluster_offset;

    while (nb_sectors > 0) {
        cluster_offset = get_cluster_offset(bs, sector_num << 9);
        index_in_cluster = sector_num % s->cluster_sectors;
        n = s->cluster_sectors - index_in_cluster;
        if (n > nb_sectors)
            n = nb_sectors;
        if (!cluster_offset) {
            memset(buf, 0, 512 * n);
        } else {
            lseek(s->fd, cluster_offset + index_in_cluster * 512, SEEK_SET);
            ret = read(s->fd, buf, n * 512);
            if (ret != n * 512)
                return -1;
        }
        nb_sectors -= n;
        sector_num += n;
        buf += n * 512;
    }
    return 0;
}
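
/* Write support is not implemented yet (see the XXX note at the top of
   the file); every write request is rejected. */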
static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
                      const uint8_t *buf, int nb_sectors)
{
    return -1;
}
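
/* Release the tables and file descriptor acquired in vmdk_open. */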
static int vmdk_close(BlockDriverState *bs)
{
    BDRVVmdkState *s = bs->opaque;

    qemu_free(s->l1_table);
    qemu_free(s->l2_cache);
    close(s->fd);
    return 0;
}
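
/* Driver table entry picked up by the generic block layer. */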
BlockDriver bdrv_vmdk = {
    "vmdk",
    sizeof(BDRVVmdkState),
    vmdk_probe,
    vmdk_open,
    vmdk_read,
    vmdk_write,
    vmdk_close,
    NULL, /* no create yet */
    vmdk_is_allocated,
};