1 | /* | |
2 | * Block driver for the QCOW version 2 format | |
3 | * | |
4 | * Copyright (c) 2004-2006 Fabrice Bellard | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | #include "qemu-common.h" | |
25 | #include "block_int.h" | |
26 | #include "module.h" | |
27 | #include <zlib.h> | |
28 | #include "aes.h" | |
29 | #include "block/qcow2.h" | |
30 | #include "qemu-error.h" | |
31 | #include "qerror.h" | |
32 | #include "trace.h" | |
33 | ||
34 | /* | |
35 | Differences with QCOW: | |
36 | ||
37 | - Support for multiple incremental snapshots. | |
38 | - Memory management by reference counts. | |
39 | - Clusters which have a reference count of one have the QCOW_OFLAG_COPIED | |
40 | bit set to optimize write performance. | |
41 | - The size of compressed clusters is stored in sectors to reduce bit usage | |
42 | in the cluster offsets. | |
43 | - Support for storing additional data (such as the VM state) in the | |
44 | snapshots. | |
45 | - If a backing store is used, the cluster size is not constrained | |
46 | (could be backported to QCOW). | |
47 | - L2 tables always have a size of one cluster (see the example below). | |
48 | */ | |
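/*
 * Illustrative example (not part of the original header comment): with the
 * default 64 KiB cluster size, cluster_bits is 16 and an L2 table is one
 * cluster of 8192 eight-byte entries (l2_bits = 13), so a single L2 table
 * maps 8192 * 64 KiB = 512 MiB of guest data. The lookup of a guest offset
 * then works roughly as:
 *
 *     l1_index = guest_offset >> (cluster_bits + l2_bits);
 *     l2_index = (guest_offset >> cluster_bits) & (l2_size - 1);
 */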
49 | ||
50 | ||
51 | typedef struct { | |
52 | uint32_t magic; | |
53 | uint32_t len; | |
54 | } QCowExtension; | |
55 | ||
56 | #define QCOW2_EXT_MAGIC_END 0 | |
57 | #define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xE2792ACA | |
58 | #define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857 | |
59 | ||
60 | static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename) | |
61 | { | |
62 | const QCowHeader *cow_header = (const void *)buf; | |
63 | ||
64 | if (buf_size >= sizeof(QCowHeader) && | |
65 | be32_to_cpu(cow_header->magic) == QCOW_MAGIC && | |
66 | be32_to_cpu(cow_header->version) >= 2) | |
67 | return 100; | |
68 | else | |
69 | return 0; | |
70 | } | |
71 | ||
72 | ||
73 | /* | |
74 | * Read the qcow2 header extensions and fill in bs accordingly. | |
75 | * Reading starts at start_offset and finishes when an extension with the | |
76 | * end magic (0) is found or end_offset is reached. Extensions with an | |
77 | * unknown magic are skipped (future extensions this version knows nothing | |
78 | * about). Returns 0 on success, non-zero otherwise. | |
79 | */ | |
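/*
 * For reference (added for clarity), the on-disk layout of one header
 * extension as parsed by the loop below is:
 *
 *     bytes 0 - 3:   extension magic (big endian)
 *     bytes 4 - 7:   length of the extension data in bytes (big endian)
 *     bytes 8 - n:   extension data, padded with zeros up to the next
 *                    multiple of 8 bytes
 */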
80 | static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, | |
81 | uint64_t end_offset, void **p_feature_table) | |
82 | { | |
83 | BDRVQcowState *s = bs->opaque; | |
84 | QCowExtension ext; | |
85 | uint64_t offset; | |
86 | int ret; | |
87 | ||
88 | #ifdef DEBUG_EXT | |
89 | printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset); | |
90 | #endif | |
91 | offset = start_offset; | |
92 | while (offset < end_offset) { | |
93 | ||
94 | #ifdef DEBUG_EXT | |
95 | /* Sanity check */ | |
96 | if (offset > s->cluster_size) | |
97 | printf("qcow2_read_extension: suspicious offset %lu\n", offset); | |
98 | ||
99 | printf("attempting to read extended header in offset %lu\n", offset); | |
100 | #endif | |
101 | ||
102 | if (bdrv_pread(bs->file, offset, &ext, sizeof(ext)) != sizeof(ext)) { | |
103 | fprintf(stderr, "qcow2_read_extension: ERROR: " | |
104 | "pread fail from offset %" PRIu64 "\n", | |
105 | offset); | |
106 | return 1; | |
107 | } | |
108 | be32_to_cpus(&ext.magic); | |
109 | be32_to_cpus(&ext.len); | |
110 | offset += sizeof(ext); | |
111 | #ifdef DEBUG_EXT | |
112 | printf("ext.magic = 0x%x\n", ext.magic); | |
113 | #endif | |
114 | if (ext.len > end_offset - offset) { | |
115 | error_report("Header extension too large"); | |
116 | return -EINVAL; | |
117 | } | |
118 | ||
119 | switch (ext.magic) { | |
120 | case QCOW2_EXT_MAGIC_END: | |
121 | return 0; | |
122 | ||
123 | case QCOW2_EXT_MAGIC_BACKING_FORMAT: | |
124 | if (ext.len >= sizeof(bs->backing_format)) { | |
125 | fprintf(stderr, "ERROR: ext_backing_format: len=%u too large" | |
126 | " (>=%zu)\n", | |
127 | ext.len, sizeof(bs->backing_format)); | |
128 | return 2; | |
129 | } | |
130 | if (bdrv_pread(bs->file, offset, bs->backing_format, | |
131 | ext.len) != ext.len) | |
132 | return 3; | |
133 | bs->backing_format[ext.len] = '\0'; | |
134 | #ifdef DEBUG_EXT | |
135 | printf("Qcow2: Got format extension %s\n", bs->backing_format); | |
136 | #endif | |
137 | break; | |
138 | ||
139 | case QCOW2_EXT_MAGIC_FEATURE_TABLE: | |
140 | if (p_feature_table != NULL) { | |
141 | void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); | |
142 | ret = bdrv_pread(bs->file, offset, feature_table, ext.len); | |
143 | if (ret < 0) { | |
144 | return ret; | |
145 | } | |
146 | ||
147 | *p_feature_table = feature_table; | |
148 | } | |
149 | break; | |
150 | ||
151 | default: | |
152 | /* unknown magic - save it in case we need to rewrite the header */ | |
153 | { | |
154 | Qcow2UnknownHeaderExtension *uext; | |
155 | ||
156 | uext = g_malloc0(sizeof(*uext) + ext.len); | |
157 | uext->magic = ext.magic; | |
158 | uext->len = ext.len; | |
159 | QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next); | |
160 | ||
161 | ret = bdrv_pread(bs->file, offset, uext->data, uext->len); | |
162 | if (ret < 0) { | |
163 | return ret; | |
164 | } | |
165 | } | |
166 | break; | |
167 | } | |
168 | ||
169 | offset += ((ext.len + 7) & ~7); | |
170 | } | |
171 | ||
172 | return 0; | |
173 | } | |
174 | ||
175 | static void cleanup_unknown_header_ext(BlockDriverState *bs) | |
176 | { | |
177 | BDRVQcowState *s = bs->opaque; | |
178 | Qcow2UnknownHeaderExtension *uext, *next; | |
179 | ||
180 | QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) { | |
181 | QLIST_REMOVE(uext, next); | |
182 | g_free(uext); | |
183 | } | |
184 | } | |
185 | ||
186 | static void GCC_FMT_ATTR(2, 3) report_unsupported(BlockDriverState *bs, | |
187 | const char *fmt, ...) | |
188 | { | |
189 | char msg[64]; | |
190 | va_list ap; | |
191 | ||
192 | va_start(ap, fmt); | |
193 | vsnprintf(msg, sizeof(msg), fmt, ap); | |
194 | va_end(ap); | |
195 | ||
196 | qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE, | |
197 | bs->device_name, "qcow2", msg); | |
198 | } | |
199 | ||
200 | static void report_unsupported_feature(BlockDriverState *bs, | |
201 | Qcow2Feature *table, uint64_t mask) | |
202 | { | |
203 | while (table && table->name[0] != '\0') { | |
204 | if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) { | |
205 | if (mask & (1ULL << table->bit)) { | |
206 | report_unsupported(bs, "%.46s", table->name); | |
207 | mask &= ~(1ULL << table->bit); | |
208 | } | |
209 | } | |
210 | table++; | |
211 | } | |
212 | ||
213 | if (mask) { | |
214 | report_unsupported(bs, "Unknown incompatible feature: %" PRIx64, mask); | |
215 | } | |
216 | } | |
217 | ||
218 | /* | |
219 | * Sets the dirty bit and flushes afterwards if necessary. | |
220 | * | |
221 | * The dirty bit in s->incompatible_features is only set if the image file | |
222 | * header was updated successfully, so it is not required to check the | |
223 | * return value of this function. | |
224 | */ | |
225 | static int qcow2_mark_dirty(BlockDriverState *bs) | |
226 | { | |
227 | BDRVQcowState *s = bs->opaque; | |
228 | uint64_t val; | |
229 | int ret; | |
230 | ||
231 | assert(s->qcow_version >= 3); | |
232 | ||
233 | if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { | |
234 | return 0; /* already dirty */ | |
235 | } | |
236 | ||
237 | val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY); | |
238 | ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features), | |
239 | &val, sizeof(val)); | |
240 | if (ret < 0) { | |
241 | return ret; | |
242 | } | |
243 | ret = bdrv_flush(bs->file); | |
244 | if (ret < 0) { | |
245 | return ret; | |
246 | } | |
247 | ||
248 | /* Only treat image as dirty if the header was updated successfully */ | |
249 | s->incompatible_features |= QCOW2_INCOMPAT_DIRTY; | |
250 | return 0; | |
251 | } | |
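/*
 * Note (added for clarity): with lazy refcounts enabled, qcow2_co_writev()
 * calls qcow2_mark_dirty() before cluster-allocating writes, and
 * qcow2_open() runs qcow2_check() with BDRV_FIX_ERRORS when it finds the
 * dirty bit set, so refcounts left stale by a crash are repaired on the
 * next open. qcow2_mark_clean() below clears the bit again on a clean
 * shutdown.
 */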
252 | ||
253 | /* | |
254 | * Clears the dirty bit and flushes before if necessary. Only call this | |
255 | * function when there are no pending requests, it does not guard against | |
256 | * concurrent requests dirtying the image. | |
257 | */ | |
258 | static int qcow2_mark_clean(BlockDriverState *bs) | |
259 | { | |
260 | BDRVQcowState *s = bs->opaque; | |
261 | ||
262 | if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) { | |
263 | int ret = bdrv_flush(bs); | |
264 | if (ret < 0) { | |
265 | return ret; | |
266 | } | |
267 | ||
268 | s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY; | |
269 | return qcow2_update_header(bs); | |
270 | } | |
271 | return 0; | |
272 | } | |
273 | ||
274 | static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result, | |
275 | BdrvCheckMode fix) | |
276 | { | |
277 | int ret = qcow2_check_refcounts(bs, result, fix); | |
278 | if (ret < 0) { | |
279 | return ret; | |
280 | } | |
281 | ||
282 | if (fix && result->check_errors == 0 && result->corruptions == 0) { | |
283 | return qcow2_mark_clean(bs); | |
284 | } | |
285 | return ret; | |
286 | } | |
287 | ||
288 | static int qcow2_open(BlockDriverState *bs, int flags) | |
289 | { | |
290 | BDRVQcowState *s = bs->opaque; | |
291 | int len, i, ret = 0; | |
292 | QCowHeader header; | |
293 | uint64_t ext_end; | |
294 | ||
295 | ret = bdrv_pread(bs->file, 0, &header, sizeof(header)); | |
296 | if (ret < 0) { | |
297 | goto fail; | |
298 | } | |
299 | be32_to_cpus(&header.magic); | |
300 | be32_to_cpus(&header.version); | |
301 | be64_to_cpus(&header.backing_file_offset); | |
302 | be32_to_cpus(&header.backing_file_size); | |
303 | be64_to_cpus(&header.size); | |
304 | be32_to_cpus(&header.cluster_bits); | |
305 | be32_to_cpus(&header.crypt_method); | |
306 | be64_to_cpus(&header.l1_table_offset); | |
307 | be32_to_cpus(&header.l1_size); | |
308 | be64_to_cpus(&header.refcount_table_offset); | |
309 | be32_to_cpus(&header.refcount_table_clusters); | |
310 | be64_to_cpus(&header.snapshots_offset); | |
311 | be32_to_cpus(&header.nb_snapshots); | |
312 | ||
313 | if (header.magic != QCOW_MAGIC) { | |
314 | ret = -EINVAL; | |
315 | goto fail; | |
316 | } | |
317 | if (header.version < 2 || header.version > 3) { | |
318 | report_unsupported(bs, "QCOW version %d", header.version); | |
319 | ret = -ENOTSUP; | |
320 | goto fail; | |
321 | } | |
322 | ||
323 | s->qcow_version = header.version; | |
324 | ||
325 | /* Initialise version 3 header fields */ | |
326 | if (header.version == 2) { | |
327 | header.incompatible_features = 0; | |
328 | header.compatible_features = 0; | |
329 | header.autoclear_features = 0; | |
330 | header.refcount_order = 4; | |
331 | header.header_length = 72; | |
332 | } else { | |
333 | be64_to_cpus(&header.incompatible_features); | |
334 | be64_to_cpus(&header.compatible_features); | |
335 | be64_to_cpus(&header.autoclear_features); | |
336 | be32_to_cpus(&header.refcount_order); | |
337 | be32_to_cpus(&header.header_length); | |
338 | } | |
339 | ||
340 | if (header.header_length > sizeof(header)) { | |
341 | s->unknown_header_fields_size = header.header_length - sizeof(header); | |
342 | s->unknown_header_fields = g_malloc(s->unknown_header_fields_size); | |
343 | ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields, | |
344 | s->unknown_header_fields_size); | |
345 | if (ret < 0) { | |
346 | goto fail; | |
347 | } | |
348 | } | |
349 | ||
350 | if (header.backing_file_offset) { | |
351 | ext_end = header.backing_file_offset; | |
352 | } else { | |
353 | ext_end = 1 << header.cluster_bits; | |
354 | } | |
355 | ||
356 | /* Handle feature bits */ | |
357 | s->incompatible_features = header.incompatible_features; | |
358 | s->compatible_features = header.compatible_features; | |
359 | s->autoclear_features = header.autoclear_features; | |
360 | ||
361 | if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) { | |
362 | void *feature_table = NULL; | |
363 | qcow2_read_extensions(bs, header.header_length, ext_end, | |
364 | &feature_table); | |
365 | report_unsupported_feature(bs, feature_table, | |
366 | s->incompatible_features & | |
367 | ~QCOW2_INCOMPAT_MASK); | |
368 | ret = -ENOTSUP; | |
369 | goto fail; | |
370 | } | |
371 | ||
372 | /* Check support for various header values */ | |
373 | if (header.refcount_order != 4) { | |
374 | report_unsupported(bs, "%d bit reference counts", | |
375 | 1 << header.refcount_order); | |
376 | ret = -ENOTSUP; | |
377 | goto fail; | |
378 | } | |
379 | ||
380 | if (header.cluster_bits < MIN_CLUSTER_BITS || | |
381 | header.cluster_bits > MAX_CLUSTER_BITS) { | |
382 | ret = -EINVAL; | |
383 | goto fail; | |
384 | } | |
385 | if (header.crypt_method > QCOW_CRYPT_AES) { | |
386 | ret = -EINVAL; | |
387 | goto fail; | |
388 | } | |
389 | s->crypt_method_header = header.crypt_method; | |
390 | if (s->crypt_method_header) { | |
391 | bs->encrypted = 1; | |
392 | } | |
393 | s->cluster_bits = header.cluster_bits; | |
394 | s->cluster_size = 1 << s->cluster_bits; | |
395 | s->cluster_sectors = 1 << (s->cluster_bits - 9); | |
396 | s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */ | |
397 | s->l2_size = 1 << s->l2_bits; | |
398 | bs->total_sectors = header.size / 512; | |
399 | s->csize_shift = (62 - (s->cluster_bits - 8)); | |
400 | s->csize_mask = (1 << (s->cluster_bits - 8)) - 1; | |
401 | s->cluster_offset_mask = (1LL << s->csize_shift) - 1; | |
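/*
 * Illustration of the three values above: in an L2 entry for a compressed
 * cluster, the low csize_shift bits hold the host offset and the bits above
 * hold the compressed size counted in 512-byte sectors (see the header
 * comment at the top of this file). With 64 KiB clusters this gives
 * csize_shift = 54, csize_mask = 0xff and a 54-bit cluster_offset_mask.
 */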
402 | s->refcount_table_offset = header.refcount_table_offset; | |
403 | s->refcount_table_size = | |
404 | header.refcount_table_clusters << (s->cluster_bits - 3); | |
405 | ||
406 | s->snapshots_offset = header.snapshots_offset; | |
407 | s->nb_snapshots = header.nb_snapshots; | |
408 | ||
409 | /* read the level 1 table */ | |
410 | s->l1_size = header.l1_size; | |
411 | s->l1_vm_state_index = size_to_l1(s, header.size); | |
412 | /* the L1 table must contain at least enough entries to cover | |
413 | header.size bytes */ | |
414 | if (s->l1_size < s->l1_vm_state_index) { | |
415 | ret = -EINVAL; | |
416 | goto fail; | |
417 | } | |
418 | s->l1_table_offset = header.l1_table_offset; | |
419 | if (s->l1_size > 0) { | |
420 | s->l1_table = g_malloc0( | |
421 | align_offset(s->l1_size * sizeof(uint64_t), 512)); | |
422 | ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table, | |
423 | s->l1_size * sizeof(uint64_t)); | |
424 | if (ret < 0) { | |
425 | goto fail; | |
426 | } | |
427 | for(i = 0;i < s->l1_size; i++) { | |
428 | be64_to_cpus(&s->l1_table[i]); | |
429 | } | |
430 | } | |
431 | ||
432 | /* alloc L2 table/refcount block cache */ | |
433 | s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE); | |
434 | s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE); | |
435 | ||
436 | s->cluster_cache = g_malloc(s->cluster_size); | |
437 | /* one more sector for decompressed data alignment */ | |
438 | s->cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size | |
439 | + 512); | |
440 | s->cluster_cache_offset = -1; | |
441 | s->flags = flags; | |
442 | ||
443 | ret = qcow2_refcount_init(bs); | |
444 | if (ret != 0) { | |
445 | goto fail; | |
446 | } | |
447 | ||
448 | QLIST_INIT(&s->cluster_allocs); | |
449 | ||
450 | /* read qcow2 extensions */ | |
451 | if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL)) { | |
452 | ret = -EINVAL; | |
453 | goto fail; | |
454 | } | |
455 | ||
456 | /* read the backing file name */ | |
457 | if (header.backing_file_offset != 0) { | |
458 | len = header.backing_file_size; | |
459 | if (len > 1023) { | |
460 | len = 1023; | |
461 | } | |
462 | ret = bdrv_pread(bs->file, header.backing_file_offset, | |
463 | bs->backing_file, len); | |
464 | if (ret < 0) { | |
465 | goto fail; | |
466 | } | |
467 | bs->backing_file[len] = '\0'; | |
468 | } | |
469 | ||
470 | ret = qcow2_read_snapshots(bs); | |
471 | if (ret < 0) { | |
472 | goto fail; | |
473 | } | |
474 | ||
475 | /* Clear unknown autoclear feature bits */ | |
476 | if (!bs->read_only && s->autoclear_features != 0) { | |
477 | s->autoclear_features = 0; | |
478 | ret = qcow2_update_header(bs); | |
479 | if (ret < 0) { | |
480 | goto fail; | |
481 | } | |
482 | } | |
483 | ||
484 | /* Initialise locks */ | |
485 | qemu_co_mutex_init(&s->lock); | |
486 | ||
487 | /* Repair image if dirty */ | |
488 | if (!(flags & BDRV_O_CHECK) && !bs->read_only && | |
489 | (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) { | |
490 | BdrvCheckResult result = {0}; | |
491 | ||
492 | ret = qcow2_check(bs, &result, BDRV_FIX_ERRORS); | |
493 | if (ret < 0) { | |
494 | goto fail; | |
495 | } | |
496 | } | |
497 | ||
498 | #ifdef DEBUG_ALLOC | |
499 | { | |
500 | BdrvCheckResult result = {0}; | |
501 | qcow2_check_refcounts(bs, &result, 0); | |
502 | } | |
503 | #endif | |
504 | return ret; | |
505 | ||
506 | fail: | |
507 | g_free(s->unknown_header_fields); | |
508 | cleanup_unknown_header_ext(bs); | |
509 | qcow2_free_snapshots(bs); | |
510 | qcow2_refcount_close(bs); | |
511 | g_free(s->l1_table); | |
512 | if (s->l2_table_cache) { | |
513 | qcow2_cache_destroy(bs, s->l2_table_cache); | |
514 | } | |
515 | g_free(s->cluster_cache); | |
516 | qemu_vfree(s->cluster_data); | |
517 | return ret; | |
518 | } | |
519 | ||
520 | static int qcow2_set_key(BlockDriverState *bs, const char *key) | |
521 | { | |
522 | BDRVQcowState *s = bs->opaque; | |
523 | uint8_t keybuf[16]; | |
524 | int len, i; | |
525 | ||
526 | memset(keybuf, 0, 16); | |
527 | len = strlen(key); | |
528 | if (len > 16) | |
529 | len = 16; | |
530 | /* XXX: we could compress the chars to 7 bits to increase | |
531 | entropy */ | |
532 | for(i = 0;i < len;i++) { | |
533 | keybuf[i] = key[i]; | |
534 | } | |
535 | s->crypt_method = s->crypt_method_header; | |
536 | ||
537 | if (AES_set_encrypt_key(keybuf, 128, &s->aes_encrypt_key) != 0) | |
538 | return -1; | |
539 | if (AES_set_decrypt_key(keybuf, 128, &s->aes_decrypt_key) != 0) | |
540 | return -1; | |
541 | #if 0 | |
542 | /* test */ | |
543 | { | |
544 | uint8_t in[16]; | |
545 | uint8_t out[16]; | |
546 | uint8_t tmp[16]; | |
547 | for(i=0;i<16;i++) | |
548 | in[i] = i; | |
549 | AES_encrypt(in, tmp, &s->aes_encrypt_key); | |
550 | AES_decrypt(tmp, out, &s->aes_decrypt_key); | |
551 | for(i = 0; i < 16; i++) | |
552 | printf(" %02x", tmp[i]); | |
553 | printf("\n"); | |
554 | for(i = 0; i < 16; i++) | |
555 | printf(" %02x", out[i]); | |
556 | printf("\n"); | |
557 | } | |
558 | #endif | |
559 | return 0; | |
560 | } | |
561 | ||
562 | /* We have nothing to do for QCOW2 reopen; the stub just returns | |
563 | * success */ | |
564 | static int qcow2_reopen_prepare(BDRVReopenState *state, | |
565 | BlockReopenQueue *queue, Error **errp) | |
566 | { | |
567 | return 0; | |
568 | } | |
569 | ||
570 | static int coroutine_fn qcow2_co_is_allocated(BlockDriverState *bs, | |
571 | int64_t sector_num, int nb_sectors, int *pnum) | |
572 | { | |
573 | BDRVQcowState *s = bs->opaque; | |
574 | uint64_t cluster_offset; | |
575 | int ret; | |
576 | ||
577 | *pnum = nb_sectors; | |
578 | /* FIXME We can get errors here, but the bdrv_co_is_allocated interface | |
579 | * can't pass them on today */ | |
580 | qemu_co_mutex_lock(&s->lock); | |
581 | ret = qcow2_get_cluster_offset(bs, sector_num << 9, pnum, &cluster_offset); | |
582 | qemu_co_mutex_unlock(&s->lock); | |
583 | if (ret < 0) { | |
584 | *pnum = 0; | |
585 | } | |
586 | ||
587 | return (cluster_offset != 0); | |
588 | } | |
589 | ||
590 | /* handle reading after the end of the backing file */ | |
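/*
 * Example (illustration only): if the backing file ends at sector 1000 and
 * the guest reads sectors 990..1009, the last 10 sectors of the request are
 * zeroed in qiov and 10 is returned, so the caller only reads sectors
 * 990..999 from the backing file.
 */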
591 | int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov, | |
592 | int64_t sector_num, int nb_sectors) | |
593 | { | |
594 | int n1; | |
595 | if ((sector_num + nb_sectors) <= bs->total_sectors) | |
596 | return nb_sectors; | |
597 | if (sector_num >= bs->total_sectors) | |
598 | n1 = 0; | |
599 | else | |
600 | n1 = bs->total_sectors - sector_num; | |
601 | ||
602 | qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1)); | |
603 | ||
604 | return n1; | |
605 | } | |
606 | ||
607 | static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num, | |
608 | int remaining_sectors, QEMUIOVector *qiov) | |
609 | { | |
610 | BDRVQcowState *s = bs->opaque; | |
611 | int index_in_cluster, n1; | |
612 | int ret; | |
613 | int cur_nr_sectors; /* number of sectors in current iteration */ | |
614 | uint64_t cluster_offset = 0; | |
615 | uint64_t bytes_done = 0; | |
616 | QEMUIOVector hd_qiov; | |
617 | uint8_t *cluster_data = NULL; | |
618 | ||
619 | qemu_iovec_init(&hd_qiov, qiov->niov); | |
620 | ||
621 | qemu_co_mutex_lock(&s->lock); | |
622 | ||
623 | while (remaining_sectors != 0) { | |
624 | ||
625 | /* prepare next request */ | |
626 | cur_nr_sectors = remaining_sectors; | |
627 | if (s->crypt_method) { | |
628 | cur_nr_sectors = MIN(cur_nr_sectors, | |
629 | QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); | |
630 | } | |
631 | ||
632 | ret = qcow2_get_cluster_offset(bs, sector_num << 9, | |
633 | &cur_nr_sectors, &cluster_offset); | |
634 | if (ret < 0) { | |
635 | goto fail; | |
636 | } | |
637 | ||
638 | index_in_cluster = sector_num & (s->cluster_sectors - 1); | |
639 | ||
640 | qemu_iovec_reset(&hd_qiov); | |
641 | qemu_iovec_concat(&hd_qiov, qiov, bytes_done, | |
642 | cur_nr_sectors * 512); | |
643 | ||
644 | switch (ret) { | |
645 | case QCOW2_CLUSTER_UNALLOCATED: | |
646 | ||
647 | if (bs->backing_hd) { | |
648 | /* read from the base image */ | |
649 | n1 = qcow2_backing_read1(bs->backing_hd, &hd_qiov, | |
650 | sector_num, cur_nr_sectors); | |
651 | if (n1 > 0) { | |
652 | BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO); | |
653 | qemu_co_mutex_unlock(&s->lock); | |
654 | ret = bdrv_co_readv(bs->backing_hd, sector_num, | |
655 | n1, &hd_qiov); | |
656 | qemu_co_mutex_lock(&s->lock); | |
657 | if (ret < 0) { | |
658 | goto fail; | |
659 | } | |
660 | } | |
661 | } else { | |
662 | /* Note: in this case, no need to wait */ | |
663 | qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors); | |
664 | } | |
665 | break; | |
666 | ||
667 | case QCOW2_CLUSTER_ZERO: | |
668 | if (s->qcow_version < 3) { | |
669 | ret = -EIO; | |
670 | goto fail; | |
671 | } | |
672 | qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors); | |
673 | break; | |
674 | ||
675 | case QCOW2_CLUSTER_COMPRESSED: | |
676 | /* add AIO support for compressed blocks ? */ | |
677 | ret = qcow2_decompress_cluster(bs, cluster_offset); | |
678 | if (ret < 0) { | |
679 | goto fail; | |
680 | } | |
681 | ||
682 | qemu_iovec_from_buf(&hd_qiov, 0, | |
683 | s->cluster_cache + index_in_cluster * 512, | |
684 | 512 * cur_nr_sectors); | |
685 | break; | |
686 | ||
687 | case QCOW2_CLUSTER_NORMAL: | |
688 | if ((cluster_offset & 511) != 0) { | |
689 | ret = -EIO; | |
690 | goto fail; | |
691 | } | |
692 | ||
693 | if (s->crypt_method) { | |
694 | /* | |
695 | * For encrypted images, read everything into a temporary | |
696 | * contiguous buffer on which the AES functions can work. | |
697 | */ | |
698 | if (!cluster_data) { | |
699 | cluster_data = | |
700 | qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); | |
701 | } | |
702 | ||
703 | assert(cur_nr_sectors <= | |
704 | QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors); | |
705 | qemu_iovec_reset(&hd_qiov); | |
706 | qemu_iovec_add(&hd_qiov, cluster_data, | |
707 | 512 * cur_nr_sectors); | |
708 | } | |
709 | ||
710 | BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); | |
711 | qemu_co_mutex_unlock(&s->lock); | |
712 | ret = bdrv_co_readv(bs->file, | |
713 | (cluster_offset >> 9) + index_in_cluster, | |
714 | cur_nr_sectors, &hd_qiov); | |
715 | qemu_co_mutex_lock(&s->lock); | |
716 | if (ret < 0) { | |
717 | goto fail; | |
718 | } | |
719 | if (s->crypt_method) { | |
720 | qcow2_encrypt_sectors(s, sector_num, cluster_data, | |
721 | cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key); | |
722 | qemu_iovec_from_buf(qiov, bytes_done, | |
723 | cluster_data, 512 * cur_nr_sectors); | |
724 | } | |
725 | break; | |
726 | ||
727 | default: | |
728 | g_assert_not_reached(); | |
729 | ret = -EIO; | |
730 | goto fail; | |
731 | } | |
732 | ||
733 | remaining_sectors -= cur_nr_sectors; | |
734 | sector_num += cur_nr_sectors; | |
735 | bytes_done += cur_nr_sectors * 512; | |
736 | } | |
737 | ret = 0; | |
738 | ||
739 | fail: | |
740 | qemu_co_mutex_unlock(&s->lock); | |
741 | ||
742 | qemu_iovec_destroy(&hd_qiov); | |
743 | qemu_vfree(cluster_data); | |
744 | ||
745 | return ret; | |
746 | } | |
747 | ||
748 | static void run_dependent_requests(BDRVQcowState *s, QCowL2Meta *m) | |
749 | { | |
750 | /* Take the request off the list of running requests */ | |
751 | if (m->nb_clusters != 0) { | |
752 | QLIST_REMOVE(m, next_in_flight); | |
753 | } | |
754 | ||
755 | /* Restart all dependent requests */ | |
756 | if (!qemu_co_queue_empty(&m->dependent_requests)) { | |
757 | qemu_co_mutex_unlock(&s->lock); | |
758 | qemu_co_queue_restart_all(&m->dependent_requests); | |
759 | qemu_co_mutex_lock(&s->lock); | |
760 | } | |
761 | } | |
762 | ||
763 | static coroutine_fn int qcow2_co_writev(BlockDriverState *bs, | |
764 | int64_t sector_num, | |
765 | int remaining_sectors, | |
766 | QEMUIOVector *qiov) | |
767 | { | |
768 | BDRVQcowState *s = bs->opaque; | |
769 | int index_in_cluster; | |
770 | int n_end; | |
771 | int ret; | |
772 | int cur_nr_sectors; /* number of sectors in current iteration */ | |
773 | uint64_t cluster_offset; | |
774 | QEMUIOVector hd_qiov; | |
775 | uint64_t bytes_done = 0; | |
776 | uint8_t *cluster_data = NULL; | |
777 | QCowL2Meta *l2meta; | |
778 | ||
779 | trace_qcow2_writev_start_req(qemu_coroutine_self(), sector_num, | |
780 | remaining_sectors); | |
781 | ||
782 | qemu_iovec_init(&hd_qiov, qiov->niov); | |
783 | ||
784 | s->cluster_cache_offset = -1; /* disable compressed cache */ | |
785 | ||
786 | qemu_co_mutex_lock(&s->lock); | |
787 | ||
788 | while (remaining_sectors != 0) { | |
789 | ||
790 | l2meta = g_malloc0(sizeof(*l2meta)); | |
791 | qemu_co_queue_init(&l2meta->dependent_requests); | |
792 | ||
793 | trace_qcow2_writev_start_part(qemu_coroutine_self()); | |
794 | index_in_cluster = sector_num & (s->cluster_sectors - 1); | |
795 | n_end = index_in_cluster + remaining_sectors; | |
796 | if (s->crypt_method && | |
797 | n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) { | |
798 | n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors; | |
799 | } | |
800 | ||
801 | ret = qcow2_alloc_cluster_offset(bs, sector_num << 9, | |
802 | index_in_cluster, n_end, &cur_nr_sectors, l2meta); | |
803 | if (ret < 0) { | |
804 | goto fail; | |
805 | } | |
806 | ||
807 | if (l2meta->nb_clusters > 0 && | |
808 | (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)) { | |
809 | qcow2_mark_dirty(bs); | |
810 | } | |
811 | ||
812 | cluster_offset = l2meta->cluster_offset; | |
813 | assert((cluster_offset & 511) == 0); | |
814 | ||
815 | qemu_iovec_reset(&hd_qiov); | |
816 | qemu_iovec_concat(&hd_qiov, qiov, bytes_done, | |
817 | cur_nr_sectors * 512); | |
818 | ||
819 | if (s->crypt_method) { | |
820 | if (!cluster_data) { | |
821 | cluster_data = qemu_blockalign(bs, QCOW_MAX_CRYPT_CLUSTERS * | |
822 | s->cluster_size); | |
823 | } | |
824 | ||
825 | assert(hd_qiov.size <= | |
826 | QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size); | |
827 | qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size); | |
828 | ||
829 | qcow2_encrypt_sectors(s, sector_num, cluster_data, | |
830 | cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key); | |
831 | ||
832 | qemu_iovec_reset(&hd_qiov); | |
833 | qemu_iovec_add(&hd_qiov, cluster_data, | |
834 | cur_nr_sectors * 512); | |
835 | } | |
836 | ||
837 | qemu_co_mutex_unlock(&s->lock); | |
838 | BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO); | |
839 | trace_qcow2_writev_data(qemu_coroutine_self(), | |
840 | (cluster_offset >> 9) + index_in_cluster); | |
841 | ret = bdrv_co_writev(bs->file, | |
842 | (cluster_offset >> 9) + index_in_cluster, | |
843 | cur_nr_sectors, &hd_qiov); | |
844 | qemu_co_mutex_lock(&s->lock); | |
845 | if (ret < 0) { | |
846 | goto fail; | |
847 | } | |
848 | ||
849 | ret = qcow2_alloc_cluster_link_l2(bs, l2meta); | |
850 | if (ret < 0) { | |
851 | goto fail; | |
852 | } | |
853 | ||
854 | run_dependent_requests(s, l2meta); | |
855 | g_free(l2meta); | |
856 | l2meta = NULL; | |
857 | ||
858 | remaining_sectors -= cur_nr_sectors; | |
859 | sector_num += cur_nr_sectors; | |
860 | bytes_done += cur_nr_sectors * 512; | |
861 | trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_nr_sectors); | |
862 | } | |
863 | ret = 0; | |
864 | ||
865 | fail: | |
866 | if (l2meta != NULL) { | |
867 | run_dependent_requests(s, l2meta); | |
868 | g_free(l2meta); | |
869 | } | |
870 | ||
871 | qemu_co_mutex_unlock(&s->lock); | |
872 | ||
873 | qemu_iovec_destroy(&hd_qiov); | |
874 | qemu_vfree(cluster_data); | |
875 | trace_qcow2_writev_done_req(qemu_coroutine_self(), ret); | |
876 | ||
877 | return ret; | |
878 | } | |
879 | ||
880 | static void qcow2_close(BlockDriverState *bs) | |
881 | { | |
882 | BDRVQcowState *s = bs->opaque; | |
883 | g_free(s->l1_table); | |
884 | ||
885 | qcow2_cache_flush(bs, s->l2_table_cache); | |
886 | qcow2_cache_flush(bs, s->refcount_block_cache); | |
887 | ||
888 | qcow2_mark_clean(bs); | |
889 | ||
890 | qcow2_cache_destroy(bs, s->l2_table_cache); | |
891 | qcow2_cache_destroy(bs, s->refcount_block_cache); | |
892 | ||
893 | g_free(s->unknown_header_fields); | |
894 | cleanup_unknown_header_ext(bs); | |
895 | ||
896 | g_free(s->cluster_cache); | |
897 | qemu_vfree(s->cluster_data); | |
898 | qcow2_refcount_close(bs); | |
899 | qcow2_free_snapshots(bs); | |
900 | } | |
901 | ||
902 | static void qcow2_invalidate_cache(BlockDriverState *bs) | |
903 | { | |
904 | BDRVQcowState *s = bs->opaque; | |
905 | int flags = s->flags; | |
906 | AES_KEY aes_encrypt_key; | |
907 | AES_KEY aes_decrypt_key; | |
908 | uint32_t crypt_method = 0; | |
909 | ||
910 | /* | |
911 | * Backing files are read-only, which makes all of their metadata immutable; | |
912 | * that means we don't have to worry about reopening them here. | |
913 | */ | |
914 | ||
915 | if (s->crypt_method) { | |
916 | crypt_method = s->crypt_method; | |
917 | memcpy(&aes_encrypt_key, &s->aes_encrypt_key, sizeof(aes_encrypt_key)); | |
918 | memcpy(&aes_decrypt_key, &s->aes_decrypt_key, sizeof(aes_decrypt_key)); | |
919 | } | |
920 | ||
921 | qcow2_close(bs); | |
922 | ||
923 | memset(s, 0, sizeof(BDRVQcowState)); | |
924 | qcow2_open(bs, flags); | |
925 | ||
926 | if (crypt_method) { | |
927 | s->crypt_method = crypt_method; | |
928 | memcpy(&s->aes_encrypt_key, &aes_encrypt_key, sizeof(aes_encrypt_key)); | |
929 | memcpy(&s->aes_decrypt_key, &aes_decrypt_key, sizeof(aes_decrypt_key)); | |
930 | } | |
931 | } | |
932 | ||
933 | static size_t header_ext_add(char *buf, uint32_t magic, const void *s, | |
934 | size_t len, size_t buflen) | |
935 | { | |
936 | QCowExtension *ext_backing_fmt = (QCowExtension*) buf; | |
937 | size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7); | |
938 | ||
939 | if (buflen < ext_len) { | |
940 | return -ENOSPC; | |
941 | } | |
942 | ||
943 | *ext_backing_fmt = (QCowExtension) { | |
944 | .magic = cpu_to_be32(magic), | |
945 | .len = cpu_to_be32(len), | |
946 | }; | |
947 | memcpy(buf + sizeof(QCowExtension), s, len); | |
948 | ||
949 | return ext_len; | |
950 | } | |
951 | ||
952 | /* | |
953 | * Updates the qcow2 header, including the variable length parts of it, i.e. | |
954 | * the backing file name and all extensions. qcow2 was not designed to allow | |
955 | * such changes, so if we run out of space (we can only use the first cluster) | |
956 | * this function may fail. | |
957 | * | |
958 | * Returns 0 on success, -errno in error cases. | |
959 | */ | |
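/*
 * The layout written below is, in order: the fixed header (truncated to the
 * version 2 length for compat=0.10 images), any unknown header fields
 * preserved from qcow2_open(), the backing format extension, the feature
 * name table, any unknown header extensions, the end-of-extensions marker,
 * and finally the backing file name.
 */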
960 | int qcow2_update_header(BlockDriverState *bs) | |
961 | { | |
962 | BDRVQcowState *s = bs->opaque; | |
963 | QCowHeader *header; | |
964 | char *buf; | |
965 | size_t buflen = s->cluster_size; | |
966 | int ret; | |
967 | uint64_t total_size; | |
968 | uint32_t refcount_table_clusters; | |
969 | size_t header_length; | |
970 | Qcow2UnknownHeaderExtension *uext; | |
971 | ||
972 | buf = qemu_blockalign(bs, buflen); | |
973 | ||
974 | /* Header structure */ | |
975 | header = (QCowHeader*) buf; | |
976 | ||
977 | if (buflen < sizeof(*header)) { | |
978 | ret = -ENOSPC; | |
979 | goto fail; | |
980 | } | |
981 | ||
982 | header_length = sizeof(*header) + s->unknown_header_fields_size; | |
983 | total_size = bs->total_sectors * BDRV_SECTOR_SIZE; | |
984 | refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3); | |
985 | ||
986 | *header = (QCowHeader) { | |
987 | /* Version 2 fields */ | |
988 | .magic = cpu_to_be32(QCOW_MAGIC), | |
989 | .version = cpu_to_be32(s->qcow_version), | |
990 | .backing_file_offset = 0, | |
991 | .backing_file_size = 0, | |
992 | .cluster_bits = cpu_to_be32(s->cluster_bits), | |
993 | .size = cpu_to_be64(total_size), | |
994 | .crypt_method = cpu_to_be32(s->crypt_method_header), | |
995 | .l1_size = cpu_to_be32(s->l1_size), | |
996 | .l1_table_offset = cpu_to_be64(s->l1_table_offset), | |
997 | .refcount_table_offset = cpu_to_be64(s->refcount_table_offset), | |
998 | .refcount_table_clusters = cpu_to_be32(refcount_table_clusters), | |
999 | .nb_snapshots = cpu_to_be32(s->nb_snapshots), | |
1000 | .snapshots_offset = cpu_to_be64(s->snapshots_offset), | |
1001 | ||
1002 | /* Version 3 fields */ | |
1003 | .incompatible_features = cpu_to_be64(s->incompatible_features), | |
1004 | .compatible_features = cpu_to_be64(s->compatible_features), | |
1005 | .autoclear_features = cpu_to_be64(s->autoclear_features), | |
1006 | .refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT), | |
1007 | .header_length = cpu_to_be32(header_length), | |
1008 | }; | |
1009 | ||
1010 | /* For older versions, write a shorter header */ | |
1011 | switch (s->qcow_version) { | |
1012 | case 2: | |
1013 | ret = offsetof(QCowHeader, incompatible_features); | |
1014 | break; | |
1015 | case 3: | |
1016 | ret = sizeof(*header); | |
1017 | break; | |
1018 | default: | |
1019 | ret = -EINVAL; | |
1020 | goto fail; | |
1021 | } | |
1022 | ||
1023 | buf += ret; | |
1024 | buflen -= ret; | |
1025 | memset(buf, 0, buflen); | |
1026 | ||
1027 | /* Preserve any unknown field in the header */ | |
1028 | if (s->unknown_header_fields_size) { | |
1029 | if (buflen < s->unknown_header_fields_size) { | |
1030 | ret = -ENOSPC; | |
1031 | goto fail; | |
1032 | } | |
1033 | ||
1034 | memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size); | |
1035 | buf += s->unknown_header_fields_size; | |
1036 | buflen -= s->unknown_header_fields_size; | |
1037 | } | |
1038 | ||
1039 | /* Backing file format header extension */ | |
1040 | if (*bs->backing_format) { | |
1041 | ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT, | |
1042 | bs->backing_format, strlen(bs->backing_format), | |
1043 | buflen); | |
1044 | if (ret < 0) { | |
1045 | goto fail; | |
1046 | } | |
1047 | ||
1048 | buf += ret; | |
1049 | buflen -= ret; | |
1050 | } | |
1051 | ||
1052 | /* Feature table */ | |
1053 | Qcow2Feature features[] = { | |
1054 | { | |
1055 | .type = QCOW2_FEAT_TYPE_INCOMPATIBLE, | |
1056 | .bit = QCOW2_INCOMPAT_DIRTY_BITNR, | |
1057 | .name = "dirty bit", | |
1058 | }, | |
1059 | { | |
1060 | .type = QCOW2_FEAT_TYPE_COMPATIBLE, | |
1061 | .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR, | |
1062 | .name = "lazy refcounts", | |
1063 | }, | |
1064 | }; | |
1065 | ||
1066 | ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE, | |
1067 | features, sizeof(features), buflen); | |
1068 | if (ret < 0) { | |
1069 | goto fail; | |
1070 | } | |
1071 | buf += ret; | |
1072 | buflen -= ret; | |
1073 | ||
1074 | /* Keep unknown header extensions */ | |
1075 | QLIST_FOREACH(uext, &s->unknown_header_ext, next) { | |
1076 | ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen); | |
1077 | if (ret < 0) { | |
1078 | goto fail; | |
1079 | } | |
1080 | ||
1081 | buf += ret; | |
1082 | buflen -= ret; | |
1083 | } | |
1084 | ||
1085 | /* End of header extensions */ | |
1086 | ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen); | |
1087 | if (ret < 0) { | |
1088 | goto fail; | |
1089 | } | |
1090 | ||
1091 | buf += ret; | |
1092 | buflen -= ret; | |
1093 | ||
1094 | /* Backing file name */ | |
1095 | if (*bs->backing_file) { | |
1096 | size_t backing_file_len = strlen(bs->backing_file); | |
1097 | ||
1098 | if (buflen < backing_file_len) { | |
1099 | ret = -ENOSPC; | |
1100 | goto fail; | |
1101 | } | |
1102 | ||
1103 | /* Using strncpy is ok here, since buf does not need to be NUL-terminated (the length is stored in backing_file_size). */ | |
1104 | strncpy(buf, bs->backing_file, buflen); | |
1105 | ||
1106 | header->backing_file_offset = cpu_to_be64(buf - ((char*) header)); | |
1107 | header->backing_file_size = cpu_to_be32(backing_file_len); | |
1108 | } | |
1109 | ||
1110 | /* Write the new header */ | |
1111 | ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size); | |
1112 | if (ret < 0) { | |
1113 | goto fail; | |
1114 | } | |
1115 | ||
1116 | ret = 0; | |
1117 | fail: | |
1118 | qemu_vfree(header); | |
1119 | return ret; | |
1120 | } | |
1121 | ||
1122 | static int qcow2_change_backing_file(BlockDriverState *bs, | |
1123 | const char *backing_file, const char *backing_fmt) | |
1124 | { | |
1125 | pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: ""); | |
1126 | pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: ""); | |
1127 | ||
1128 | return qcow2_update_header(bs); | |
1129 | } | |
1130 | ||
1131 | static int preallocate(BlockDriverState *bs) | |
1132 | { | |
1133 | uint64_t nb_sectors; | |
1134 | uint64_t offset; | |
1135 | int num; | |
1136 | int ret; | |
1137 | QCowL2Meta meta; | |
1138 | ||
1139 | nb_sectors = bdrv_getlength(bs) >> 9; | |
1140 | offset = 0; | |
1141 | qemu_co_queue_init(&meta.dependent_requests); | |
1142 | meta.cluster_offset = 0; | |
1143 | ||
1144 | while (nb_sectors) { | |
1145 | num = MIN(nb_sectors, INT_MAX >> 9); | |
1146 | ret = qcow2_alloc_cluster_offset(bs, offset, 0, num, &num, &meta); | |
1147 | if (ret < 0) { | |
1148 | return ret; | |
1149 | } | |
1150 | ||
1151 | ret = qcow2_alloc_cluster_link_l2(bs, &meta); | |
1152 | if (ret < 0) { | |
1153 | qcow2_free_any_clusters(bs, meta.cluster_offset, meta.nb_clusters); | |
1154 | return ret; | |
1155 | } | |
1156 | ||
1157 | /* There are no dependent requests, but we need to remove our request | |
1158 | * from the list of in-flight requests */ | |
1159 | run_dependent_requests(bs->opaque, &meta); | |
1160 | ||
1161 | /* TODO Preallocate data if requested */ | |
1162 | ||
1163 | nb_sectors -= num; | |
1164 | offset += num << 9; | |
1165 | } | |
1166 | ||
1167 | /* | |
1168 | * It is expected that the image file is large enough to actually contain | |
1169 | * all of the allocated clusters (otherwise we get failing reads after | |
1170 | * EOF). Extend the image to the last allocated sector. | |
1171 | */ | |
1172 | if (meta.cluster_offset != 0) { | |
1173 | uint8_t buf[512]; | |
1174 | memset(buf, 0, 512); | |
1175 | ret = bdrv_write(bs->file, (meta.cluster_offset >> 9) + num - 1, buf, 1); | |
1176 | if (ret < 0) { | |
1177 | return ret; | |
1178 | } | |
1179 | } | |
1180 | ||
1181 | return 0; | |
1182 | } | |
1183 | ||
1184 | static int qcow2_create2(const char *filename, int64_t total_size, | |
1185 | const char *backing_file, const char *backing_format, | |
1186 | int flags, size_t cluster_size, int prealloc, | |
1187 | QEMUOptionParameter *options, int version) | |
1188 | { | |
1189 | /* Calculate cluster_bits */ | |
1190 | int cluster_bits; | |
1191 | cluster_bits = ffs(cluster_size) - 1; | |
1192 | if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS || | |
1193 | (1 << cluster_bits) != cluster_size) | |
1194 | { | |
1195 | error_report( | |
1196 | "Cluster size must be a power of two between %d and %dk", | |
1197 | 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10)); | |
1198 | return -EINVAL; | |
1199 | } | |
1200 | ||
1201 | /* | |
1202 | * Open the image file and write a minimal qcow2 header. | |
1203 | * | |
1204 | * We keep things simple and start with a zero-sized image. We also | |
1205 | * do without refcount blocks or an L1 table for now. We'll fix the | |
1206 | * inconsistency later. | |
1207 | * | |
1208 | * We do need a refcount table because growing the refcount table means | |
1209 | * allocating two new refcount blocks - the second of which would be at | |
1210 | * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file | |
1211 | * size for any qcow2 image. | |
1212 | */ | |
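/*
 * Concretely (for illustration): the code below writes cluster 0 (the
 * header) and cluster 1 (an all-zero refcount table), reopens the file with
 * the qcow2 driver, and then uses qcow2_alloc_clusters() to give those two
 * clusters a refcount before the image is grown to its final size and the
 * backing file is set.
 */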
1213 | BlockDriverState* bs; | |
1214 | QCowHeader header; | |
1215 | uint8_t* refcount_table; | |
1216 | int ret; | |
1217 | ||
1218 | ret = bdrv_create_file(filename, options); | |
1219 | if (ret < 0) { | |
1220 | return ret; | |
1221 | } | |
1222 | ||
1223 | ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR); | |
1224 | if (ret < 0) { | |
1225 | return ret; | |
1226 | } | |
1227 | ||
1228 | /* Write the header */ | |
1229 | memset(&header, 0, sizeof(header)); | |
1230 | header.magic = cpu_to_be32(QCOW_MAGIC); | |
1231 | header.version = cpu_to_be32(version); | |
1232 | header.cluster_bits = cpu_to_be32(cluster_bits); | |
1233 | header.size = cpu_to_be64(0); | |
1234 | header.l1_table_offset = cpu_to_be64(0); | |
1235 | header.l1_size = cpu_to_be32(0); | |
1236 | header.refcount_table_offset = cpu_to_be64(cluster_size); | |
1237 | header.refcount_table_clusters = cpu_to_be32(1); | |
1238 | header.refcount_order = cpu_to_be32(3 + REFCOUNT_SHIFT); | |
1239 | header.header_length = cpu_to_be32(sizeof(header)); | |
1240 | ||
1241 | if (flags & BLOCK_FLAG_ENCRYPT) { | |
1242 | header.crypt_method = cpu_to_be32(QCOW_CRYPT_AES); | |
1243 | } else { | |
1244 | header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE); | |
1245 | } | |
1246 | ||
1247 | if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) { | |
1248 | header.compatible_features |= | |
1249 | cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS); | |
1250 | } | |
1251 | ||
1252 | ret = bdrv_pwrite(bs, 0, &header, sizeof(header)); | |
1253 | if (ret < 0) { | |
1254 | goto out; | |
1255 | } | |
1256 | ||
1257 | /* Write an empty refcount table */ | |
1258 | refcount_table = g_malloc0(cluster_size); | |
1259 | ret = bdrv_pwrite(bs, cluster_size, refcount_table, cluster_size); | |
1260 | g_free(refcount_table); | |
1261 | ||
1262 | if (ret < 0) { | |
1263 | goto out; | |
1264 | } | |
1265 | ||
1266 | bdrv_close(bs); | |
1267 | ||
1268 | /* | |
1269 | * And now open the image and make it consistent first (i.e. increase the | |
1270 | * refcount of the cluster that is occupied by the header and the refcount | |
1271 | * table) | |
1272 | */ | |
1273 | BlockDriver* drv = bdrv_find_format("qcow2"); | |
1274 | assert(drv != NULL); | |
1275 | ret = bdrv_open(bs, filename, | |
1276 | BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH, drv); | |
1277 | if (ret < 0) { | |
1278 | goto out; | |
1279 | } | |
1280 | ||
1281 | ret = qcow2_alloc_clusters(bs, 2 * cluster_size); | |
1282 | if (ret < 0) { | |
1283 | goto out; | |
1284 | ||
1285 | } else if (ret != 0) { | |
1286 | error_report("Huh, first cluster in empty image is already in use?"); | |
1287 | abort(); | |
1288 | } | |
1289 | ||
1290 | /* Okay, now that we have a valid image, let's give it the right size */ | |
1291 | ret = bdrv_truncate(bs, total_size * BDRV_SECTOR_SIZE); | |
1292 | if (ret < 0) { | |
1293 | goto out; | |
1294 | } | |
1295 | ||
1296 | /* Want a backing file? There you go. */ | |
1297 | if (backing_file) { | |
1298 | ret = bdrv_change_backing_file(bs, backing_file, backing_format); | |
1299 | if (ret < 0) { | |
1300 | goto out; | |
1301 | } | |
1302 | } | |
1303 | ||
1304 | /* And if we're supposed to preallocate metadata, do that now */ | |
1305 | if (prealloc) { | |
1306 | BDRVQcowState *s = bs->opaque; | |
1307 | qemu_co_mutex_lock(&s->lock); | |
1308 | ret = preallocate(bs); | |
1309 | qemu_co_mutex_unlock(&s->lock); | |
1310 | if (ret < 0) { | |
1311 | goto out; | |
1312 | } | |
1313 | } | |
1314 | ||
1315 | ret = 0; | |
1316 | out: | |
1317 | bdrv_delete(bs); | |
1318 | return ret; | |
1319 | } | |
1320 | ||
1321 | static int qcow2_create(const char *filename, QEMUOptionParameter *options) | |
1322 | { | |
1323 | const char *backing_file = NULL; | |
1324 | const char *backing_fmt = NULL; | |
1325 | uint64_t sectors = 0; | |
1326 | int flags = 0; | |
1327 | size_t cluster_size = DEFAULT_CLUSTER_SIZE; | |
1328 | int prealloc = 0; | |
1329 | int version = 2; | |
1330 | ||
1331 | /* Read out options */ | |
1332 | while (options && options->name) { | |
1333 | if (!strcmp(options->name, BLOCK_OPT_SIZE)) { | |
1334 | sectors = options->value.n / 512; | |
1335 | } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) { | |
1336 | backing_file = options->value.s; | |
1337 | } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) { | |
1338 | backing_fmt = options->value.s; | |
1339 | } else if (!strcmp(options->name, BLOCK_OPT_ENCRYPT)) { | |
1340 | flags |= options->value.n ? BLOCK_FLAG_ENCRYPT : 0; | |
1341 | } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) { | |
1342 | if (options->value.n) { | |
1343 | cluster_size = options->value.n; | |
1344 | } | |
1345 | } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) { | |
1346 | if (!options->value.s || !strcmp(options->value.s, "off")) { | |
1347 | prealloc = 0; | |
1348 | } else if (!strcmp(options->value.s, "metadata")) { | |
1349 | prealloc = 1; | |
1350 | } else { | |
1351 | fprintf(stderr, "Invalid preallocation mode: '%s'\n", | |
1352 | options->value.s); | |
1353 | return -EINVAL; | |
1354 | } | |
1355 | } else if (!strcmp(options->name, BLOCK_OPT_COMPAT_LEVEL)) { | |
1356 | if (!options->value.s || !strcmp(options->value.s, "0.10")) { | |
1357 | version = 2; | |
1358 | } else if (!strcmp(options->value.s, "1.1")) { | |
1359 | version = 3; | |
1360 | } else { | |
1361 | fprintf(stderr, "Invalid compatibility level: '%s'\n", | |
1362 | options->value.s); | |
1363 | return -EINVAL; | |
1364 | } | |
1365 | } else if (!strcmp(options->name, BLOCK_OPT_LAZY_REFCOUNTS)) { | |
1366 | flags |= options->value.n ? BLOCK_FLAG_LAZY_REFCOUNTS : 0; | |
1367 | } | |
1368 | options++; | |
1369 | } | |
1370 | ||
1371 | if (backing_file && prealloc) { | |
1372 | fprintf(stderr, "Backing file and preallocation cannot be used at " | |
1373 | "the same time\n"); | |
1374 | return -EINVAL; | |
1375 | } | |
1376 | ||
1377 | if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) { | |
1378 | fprintf(stderr, "Lazy refcounts only supported with compatibility " | |
1379 | "level 1.1 and above (use compat=1.1 or greater)\n"); | |
1380 | return -EINVAL; | |
1381 | } | |
1382 | ||
1383 | return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags, | |
1384 | cluster_size, prealloc, options, version); | |
1385 | } | |
1386 | ||
1387 | static int qcow2_make_empty(BlockDriverState *bs) | |
1388 | { | |
1389 | #if 0 | |
1390 | /* XXX: not correct */ | |
1391 | BDRVQcowState *s = bs->opaque; | |
1392 | uint32_t l1_length = s->l1_size * sizeof(uint64_t); | |
1393 | int ret; | |
1394 | ||
1395 | memset(s->l1_table, 0, l1_length); | |
1396 | if (bdrv_pwrite(bs->file, s->l1_table_offset, s->l1_table, l1_length) < 0) | |
1397 | return -1; | |
1398 | ret = bdrv_truncate(bs->file, s->l1_table_offset + l1_length); | |
1399 | if (ret < 0) | |
1400 | return ret; | |
1401 | ||
1402 | l2_cache_reset(bs); | |
1403 | #endif | |
1404 | return 0; | |
1405 | } | |
1406 | ||
1407 | static coroutine_fn int qcow2_co_write_zeroes(BlockDriverState *bs, | |
1408 | int64_t sector_num, int nb_sectors) | |
1409 | { | |
1410 | int ret; | |
1411 | BDRVQcowState *s = bs->opaque; | |
1412 | ||
1413 | /* Reject misaligned zero writes with -ENOTSUP so the generic block layer emulates them with regular writes */ | |
1414 | if (sector_num % s->cluster_sectors || nb_sectors % s->cluster_sectors) { | |
1415 | return -ENOTSUP; | |
1416 | } | |
1417 | ||
1418 | /* Whatever is left can use real zero clusters */ | |
1419 | qemu_co_mutex_lock(&s->lock); | |
1420 | ret = qcow2_zero_clusters(bs, sector_num << BDRV_SECTOR_BITS, | |
1421 | nb_sectors); | |
1422 | qemu_co_mutex_unlock(&s->lock); | |
1423 | ||
1424 | return ret; | |
1425 | } | |
1426 | ||
1427 | static coroutine_fn int qcow2_co_discard(BlockDriverState *bs, | |
1428 | int64_t sector_num, int nb_sectors) | |
1429 | { | |
1430 | int ret; | |
1431 | BDRVQcowState *s = bs->opaque; | |
1432 | ||
1433 | qemu_co_mutex_lock(&s->lock); | |
1434 | ret = qcow2_discard_clusters(bs, sector_num << BDRV_SECTOR_BITS, | |
1435 | nb_sectors); | |
1436 | qemu_co_mutex_unlock(&s->lock); | |
1437 | return ret; | |
1438 | } | |
1439 | ||
1440 | static int qcow2_truncate(BlockDriverState *bs, int64_t offset) | |
1441 | { | |
1442 | BDRVQcowState *s = bs->opaque; | |
1443 | int ret, new_l1_size; | |
1444 | ||
1445 | if (offset & 511) { | |
1446 | error_report("The new size must be a multiple of 512"); | |
1447 | return -EINVAL; | |
1448 | } | |
1449 | ||
1450 | /* cannot proceed if image has snapshots */ | |
1451 | if (s->nb_snapshots) { | |
1452 | error_report("Can't resize an image which has snapshots"); | |
1453 | return -ENOTSUP; | |
1454 | } | |
1455 | ||
1456 | /* shrinking is currently not supported */ | |
1457 | if (offset < bs->total_sectors * 512) { | |
1458 | error_report("qcow2 doesn't support shrinking images yet"); | |
1459 | return -ENOTSUP; | |
1460 | } | |
1461 | ||
1462 | new_l1_size = size_to_l1(s, offset); | |
1463 | ret = qcow2_grow_l1_table(bs, new_l1_size, true); | |
1464 | if (ret < 0) { | |
1465 | return ret; | |
1466 | } | |
1467 | ||
1468 | /* write updated header.size */ | |
1469 | offset = cpu_to_be64(offset); | |
1470 | ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size), | |
1471 | &offset, sizeof(uint64_t)); | |
1472 | if (ret < 0) { | |
1473 | return ret; | |
1474 | } | |
1475 | ||
1476 | s->l1_vm_state_index = new_l1_size; | |
1477 | return 0; | |
1478 | } | |
1479 | ||
1480 | /* XXX: put compressed sectors first, then all the cluster aligned | |
1481 | tables to avoid losing bytes in alignment */ | |
1482 | static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num, | |
1483 | const uint8_t *buf, int nb_sectors) | |
1484 | { | |
1485 | BDRVQcowState *s = bs->opaque; | |
1486 | z_stream strm; | |
1487 | int ret, out_len; | |
1488 | uint8_t *out_buf; | |
1489 | uint64_t cluster_offset; | |
1490 | ||
1491 | if (nb_sectors == 0) { | |
1492 | /* align end of file to a sector boundary to ease reading with | |
1493 | sector based I/Os */ | |
1494 | cluster_offset = bdrv_getlength(bs->file); | |
1495 | cluster_offset = (cluster_offset + 511) & ~511; | |
1496 | bdrv_truncate(bs->file, cluster_offset); | |
1497 | return 0; | |
1498 | } | |
1499 | ||
1500 | if (nb_sectors != s->cluster_sectors) | |
1501 | return -EINVAL; | |
1502 | ||
1503 | out_buf = g_malloc(s->cluster_size + (s->cluster_size / 1000) + 128); | |
1504 | ||
1505 | /* default compression level, small window, no zlib header */ | |
1506 | memset(&strm, 0, sizeof(strm)); | |
1507 | ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, | |
1508 | Z_DEFLATED, -12, | |
1509 | 9, Z_DEFAULT_STRATEGY); | |
1510 | if (ret != 0) { | |
1511 | ret = -EINVAL; | |
1512 | goto fail; | |
1513 | } | |
1514 | ||
1515 | strm.avail_in = s->cluster_size; | |
1516 | strm.next_in = (uint8_t *)buf; | |
1517 | strm.avail_out = s->cluster_size; | |
1518 | strm.next_out = out_buf; | |
1519 | ||
1520 | ret = deflate(&strm, Z_FINISH); | |
1521 | if (ret != Z_STREAM_END && ret != Z_OK) { | |
1522 | deflateEnd(&strm); | |
1523 | ret = -EINVAL; | |
1524 | goto fail; | |
1525 | } | |
1526 | out_len = strm.next_out - out_buf; | |
1527 | ||
1528 | deflateEnd(&strm); | |
1529 | ||
1530 | if (ret != Z_STREAM_END || out_len >= s->cluster_size) { | |
1531 | /* could not compress: write normal cluster */ | |
1532 | ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors); | |
1533 | if (ret < 0) { | |
1534 | goto fail; | |
1535 | } | |
1536 | } else { | |
1537 | cluster_offset = qcow2_alloc_compressed_cluster_offset(bs, | |
1538 | sector_num << 9, out_len); | |
1539 | if (!cluster_offset) { | |
1540 | ret = -EIO; | |
1541 | goto fail; | |
1542 | } | |
1543 | cluster_offset &= s->cluster_offset_mask; | |
1544 | BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED); | |
1545 | ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len); | |
1546 | if (ret < 0) { | |
1547 | goto fail; | |
1548 | } | |
1549 | } | |
1550 | ||
1551 | ret = 0; | |
1552 | fail: | |
1553 | g_free(out_buf); | |
1554 | return ret; | |
1555 | } | |
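/*
 * Note (added for clarity): this path is reached through
 * bdrv_write_compressed(), e.g. by "qemu-img convert -c". It only accepts
 * whole clusters, and clusters that do not shrink under deflate are written
 * uncompressed instead.
 */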
1556 | ||
1557 | static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs) | |
1558 | { | |
1559 | BDRVQcowState *s = bs->opaque; | |
1560 | int ret; | |
1561 | ||
1562 | qemu_co_mutex_lock(&s->lock); | |
1563 | ret = qcow2_cache_flush(bs, s->l2_table_cache); | |
1564 | if (ret < 0) { | |
1565 | qemu_co_mutex_unlock(&s->lock); | |
1566 | return ret; | |
1567 | } | |
1568 | ||
1569 | if (qcow2_need_accurate_refcounts(s)) { | |
1570 | ret = qcow2_cache_flush(bs, s->refcount_block_cache); | |
1571 | if (ret < 0) { | |
1572 | qemu_co_mutex_unlock(&s->lock); | |
1573 | return ret; | |
1574 | } | |
1575 | } | |
1576 | qemu_co_mutex_unlock(&s->lock); | |
1577 | ||
1578 | return 0; | |
1579 | } | |
1580 | ||
1581 | static int64_t qcow2_vm_state_offset(BDRVQcowState *s) | |
1582 | { | |
1583 | return (int64_t)s->l1_vm_state_index << (s->cluster_bits + s->l2_bits); | |
1584 | } | |
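/*
 * Illustration: each L1 entry covers cluster_size * l2_size bytes (512 MiB
 * with 64 KiB clusters), so the VM state saved by savevm starts at the
 * first such boundary at or beyond the end of the guest-visible disk,
 * l1_vm_state_index entries into the L1 table.
 */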
1585 | ||
1586 | static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) | |
1587 | { | |
1588 | BDRVQcowState *s = bs->opaque; | |
1589 | bdi->cluster_size = s->cluster_size; | |
1590 | bdi->vm_state_offset = qcow2_vm_state_offset(s); | |
1591 | return 0; | |
1592 | } | |
1593 | ||
1594 | #if 0 | |
1595 | static void dump_refcounts(BlockDriverState *bs) | |
1596 | { | |
1597 | BDRVQcowState *s = bs->opaque; | |
1598 | int64_t nb_clusters, k, k1, size; | |
1599 | int refcount; | |
1600 | ||
1601 | size = bdrv_getlength(bs->file); | |
1602 | nb_clusters = size_to_clusters(s, size); | |
1603 | for(k = 0; k < nb_clusters;) { | |
1604 | k1 = k; | |
1605 | refcount = get_refcount(bs, k); | |
1606 | k++; | |
1607 | while (k < nb_clusters && get_refcount(bs, k) == refcount) | |
1608 | k++; | |
1609 | printf("%" PRId64 ": refcount=%d nb=%" PRId64 "\n", k, refcount, | |
1610 | k - k1); | |
1611 | } | |
1612 | } | |
1613 | #endif | |
1614 | ||
1615 | static int qcow2_save_vmstate(BlockDriverState *bs, const uint8_t *buf, | |
1616 | int64_t pos, int size) | |
1617 | { | |
1618 | BDRVQcowState *s = bs->opaque; | |
1619 | int growable = bs->growable; | |
1620 | int ret; | |
1621 | ||
1622 | BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); | |
1623 | bs->growable = 1; | |
1624 | ret = bdrv_pwrite(bs, qcow2_vm_state_offset(s) + pos, buf, size); | |
1625 | bs->growable = growable; | |
1626 | ||
1627 | return ret; | |
1628 | } | |
1629 | ||
1630 | static int qcow2_load_vmstate(BlockDriverState *bs, uint8_t *buf, | |
1631 | int64_t pos, int size) | |
1632 | { | |
1633 | BDRVQcowState *s = bs->opaque; | |
1634 | int growable = bs->growable; | |
1635 | int ret; | |
1636 | ||
1637 | BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); | |
1638 | bs->growable = 1; | |
1639 | ret = bdrv_pread(bs, qcow2_vm_state_offset(s) + pos, buf, size); | |
1640 | bs->growable = growable; | |
1641 | ||
1642 | return ret; | |
1643 | } | |
1644 | ||
1645 | static QEMUOptionParameter qcow2_create_options[] = { | |
1646 | { | |
1647 | .name = BLOCK_OPT_SIZE, | |
1648 | .type = OPT_SIZE, | |
1649 | .help = "Virtual disk size" | |
1650 | }, | |
1651 | { | |
1652 | .name = BLOCK_OPT_COMPAT_LEVEL, | |
1653 | .type = OPT_STRING, | |
1654 | .help = "Compatibility level (0.10 or 1.1)" | |
1655 | }, | |
1656 | { | |
1657 | .name = BLOCK_OPT_BACKING_FILE, | |
1658 | .type = OPT_STRING, | |
1659 | .help = "File name of a base image" | |
1660 | }, | |
1661 | { | |
1662 | .name = BLOCK_OPT_BACKING_FMT, | |
1663 | .type = OPT_STRING, | |
1664 | .help = "Image format of the base image" | |
1665 | }, | |
1666 | { | |
1667 | .name = BLOCK_OPT_ENCRYPT, | |
1668 | .type = OPT_FLAG, | |
1669 | .help = "Encrypt the image" | |
1670 | }, | |
1671 | { | |
1672 | .name = BLOCK_OPT_CLUSTER_SIZE, | |
1673 | .type = OPT_SIZE, | |
1674 | .help = "qcow2 cluster size", | |
1675 | .value = { .n = DEFAULT_CLUSTER_SIZE }, | |
1676 | }, | |
1677 | { | |
1678 | .name = BLOCK_OPT_PREALLOC, | |
1679 | .type = OPT_STRING, | |
1680 | .help = "Preallocation mode (allowed values: off, metadata)" | |
1681 | }, | |
1682 | { | |
1683 | .name = BLOCK_OPT_LAZY_REFCOUNTS, | |
1684 | .type = OPT_FLAG, | |
1685 | .help = "Postpone refcount updates", | |
1686 | }, | |
1687 | { NULL } | |
1688 | }; | |
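/*
 * Example invocation exercising these options (for illustration only):
 *
 *     qemu-img create -f qcow2 \
 *         -o compat=1.1,cluster_size=65536,lazy_refcounts=on,preallocation=metadata \
 *         disk.qcow2 10G
 */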
1689 | ||
1690 | static BlockDriver bdrv_qcow2 = { | |
1691 | .format_name = "qcow2", | |
1692 | .instance_size = sizeof(BDRVQcowState), | |
1693 | .bdrv_probe = qcow2_probe, | |
1694 | .bdrv_open = qcow2_open, | |
1695 | .bdrv_close = qcow2_close, | |
1696 | .bdrv_reopen_prepare = qcow2_reopen_prepare, | |
1697 | .bdrv_create = qcow2_create, | |
1698 | .bdrv_co_is_allocated = qcow2_co_is_allocated, | |
1699 | .bdrv_set_key = qcow2_set_key, | |
1700 | .bdrv_make_empty = qcow2_make_empty, | |
1701 | ||
1702 | .bdrv_co_readv = qcow2_co_readv, | |
1703 | .bdrv_co_writev = qcow2_co_writev, | |
1704 | .bdrv_co_flush_to_os = qcow2_co_flush_to_os, | |
1705 | ||
1706 | .bdrv_co_write_zeroes = qcow2_co_write_zeroes, | |
1707 | .bdrv_co_discard = qcow2_co_discard, | |
1708 | .bdrv_truncate = qcow2_truncate, | |
1709 | .bdrv_write_compressed = qcow2_write_compressed, | |
1710 | ||
1711 | .bdrv_snapshot_create = qcow2_snapshot_create, | |
1712 | .bdrv_snapshot_goto = qcow2_snapshot_goto, | |
1713 | .bdrv_snapshot_delete = qcow2_snapshot_delete, | |
1714 | .bdrv_snapshot_list = qcow2_snapshot_list, | |
1715 | .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp, | |
1716 | .bdrv_get_info = qcow2_get_info, | |
1717 | ||
1718 | .bdrv_save_vmstate = qcow2_save_vmstate, | |
1719 | .bdrv_load_vmstate = qcow2_load_vmstate, | |
1720 | ||
1721 | .bdrv_change_backing_file = qcow2_change_backing_file, | |
1722 | ||
1723 | .bdrv_invalidate_cache = qcow2_invalidate_cache, | |
1724 | ||
1725 | .create_options = qcow2_create_options, | |
1726 | .bdrv_check = qcow2_check, | |
1727 | }; | |
1728 | ||
1729 | static void bdrv_qcow2_init(void) | |
1730 | { | |
1731 | bdrv_register(&bdrv_qcow2); | |
1732 | } | |
1733 | ||
1734 | block_init(bdrv_qcow2_init); |