/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <zlib.h>

#include "qemu-common.h"
#include "block_int.h"
#include "block/qcow2.h"
#include "trace.h"

int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size)
{
    BDRVQcowState *s = bs->opaque;
    int new_l1_size, new_l1_size2, ret, i;
    uint64_t *new_l1_table;
    int64_t new_l1_table_offset;
    uint8_t data[12];

    if (min_size <= s->l1_size)
        return 0;

    if (exact_size) {
        new_l1_size = min_size;
    } else {
        /* Bump size up to reduce the number of times we have to grow */
        new_l1_size = s->l1_size;
        if (new_l1_size == 0) {
            new_l1_size = 1;
        }
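        /* Growing by roughly 1.5x per round keeps the number of grow
         * operations logarithmic in the final table size:
         * 1, 2, 3, 5, 8, 12, 18, 27, ... entries. */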
        while (min_size > new_l1_size) {
            new_l1_size = (new_l1_size * 3 + 1) / 2;
        }
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "grow l1_table from %d to %d\n", s->l1_size, new_l1_size);
#endif

    new_l1_size2 = sizeof(uint64_t) * new_l1_size;
    new_l1_table = g_malloc0(align_offset(new_l1_size2, 512));
    memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));

    /* write new table (align to cluster) */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
    new_l1_table_offset = qcow2_alloc_clusters(bs, new_l1_size2);
    if (new_l1_table_offset < 0) {
        g_free(new_l1_table);
        return new_l1_table_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
    ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset, new_l1_table, new_l1_size2);
    if (ret < 0)
        goto fail;
    for(i = 0; i < s->l1_size; i++)
        new_l1_table[i] = be64_to_cpu(new_l1_table[i]);

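    /* data[] holds the new l1_size (big-endian 32 bit) directly followed by
     * the new l1_table_offset (big-endian 64 bit); the two fields are
     * adjacent in QCowHeader, so a single 12-byte write updates both. */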
    /* set new table */
    BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
    cpu_to_be32w((uint32_t*)data, new_l1_size);
    cpu_to_be64wu((uint64_t*)(data + 4), new_l1_table_offset);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size), data, sizeof(data));
    if (ret < 0) {
        goto fail;
    }
    g_free(s->l1_table);
    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
    s->l1_table_offset = new_l1_table_offset;
    s->l1_table = new_l1_table;
    s->l1_size = new_l1_size;
    return 0;
 fail:
    g_free(new_l1_table);
    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
    return ret;
}

/*
 * l2_load
 *
 * Loads an L2 table into memory. If the table is in the cache, the cache
 * is used; otherwise the L2 table is loaded from the image file.
 *
 * Returns 0 on success with a pointer to the L2 table stored in *l2_table,
 * or -errno if reading the table from the image file failed.
 */

static int l2_load(BlockDriverState *bs, uint64_t l2_offset,
    uint64_t **l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int ret;

    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void**) l2_table);

    return ret;
}

/*
 * Writes one sector of the L1 table to the disk (can't update single entries
 * and we really don't want bdrv_pread to perform a read-modify-write)
 */
#define L1_ENTRIES_PER_SECTOR (512 / 8)
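/* Each L1 entry is 8 bytes, so one 512-byte sector holds 64 of them;
 * l1_index & ~(L1_ENTRIES_PER_SECTOR - 1) rounds an index down to the
 * first entry of its sector. */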
static int write_l1_entry(BlockDriverState *bs, int l1_index)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t buf[L1_ENTRIES_PER_SECTOR];
    int l1_start_index;
    int i, ret;

    l1_start_index = l1_index & ~(L1_ENTRIES_PER_SECTOR - 1);
    for (i = 0; i < L1_ENTRIES_PER_SECTOR; i++) {
        buf[i] = cpu_to_be64(s->l1_table[l1_start_index + i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset + 8 * l1_start_index,
        buf, sizeof(buf));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

/*
 * l2_allocate
 *
 * Allocate a new l2 entry in the file. If l1_index points to an already
 * used entry in the L1 table (i.e. we are doing a copy on write for the L2
 * table) copy the contents of the old L2 table into the newly allocated one.
 * Otherwise the new table is initialized with zeros.
 *
 */

static int l2_allocate(BlockDriverState *bs, int l1_index, uint64_t **table)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t old_l2_offset;
    uint64_t *l2_table;
    int64_t l2_offset;
    int ret;

    old_l2_offset = s->l1_table[l1_index];

    trace_qcow2_l2_allocate(bs, l1_index);

    /* allocate a new l2 entry */

    l2_offset = qcow2_alloc_clusters(bs, s->l2_size * sizeof(uint64_t));
    if (l2_offset < 0) {
        return l2_offset;
    }

    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* allocate a new entry in the l2 cache */

    trace_qcow2_l2_allocate_get_empty(bs, l1_index);
    ret = qcow2_cache_get_empty(bs, s->l2_table_cache, l2_offset, (void**) table);
    if (ret < 0) {
        return ret;
    }

    l2_table = *table;

    if (old_l2_offset == 0) {
        /* if there was no old l2 table, clear the new table */
        memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
    } else {
        uint64_t* old_table;

        /* if there was an old l2 table, read it from the disk */
        BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_COW_READ);
        ret = qcow2_cache_get(bs, s->l2_table_cache, old_l2_offset,
            (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }

        memcpy(l2_table, old_table, s->cluster_size);

        ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &old_table);
        if (ret < 0) {
            goto fail;
        }
    }

    /* write the l2 table to the file */
    BLKDBG_EVENT(bs->file, BLKDBG_L2_ALLOC_WRITE);

    trace_qcow2_l2_allocate_write_l2(bs, l1_index);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    /* update the L1 entry */
    trace_qcow2_l2_allocate_write_l1(bs, l1_index);
    s->l1_table[l1_index] = l2_offset | QCOW_OFLAG_COPIED;
    ret = write_l1_entry(bs, l1_index);
    if (ret < 0) {
        goto fail;
    }

    *table = l2_table;
    trace_qcow2_l2_allocate_done(bs, l1_index, 0);
    return 0;

fail:
    trace_qcow2_l2_allocate_done(bs, l1_index, ret);
    qcow2_cache_put(bs, s->l2_table_cache, (void**) table);
    s->l1_table[l1_index] = old_l2_offset;
    return ret;
}

/*
 * Checks how many clusters in a given L2 table are contiguous in the image
 * file. As soon as one of the flags in the bitmask stop_flags changes compared
 * to the first cluster, the search is stopped and the cluster is not counted
 * as contiguous. (This allows it, for example, to stop at the first compressed
 * cluster, which may require different handling.)
 */
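/* Example: with stop_flags == QCOW_OFLAG_COMPRESSED, three normal clusters
 * stored back-to-back at host offsets X, X + cluster_size and
 * X + 2 * cluster_size count as one contiguous run of three, while a
 * compressed entry changes the masked value and ends the run. */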
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        uint64_t *l2_table, uint64_t start, uint64_t stop_flags)
{
    int i;
    uint64_t mask = stop_flags | L2E_OFFSET_MASK;
    uint64_t offset = be64_to_cpu(l2_table[0]) & mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++) {
        uint64_t l2_entry = be64_to_cpu(l2_table[i]) & mask;
        if (offset + (uint64_t) i * cluster_size != l2_entry) {
            break;
        }
    }

    return (i - start);
}

static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
{
    int i;

    for (i = 0; i < nb_clusters; i++) {
        int type = qcow2_get_cluster_type(be64_to_cpu(l2_table[i]));

        if (type != QCOW2_CLUSTER_UNALLOCATED) {
            break;
        }
    }

    return i;
}

/* The crypt function is compatible with the linux cryptoloop
   algorithm for < 4 GB images. NOTE: out_buf == in_buf is
   supported */
void qcow2_encrypt_sectors(BDRVQcowState *s, int64_t sector_num,
                           uint8_t *out_buf, const uint8_t *in_buf,
                           int nb_sectors, int enc,
                           const AES_KEY *key)
{
    union {
        uint64_t ll[2];
        uint8_t b[16];
    } ivec;
    int i;

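    /* The IV for each 512-byte sector is the sector number, stored
     * little-endian in the low 8 bytes of the 16-byte IV; the high 8 bytes
     * stay zero. This is what makes the scheme cryptoloop-compatible. */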
    for(i = 0; i < nb_sectors; i++) {
        ivec.ll[0] = cpu_to_le64(sector_num);
        ivec.ll[1] = 0;
        AES_cbc_encrypt(in_buf, out_buf, 512, key,
                        ivec.b, enc);
        sector_num++;
        in_buf += 512;
        out_buf += 512;
    }
}

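/*
 * Copies the sectors [n_start, n_end) within the cluster at cluster_offset,
 * reading the current guest-visible data (which may come from the backing
 * file) and writing it back, encrypted if necessary, into the cluster in
 * the image file; this performs the COW parts of an allocating write.
 */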
static int coroutine_fn copy_sectors(BlockDriverState *bs,
                                     uint64_t start_sect,
                                     uint64_t cluster_offset,
                                     int n_start, int n_end)
{
    BDRVQcowState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;
    int n, ret;

    /*
     * If this is the last cluster and it is only partially used, we must only
     * copy until the end of the image, or bdrv_check_request will fail for the
     * bdrv_read/write calls below.
     */
    if (start_sect + n_end > bs->total_sectors) {
        n_end = bs->total_sectors - start_sect;
    }

    n = n_end - n_start;
    if (n <= 0) {
        return 0;
    }

    iov.iov_len = n * BDRV_SECTOR_SIZE;
    iov.iov_base = qemu_blockalign(bs, iov.iov_len);

    qemu_iovec_init_external(&qiov, &iov, 1);

    BLKDBG_EVENT(bs->file, BLKDBG_COW_READ);

    /* Call .bdrv_co_readv() directly instead of using the public block-layer
     * interface. This avoids double I/O throttling and request tracking,
     * which can lead to deadlock when block layer copy-on-read is enabled.
     */
    ret = bs->drv->bdrv_co_readv(bs, start_sect + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    if (s->crypt_method) {
        qcow2_encrypt_sectors(s, start_sect + n_start,
                              iov.iov_base, iov.iov_base, n, 1,
                              &s->aes_encrypt_key);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_writev(bs->file, (cluster_offset >> 9) + n_start, n, &qiov);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/*
 * get_cluster_offset
 *
 * For a given offset of the disk image, find the cluster offset in the
 * qcow2 file. The offset is stored in *cluster_offset.
 *
 * On entry, *num is the number of contiguous sectors we'd like to
 * access following offset.
 *
 * On exit, *num is the number of contiguous sectors we can read.
 *
 * Returns the cluster type (QCOW2_CLUSTER_*) on success, -errno in error
 * cases.
 */
int qcow2_get_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int *num, uint64_t *cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset, *l2_table;
    int l1_bits, c;
    unsigned int index_in_cluster, nb_clusters;
    uint64_t nb_available, nb_needed;
    int ret;

    index_in_cluster = (offset >> 9) & (s->cluster_sectors - 1);
    nb_needed = *num + index_in_cluster;

    l1_bits = s->l2_bits + s->cluster_bits;
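    /* Each L1 entry covers 1 << l1_bits bytes of the virtual disk; with the
     * default 64 KB clusters (cluster_bits = 16) and 8192 L2 entries per
     * table (l2_bits = 13), that is 1 << 29 bytes = 512 MB. */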

    /* compute how many bytes there are between the offset and
     * the end of the l1 entry
     */

    nb_available = (1ULL << l1_bits) - (offset & ((1ULL << l1_bits) - 1));

    /* compute the number of available sectors */

    nb_available = (nb_available >> 9) + index_in_cluster;

    if (nb_needed > nb_available) {
        nb_needed = nb_available;
    }

    *cluster_offset = 0;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> l1_bits;
    if (l1_index >= s->l1_size) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
    if (!l2_offset) {
        ret = QCOW2_CLUSTER_UNALLOCATED;
        goto out;
    }

    /* load the l2 table in memory */

    ret = l2_load(bs, l2_offset, &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);
    *cluster_offset = be64_to_cpu(l2_table[l2_index]);
    nb_clusters = size_to_clusters(s, nb_needed << 9);

    ret = qcow2_get_cluster_type(*cluster_offset);
    switch (ret) {
    case QCOW2_CLUSTER_COMPRESSED:
        /* Compressed clusters can only be processed one by one */
        c = 1;
        *cluster_offset &= L2E_COMPRESSED_OFFSET_SIZE_MASK;
        break;
    case QCOW2_CLUSTER_UNALLOCATED:
        /* how many empty clusters ? */
        c = count_contiguous_free_clusters(nb_clusters, &l2_table[l2_index]);
        *cluster_offset = 0;
        break;
    case QCOW2_CLUSTER_NORMAL:
        /* how many allocated clusters ? */
        c = count_contiguous_clusters(nb_clusters, s->cluster_size,
                &l2_table[l2_index], 0, QCOW_OFLAG_COMPRESSED);
        *cluster_offset &= L2E_OFFSET_MASK;
        break;
    }

    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);

    nb_available = (c * s->cluster_sectors);

out:
    if (nb_available > nb_needed)
        nb_available = nb_needed;

    *num = nb_available - index_in_cluster;

    return ret;
}

/*
 * get_cluster_table
 *
 * For a given disk offset, load (and allocate if needed)
 * the l2 table.
 *
 * The l2 table offset in the qcow2 file and the cluster index
 * in the l2 table are returned to the caller.
 *
 * Returns 0 on success, -errno in failure cases
 */
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
                             uint64_t **new_l2_table,
                             int *new_l2_index)
{
    BDRVQcowState *s = bs->opaque;
    unsigned int l1_index, l2_index;
    uint64_t l2_offset;
    uint64_t *l2_table = NULL;
    int ret;

    /* seek the l2 offset in the l1 table */

    l1_index = offset >> (s->l2_bits + s->cluster_bits);
    if (l1_index >= s->l1_size) {
        ret = qcow2_grow_l1_table(bs, l1_index + 1, false);
        if (ret < 0) {
            return ret;
        }
    }
    l2_offset = s->l1_table[l1_index];

    /* seek the l2 table of the given l2 offset */

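    /* QCOW_OFLAG_COPIED in the L1 entry means the L2 table has a refcount
     * of exactly one and can be written to in place; without it,
     * l2_allocate() has to create a fresh (possibly copied) table first. */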
    if (l2_offset & QCOW_OFLAG_COPIED) {
        /* load the l2 table in memory */
        l2_offset &= ~QCOW_OFLAG_COPIED;
        ret = l2_load(bs, l2_offset, &l2_table);
        if (ret < 0) {
            return ret;
        }
    } else {
        /* First allocate a new L2 table (and do COW if needed) */
        ret = l2_allocate(bs, l1_index, &l2_table);
        if (ret < 0) {
            return ret;
        }

        /* Then decrease the refcount of the old table */
        if (l2_offset) {
            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
        }
        l2_offset = s->l1_table[l1_index] & ~QCOW_OFLAG_COPIED;
    }

    /* find the cluster offset for the given disk offset */

    l2_index = (offset >> s->cluster_bits) & (s->l2_size - 1);

    *new_l2_table = l2_table;
    *new_l2_index = l2_index;

    return 0;
}

/*
 * alloc_compressed_cluster_offset
 *
 * For a given offset of the disk image, return the cluster offset in the
 * qcow2 file.
 *
 * If the offset is not found, allocate a new compressed cluster.
 *
 * Returns the cluster offset if successful,
 * or 0 otherwise.
 *
 */

uint64_t qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs,
                                               uint64_t offset,
                                               int compressed_size)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret;
    uint64_t *l2_table;
    int64_t cluster_offset;
    int nb_csectors;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return 0;
    }

    cluster_offset = be64_to_cpu(l2_table[l2_index]);
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    if (cluster_offset)
        qcow2_free_any_clusters(bs, cluster_offset, 1);

    cluster_offset = qcow2_alloc_bytes(bs, compressed_size);
    if (cluster_offset < 0) {
        qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
        return 0;
    }

    nb_csectors = ((cluster_offset + compressed_size - 1) >> 9) -
                  (cluster_offset >> 9);

    cluster_offset |= QCOW_OFLAG_COMPRESSED |
                      ((uint64_t)nb_csectors << s->csize_shift);

    /* update L2 table */

    /* compressed clusters never have the copied flag */

    BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE_COMPRESSED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
    l2_table[l2_index] = cpu_to_be64(cluster_offset);
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return 0;
    }

    return cluster_offset;
}

int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int i, j = 0, l2_index, ret;
    uint64_t *old_cluster, start_sect, *l2_table;
    uint64_t cluster_offset = m->alloc_offset;
    bool cow = false;

    trace_qcow2_cluster_link_l2(qemu_coroutine_self(), m->nb_clusters);

    if (m->nb_clusters == 0)
        return 0;

    old_cluster = g_malloc(m->nb_clusters * sizeof(uint64_t));

    /* copy content of unmodified sectors */
    start_sect = (m->offset & ~(s->cluster_size - 1)) >> 9;
    if (m->n_start) {
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect, cluster_offset, 0, m->n_start);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    if (m->nb_available & (s->cluster_sectors - 1)) {
        uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
        cow = true;
        qemu_co_mutex_unlock(&s->lock);
        ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
                m->nb_available - end, s->cluster_sectors);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0)
            goto err;
    }

    /*
     * Update L2 table.
     *
     * Before we update the L2 table to actually point to the new cluster, we
     * need to be sure that the refcounts have been increased and COW was
     * handled.
     */
    if (cow) {
        qcow2_cache_depends_on_flush(s->l2_table_cache);
    }

    qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
    ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
    if (ret < 0) {
        goto err;
    }
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    for (i = 0; i < m->nb_clusters; i++) {
        /* If two concurrent writes happen to the same unallocated cluster,
         * each write allocates a separate cluster and writes its data
         * concurrently. The first one to complete updates the l2 table with
         * a pointer to its cluster; the second one has to do RMW (which is
         * done above by copy_sectors()), update the l2 table with its
         * cluster pointer, and free the old cluster. This is what this loop
         * does. */
        if (l2_table[l2_index + i] != 0)
            old_cluster[j++] = l2_table[l2_index + i];

        l2_table[l2_index + i] = cpu_to_be64((cluster_offset +
                    (i << s->cluster_bits)) | QCOW_OFLAG_COPIED);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto err;
    }

    /*
     * If this was a COW, we need to decrease the refcount of the old cluster.
     * Also flush bs->file to get the right order for L2 and refcount update.
     */
    if (j != 0) {
        for (i = 0; i < j; i++) {
            qcow2_free_any_clusters(bs,
                be64_to_cpu(old_cluster[i]) & ~QCOW_OFLAG_COPIED, 1);
        }
    }

    ret = 0;
err:
    g_free(old_cluster);
    return ret;
}

/*
 * Returns the number of contiguous clusters that can be used for an allocating
 * write, but require COW to be performed (this includes yet unallocated space,
 * which must be copied from the backing file)
 */
static int count_cow_clusters(BDRVQcowState *s, int nb_clusters,
    uint64_t *l2_table, int l2_index)
{
    int i = 0;
    uint64_t cluster_offset;

    while (i < nb_clusters) {
        i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
                &l2_table[l2_index], i,
                QCOW_OFLAG_COPIED | QCOW_OFLAG_COMPRESSED);
        if ((i >= nb_clusters) || be64_to_cpu(l2_table[l2_index + i])) {
            break;
        }

        i += count_contiguous_free_clusters(nb_clusters - i,
                &l2_table[l2_index + i]);
        if (i >= nb_clusters) {
            break;
        }

        cluster_offset = be64_to_cpu(l2_table[l2_index + i]);

        if ((cluster_offset & QCOW_OFLAG_COPIED) ||
            (cluster_offset & QCOW_OFLAG_COMPRESSED))
            break;
    }

    assert(i <= nb_clusters);
    return i;
}

/*
 * Allocates new clusters for the given guest_offset.
 *
 * At most *nb_clusters are allocated, and on return *nb_clusters is updated to
 * contain the number of clusters that have been allocated and are contiguous
 * in the image file.
 *
 * If *host_offset is non-zero, it specifies the offset in the image file at
 * which the new clusters must start. *nb_clusters can be 0 on return in this
 * case if the cluster at host_offset is already in use. If *host_offset is
 * zero, the clusters can be allocated anywhere in the image file.
 *
 * *host_offset is updated to contain the offset into the image file at which
 * the first allocated cluster starts.
 *
 * Return 0 on success and -errno in error cases. -EAGAIN means that the
 * function has been waiting for another request and the allocation must be
 * restarted, but the whole request should not be failed.
 */
static int do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
    uint64_t *host_offset, unsigned int *nb_clusters, uint64_t *l2_table)
{
    BDRVQcowState *s = bs->opaque;
    int64_t cluster_offset;
    QCowL2Meta *old_alloc;

    trace_qcow2_do_alloc_clusters_offset(qemu_coroutine_self(), guest_offset,
                                         *host_offset, *nb_clusters);

    /*
     * Check if there already is an AIO write request in flight which allocates
     * the same cluster. In this case we need to wait until the previous
     * request has completed and updated the L2 table accordingly.
     */
    QLIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {

        uint64_t start = guest_offset >> s->cluster_bits;
        uint64_t end = start + *nb_clusters;
        uint64_t old_start = old_alloc->offset >> s->cluster_bits;
        uint64_t old_end = old_start + old_alloc->nb_clusters;

        if (end < old_start || start > old_end) {
            /* No intersection */
        } else {
            if (start < old_start) {
                /* Stop at the start of a running allocation */
                *nb_clusters = old_start - start;
            } else {
                *nb_clusters = 0;
            }

            if (*nb_clusters == 0) {
                /* Wait for the dependency to complete. We need to recheck
                 * the free/allocated clusters when we continue. */
                qemu_co_mutex_unlock(&s->lock);
                qemu_co_queue_wait(&old_alloc->dependent_requests);
                qemu_co_mutex_lock(&s->lock);
                return -EAGAIN;
            }
        }
    }

    if (!*nb_clusters) {
        abort();
    }

    /* Allocate new clusters */
    trace_qcow2_cluster_alloc_phys(qemu_coroutine_self());
    if (*host_offset == 0) {
        cluster_offset = qcow2_alloc_clusters(bs, *nb_clusters * s->cluster_size);
    } else {
        cluster_offset = *host_offset;
        *nb_clusters = qcow2_alloc_clusters_at(bs, cluster_offset, *nb_clusters);
    }

    if (cluster_offset < 0) {
        return cluster_offset;
    }
    *host_offset = cluster_offset;
    return 0;
}

/*
 * alloc_cluster_offset
 *
 * For a given offset on the virtual disk, find the cluster offset in the
 * qcow2 file. If the offset is not found, allocate a new cluster.
 *
 * If the cluster was already allocated, m->nb_clusters is set to 0 and
 * other fields in m are meaningless.
 *
 * If the cluster is newly allocated, m->nb_clusters is set to the number of
 * contiguous clusters that have been allocated. In this case, the other
 * fields of m are valid and contain information about the first allocated
 * cluster.
 *
 * If the request conflicts with another write request in flight, the coroutine
 * is queued and will be reentered when the dependency has completed.
 *
 * Return 0 on success and -errno in error cases
 */
int qcow2_alloc_cluster_offset(BlockDriverState *bs, uint64_t offset,
    int n_start, int n_end, int *num, QCowL2Meta *m)
{
    BDRVQcowState *s = bs->opaque;
    int l2_index, ret, sectors;
    uint64_t *l2_table;
    unsigned int nb_clusters, keep_clusters;
    uint64_t cluster_offset;

    trace_qcow2_alloc_clusters_offset(qemu_coroutine_self(), offset,
                                      n_start, n_end);

    /* Find L2 entry for the first involved cluster */
    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /*
     * Calculate the number of clusters to look for. We stop at L2 table
     * boundaries to keep things simple.
     */
again:
    nb_clusters = MIN(size_to_clusters(s, n_end << BDRV_SECTOR_BITS),
                      s->l2_size - l2_index);

    cluster_offset = be64_to_cpu(l2_table[l2_index]);

    /*
     * Check how many clusters are already allocated and don't need COW, and how
     * many need a new allocation.
     */
    if (cluster_offset & QCOW_OFLAG_COPIED) {
        /* We keep all QCOW_OFLAG_COPIED clusters */
        keep_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
                                                  &l2_table[l2_index], 0,
                                                  QCOW_OFLAG_COPIED);
        assert(keep_clusters <= nb_clusters);
        nb_clusters -= keep_clusters;
    } else {
        /* For the moment, overwrite compressed clusters one by one */
        if (cluster_offset & QCOW_OFLAG_COMPRESSED) {
            nb_clusters = 1;
        } else {
            nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
        }

        keep_clusters = 0;
        cluster_offset = 0;
    }
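    /* Example: a write touching 5 clusters of which the first 2 are already
     * allocated with QCOW_OFLAG_COPIED set ends up with keep_clusters == 2
     * and nb_clusters == 3; only the last 3 clusters get a new allocation. */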

    cluster_offset &= ~QCOW_OFLAG_COPIED;

    /* If there is something left to allocate, do that now */
    *m = (QCowL2Meta) {
        .cluster_offset = cluster_offset,
        .nb_clusters    = 0,
    };
    qemu_co_queue_init(&m->dependent_requests);

    if (nb_clusters > 0) {
        uint64_t alloc_offset;
        uint64_t alloc_cluster_offset;
        uint64_t keep_bytes = keep_clusters * s->cluster_size;

        /* Calculate start and size of allocation */
        alloc_offset = offset + keep_bytes;

        if (keep_clusters == 0) {
            alloc_cluster_offset = 0;
        } else {
            alloc_cluster_offset = cluster_offset + keep_bytes;
        }

        /* Allocate, if necessary at a given offset in the image file */
        ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
            &nb_clusters, l2_table);
        if (ret == -EAGAIN) {
            goto again;
        } else if (ret < 0) {
            goto fail;
        }

        /* save info needed for meta data update */
        if (nb_clusters > 0) {
            int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
            int avail_sectors = (keep_clusters + nb_clusters)
                                << (s->cluster_bits - BDRV_SECTOR_BITS);

            *m = (QCowL2Meta) {
                .cluster_offset = keep_clusters == 0 ?
                                  alloc_cluster_offset : cluster_offset,
                .alloc_offset   = alloc_cluster_offset,
                .offset         = alloc_offset,
                .n_start        = keep_clusters == 0 ? n_start : 0,
                .nb_clusters    = nb_clusters,
                .nb_available   = MIN(requested_sectors, avail_sectors),
            };
            qemu_co_queue_init(&m->dependent_requests);
            QLIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
        }
    }

    /* Some cleanup work */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        goto fail_put;
    }

    sectors = (keep_clusters + nb_clusters) << (s->cluster_bits - 9);
    if (sectors > n_end) {
        sectors = n_end;
    }

    assert(sectors > n_start);
    *num = sectors - n_start;

    return 0;

fail:
    qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
fail_put:
    if (m->nb_clusters > 0) {
        QLIST_REMOVE(m, next_in_flight);
    }
    return ret;
}

static int decompress_buffer(uint8_t *out_buf, int out_buf_size,
                             const uint8_t *buf, int buf_size)
{
    z_stream strm1, *strm = &strm1;
    int ret, out_len;

    memset(strm, 0, sizeof(*strm));

    strm->next_in = (uint8_t *)buf;
    strm->avail_in = buf_size;
    strm->next_out = out_buf;
    strm->avail_out = out_buf_size;

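    /* Negative windowBits makes zlib expect raw deflate data without a
     * zlib header; 12 selects a 4 KB (1 << 12) window, matching how qcow2
     * compresses clusters. */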
    ret = inflateInit2(strm, -12);
    if (ret != Z_OK)
        return -1;
    ret = inflate(strm, Z_FINISH);
    out_len = strm->next_out - out_buf;
    if ((ret != Z_STREAM_END && ret != Z_BUF_ERROR) ||
        out_len != out_buf_size) {
        inflateEnd(strm);
        return -1;
    }
    inflateEnd(strm);
    return 0;
}

int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
{
    BDRVQcowState *s = bs->opaque;
    int ret, csize, nb_csectors, sector_offset;
    uint64_t coffset;

    coffset = cluster_offset & s->cluster_offset_mask;
    if (s->cluster_cache_offset != coffset) {
        nb_csectors = ((cluster_offset >> s->csize_shift) & s->csize_mask) + 1;
        sector_offset = coffset & 511;
        csize = nb_csectors * 512 - sector_offset;
        BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
        ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data, nb_csectors);
        if (ret < 0) {
            return ret;
        }
        if (decompress_buffer(s->cluster_cache, s->cluster_size,
                              s->cluster_data + sector_offset, csize) < 0) {
            return -EIO;
        }
        s->cluster_cache_offset = coffset;
    }
    return 0;
}

/*
 * Discards as many of the nb_clusters clusters as possible at once (i.e.
 * all clusters in the same L2 table) and returns the number of discarded
 * clusters.
 */
static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
    unsigned int nb_clusters)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t *l2_table;
    int l2_index;
    int ret;
    int i;

    ret = get_cluster_table(bs, offset, &l2_table, &l2_index);
    if (ret < 0) {
        return ret;
    }

    /* Limit nb_clusters to one L2 table */
    nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);

    for (i = 0; i < nb_clusters; i++) {
        uint64_t old_offset;

        old_offset = be64_to_cpu(l2_table[l2_index + i]);
        old_offset &= ~QCOW_OFLAG_COPIED;

        if (old_offset == 0) {
            continue;
        }

        /* First remove L2 entries */
        qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
        l2_table[l2_index + i] = cpu_to_be64(0);

        /* Then decrease the refcount */
        qcow2_free_any_clusters(bs, old_offset, 1);
    }

    ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
    if (ret < 0) {
        return ret;
    }

    return nb_clusters;
}

int qcow2_discard_clusters(BlockDriverState *bs, uint64_t offset,
    int nb_sectors)
{
    BDRVQcowState *s = bs->opaque;
    uint64_t end_offset;
    unsigned int nb_clusters;
    int ret;

    end_offset = offset + (nb_sectors << BDRV_SECTOR_BITS);

    /* Round start up and end down */
    offset = align_offset(offset, s->cluster_size);
    end_offset &= ~(s->cluster_size - 1);
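    /* Partial clusters at either end are left untouched: only whole
     * clusters can be deallocated in a qcow2 image. */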

    if (offset > end_offset) {
        return 0;
    }

    nb_clusters = size_to_clusters(s, end_offset - offset);

    /* Each L2 table is handled by its own loop iteration */
    while (nb_clusters > 0) {
        ret = discard_single_l2(bs, offset, nb_clusters);
        if (ret < 0) {
            return ret;
        }

        nb_clusters -= ret;
        offset += (ret * s->cluster_size);
    }

    return 0;
}