/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <[email protected]>
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwritev(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
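
/* Worked example with the defaults used by bdrv_qed_create() below
 * (QED_DEFAULT_CLUSTER_SIZE = 65536, QED_DEFAULT_TABLE_SIZE = 4):
 * table_entries = (4 * 65536) / 8 = 32768, l2_size = 32768 * 65536 = 2 GB of
 * data per L2 table, so the maximum image size is 2 GB * 32768 = 64 TB.
 */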

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
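    /* x & (x - 1) clears the lowest set bit of x; the result is zero exactly
     * when x has a single bit set, i.e. when x is a power of two.
     */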
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}
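
/* Note that a table returned by qed_new_l2_table() exists only in memory and
 * in the L2 cache at this point; it reaches the image file when
 * qed_aio_write_l2_update() below writes it out with qed_write_l2_table().
 */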

static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed.  */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain_begin, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}
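
/* qed_plug_allocating_write_reqs()/qed_unplug_allocating_write_reqs() bracket
 * the consistency-check work in qed_need_check_timer_entry() below: while
 * plugged, no new allocating write can proceed, so QED_F_NEED_CHECK can be
 * cleared from the header without racing against in-flight allocations.
 */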

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void coroutine_fn bdrv_qed_co_drain_begin(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

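    /* For example, a 64 KB cluster size gives l2_shift = 16; with the default
     * table_size of 4 clusters, table_nelems = 32768 and l1_shift = 31.  A
     * byte position then decomposes as: bits [l1_shift, 63] index the L1
     * table, bits [l2_shift, l1_shift) index the L2 table, and the low
     * l2_shift bits are the offset into the cluster.
     */
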
    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    bdrv_qed_init_state(bs);
    return bdrv_qed_do_open(bs, options, flags, errp);
}

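/* Opening is split into bdrv_qed_open(), which acquires the file child, and
 * bdrv_qed_do_open() above, which reads and validates the header; the split
 * lets bdrv_qed_invalidate_cache() below re-run only the do_open step on an
 * already attached file.
 */
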
static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

/* Called with table_lock held.  */
static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors, int *pnum,
                                                         BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, cb.pos, &len, &offset);
    qed_is_allocated_cb(&cb, ret, offset, len);

    /* The callback was invoked immediately */
    assert(cb.status != BDRV_BLOCK_OFFSET_MASK);

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

/* Called with table_lock held.  */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                        offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write.  */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                  &acb->backing_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs = bs,
        .cur_pos = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov = qiov,
        .flags = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = bytes;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset,
                             PreallocMode prealloc, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_str(prealloc));
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_lock(&s->table_lock);
    }
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_unlock(&s->table_lock);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};
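
/* These options correspond to qemu-img's -o switch.  An illustrative
 * invocation (not taken from this file):
 *
 *   qemu-img create -f qed -o cluster_size=65536,table_size=4 test.qed 16G
 *
 * which reaches bdrv_qed_create() above with cluster_size 65536, table_size 4
 * and a 16 GB virtual disk size.
 */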

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain_begin      = bdrv_qed_co_drain_begin,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);