/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}
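
/* A raw backing file must never be probed for its format, so QED records it
 * with QED_F_BACKING_FORMAT_NO_PROBE and the backing file is always opened
 * as raw (see qed_create() and bdrv_qed_change_backing_file() below).
 */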

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
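
/* Worked example with the format defaults (64 KB clusters, table_size of 4):
 * table_entries = (4 * 65536) / 8 = 32768, l2_size = 32768 * 64 KB = 2 GB,
 * for a maximum image size of 32768 * 2 GB = 64 TB.
 */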

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
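
/* Because allocation only advances the in-memory file size, a crash before
 * the corresponding table update leaves no dangling metadata; at worst the
 * image file contains unused clusters.
 */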

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);
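
/* Allocating write requests are serialized: a request that must allocate
 * clusters queues itself on allocating_write_reqs and only the head of the
 * queue makes progress.  Plugging pauses even the head request, which the
 * need-check timer uses while it rewrites the header.
 */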

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}
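
/* Clearing QED_F_NEED_CHECK proceeds in stages once the timer below fires:
 * allocating writes are plugged, in-flight data is flushed, the flag is
 * cleared and the header rewritten, and a final flush is issued.  Unplugging
 * deliberately does not wait for that last flush to complete.
 */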

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
              get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;
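
    /* For example, with 64 KB clusters and a 4-cluster table size:
     * table_nelems = (65536 * 4) / 8 = 32768, l2_shift = 16,
     * l2_mask = 0x7fff, and l1_shift = 31, so one L2 table maps 2 GB.
     */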

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size,
                              bs->backing_file, sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                       qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

/* We have nothing to do for QED reopen, stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, NULL, BDRV_O_RDWR | BDRV_O_CACHE_WB,
                         &local_err);
    if (ret < 0) {
        qerror_report_err(local_err);
        error_free(local_err);
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options,
                           Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset,
                                size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}
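
/* bdrv_qed_co_get_block_status() below uses cb.status as its own completion
 * flag: the field starts as BDRV_BLOCK_OFFSET_MASK, a value the callback can
 * never produce, and the coroutine yields until the callback overwrites it.
 */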

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                         int64_t sector_num,
                                                         int nb_sectors,
                                                         int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
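
/* When an allocating write touches only part of a cluster, the untouched
 * head and tail of the cluster must be populated from the backing file:
 *
 *     |<---------- cluster ---------->|
 *     | prefill | request | postfill |
 *
 * qed_aio_write_prefill() and qed_aio_write_postfill() below copy these
 * regions before qed_aio_write_main() writes the request data itself.
 */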

/**
 * Populate back untouched region of new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate front untouched region of new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}
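
/* The dirty flag is thus written at most once per burst of allocating
 * writes: once QED_F_NEED_CHECK is set, later allocating writes see it
 * already set and skip the extra header update.
 */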

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}
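
/* In-place writes need no table update: qed_aio_write_main() sees
 * QED_CLUSTER_FOUND and chains directly to qed_aio_next_io().
 */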

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov,
                                             int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, NULL, bs->open_flags, NULL);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};
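
/* Typical image creation with these options, for example:
 *
 *   qemu-img create -f qed -o cluster_size=65536 image.qed 10G
 */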

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);