X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/ebffe2afceb1a17b5d134b5debf553955fe5ea1a..1abcfe9e299b6e48792ddfa6f2ab8cec93f16823:/block/qed-table.c

diff --git a/block/qed-table.c b/block/qed-table.c
index f31f9ff069..7df5680adb 100644
--- a/block/qed-table.c
+++ b/block/qed-table.c
@@ -12,102 +12,45 @@
  *
  */
 
+#include "qemu/osdep.h"
 #include "trace.h"
-#include "qemu_socket.h" /* for EINPROGRESS on Windows */
+#include "qemu/sockets.h" /* for EINPROGRESS on Windows */
 #include "qed.h"
+#include "qemu/bswap.h"
 
-typedef struct {
-    GenericCB gencb;
-    BDRVQEDState *s;
-    QEDTable *table;
-
-    struct iovec iov;
+/* Called with table_lock held. */
+static int qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table)
+{
     QEMUIOVector qiov;
-} QEDReadTableCB;
+    int noffsets;
+    int i, ret;
 
-static void qed_read_table_cb(void *opaque, int ret)
-{
-    QEDReadTableCB *read_table_cb = opaque;
-    QEDTable *table = read_table_cb->table;
-    int noffsets = read_table_cb->iov.iov_len / sizeof(uint64_t);
-    int i;
+    struct iovec iov = {
+        .iov_base = table->offsets,
+        .iov_len = s->header.cluster_size * s->header.table_size,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
 
-    /* Handle I/O error */
-    if (ret) {
+    trace_qed_read_table(s, offset, table);
+
+    qemu_co_mutex_unlock(&s->table_lock);
+    ret = bdrv_preadv(s->bs->file, offset, &qiov);
+    qemu_co_mutex_lock(&s->table_lock);
+    if (ret < 0) {
         goto out;
     }
 
     /* Byteswap offsets */
+    noffsets = qiov.size / sizeof(uint64_t);
     for (i = 0; i < noffsets; i++) {
         table->offsets[i] = le64_to_cpu(table->offsets[i]);
     }
 
+    ret = 0;
 out:
     /* Completion */
-    trace_qed_read_table_cb(read_table_cb->s, read_table_cb->table, ret);
-    gencb_complete(&read_table_cb->gencb, ret);
-}
-
-static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
-                           BlockDriverCompletionFunc *cb, void *opaque)
-{
-    QEDReadTableCB *read_table_cb = gencb_alloc(sizeof(*read_table_cb),
-                                                cb, opaque);
-    QEMUIOVector *qiov = &read_table_cb->qiov;
-    BlockDriverAIOCB *aiocb;
-
-    trace_qed_read_table(s, offset, table);
-
-    read_table_cb->s = s;
-    read_table_cb->table = table;
-    read_table_cb->iov.iov_base = table->offsets,
-    read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size,
-
-    qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
-    aiocb = bdrv_aio_readv(s->bs->file, offset / BDRV_SECTOR_SIZE, qiov,
-                           read_table_cb->iov.iov_len / BDRV_SECTOR_SIZE,
-                           qed_read_table_cb, read_table_cb);
-    if (!aiocb) {
-        qed_read_table_cb(read_table_cb, -EIO);
-    }
-}
-
-typedef struct {
-    GenericCB gencb;
-    BDRVQEDState *s;
-    QEDTable *orig_table;
-    QEDTable *table;
-    bool flush;                     /* flush after write? */
-
-    struct iovec iov;
-    QEMUIOVector qiov;
-} QEDWriteTableCB;
-
-static void qed_write_table_cb(void *opaque, int ret)
-{
-    QEDWriteTableCB *write_table_cb = opaque;
-
-    trace_qed_write_table_cb(write_table_cb->s,
-                             write_table_cb->orig_table,
-                             write_table_cb->flush,
-                             ret);
-
-    if (ret) {
-        goto out;
-    }
-
-    if (write_table_cb->flush) {
-        /* We still need to flush first */
-        write_table_cb->flush = false;
-        bdrv_aio_flush(write_table_cb->s->bs, qed_write_table_cb,
-                       write_table_cb);
-        return;
-    }
-
-out:
-    qemu_vfree(write_table_cb->table);
-    gencb_complete(&write_table_cb->gencb, ret);
-    return;
+    trace_qed_read_table_cb(s, table, ret);
+    return ret;
 }
 
 /**
@@ -119,18 +62,19 @@ out:
  * @index:      Index of first element
  * @n:          Number of elements
  * @flush:      Whether or not to sync to disk
- * @cb:         Completion function
- * @opaque:     Argument for completion function
+ *
+ * Called with table_lock held.
  */
-static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
-                            unsigned int index, unsigned int n, bool flush,
-                            BlockDriverCompletionFunc *cb, void *opaque)
+static int qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
+                           unsigned int index, unsigned int n, bool flush)
 {
-    QEDWriteTableCB *write_table_cb;
-    BlockDriverAIOCB *aiocb;
     unsigned int sector_mask = BDRV_SECTOR_SIZE / sizeof(uint64_t) - 1;
     unsigned int start, end, i;
+    QEDTable *new_table;
+    struct iovec iov;
+    QEMUIOVector qiov;
     size_t len_bytes;
+    int ret;
 
     trace_qed_write_table(s, offset, table, index, n);
 
@@ -140,166 +84,116 @@ static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
 
     len_bytes = (end - start) * sizeof(uint64_t);
 
-    write_table_cb = gencb_alloc(sizeof(*write_table_cb), cb, opaque);
-    write_table_cb->s = s;
-    write_table_cb->orig_table = table;
-    write_table_cb->flush = flush;
-    write_table_cb->table = qemu_blockalign(s->bs, len_bytes);
-    write_table_cb->iov.iov_base = write_table_cb->table->offsets;
-    write_table_cb->iov.iov_len = len_bytes;
-    qemu_iovec_init_external(&write_table_cb->qiov, &write_table_cb->iov, 1);
+    new_table = qemu_blockalign(s->bs, len_bytes);
+    iov = (struct iovec) {
+        .iov_base = new_table->offsets,
+        .iov_len = len_bytes,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
 
     /* Byteswap table */
     for (i = start; i < end; i++) {
         uint64_t le_offset = cpu_to_le64(table->offsets[i]);
-        write_table_cb->table->offsets[i - start] = le_offset;
+        new_table->offsets[i - start] = le_offset;
     }
 
     /* Adjust for offset into table */
     offset += start * sizeof(uint64_t);
 
-    aiocb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
-                            &write_table_cb->qiov,
-                            write_table_cb->iov.iov_len / BDRV_SECTOR_SIZE,
-                            qed_write_table_cb, write_table_cb);
-    if (!aiocb) {
-        qed_write_table_cb(write_table_cb, -EIO);
+    qemu_co_mutex_unlock(&s->table_lock);
+    ret = bdrv_pwritev(s->bs->file, offset, &qiov);
+    qemu_co_mutex_lock(&s->table_lock);
+    trace_qed_write_table_cb(s, table, flush, ret);
+    if (ret < 0) {
+        goto out;
     }
-}
 
-/**
- * Propagate return value from async callback
- */
-static void qed_sync_cb(void *opaque, int ret)
-{
-    *(int *)opaque = ret;
+    if (flush) {
+        ret = bdrv_flush(s->bs);
+        if (ret < 0) {
+            goto out;
+        }
+    }
+
+    ret = 0;
+out:
+    qemu_vfree(new_table);
+    return ret;
 }
 
 int qed_read_l1_table_sync(BDRVQEDState *s)
 {
-    int ret = -EINPROGRESS;
-
-    qed_read_table(s, s->header.l1_table_offset,
-                   s->l1_table, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
-    }
-
-    return ret;
+    return qed_read_table(s, s->header.l1_table_offset, s->l1_table);
 }
 
-void qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n,
-                        BlockDriverCompletionFunc *cb, void *opaque)
+/* Called with table_lock held. */
+int qed_write_l1_table(BDRVQEDState *s, unsigned int index, unsigned int n)
 {
     BLKDBG_EVENT(s->bs->file, BLKDBG_L1_UPDATE);
-    qed_write_table(s, s->header.l1_table_offset,
-                    s->l1_table, index, n, false, cb, opaque);
+    return qed_write_table(s, s->header.l1_table_offset,
+                           s->l1_table, index, n, false);
 }
 
 int qed_write_l1_table_sync(BDRVQEDState *s, unsigned int index, unsigned int n)
 {
-    int ret = -EINPROGRESS;
+    return qed_write_l1_table(s, index, n);
+}
 
-    qed_write_l1_table(s, index, n, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
-    }
+/* Called with table_lock held. */
+int qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
+{
+    int ret;
 
-    return ret;
-}
+    qed_unref_l2_cache_entry(request->l2_table);
+
+    /* Check for cached L2 entry */
+    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
+    if (request->l2_table) {
+        return 0;
+    }
 
-typedef struct {
-    GenericCB gencb;
-    BDRVQEDState *s;
-    uint64_t l2_offset;
-    QEDRequest *request;
-} QEDReadL2TableCB;
+    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
+    request->l2_table->table = qed_alloc_table(s);
 
-static void qed_read_l2_table_cb(void *opaque, int ret)
-{
-    QEDReadL2TableCB *read_l2_table_cb = opaque;
-    QEDRequest *request = read_l2_table_cb->request;
-    BDRVQEDState *s = read_l2_table_cb->s;
-    CachedL2Table *l2_table = request->l2_table;
-    uint64_t l2_offset = read_l2_table_cb->l2_offset;
+    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
+    ret = qed_read_table(s, offset, request->l2_table->table);
 
     if (ret) {
         /* can't trust loaded L2 table anymore */
-        qed_unref_l2_cache_entry(l2_table);
+        qed_unref_l2_cache_entry(request->l2_table);
        request->l2_table = NULL;
     } else {
-        l2_table->offset = l2_offset;
+        request->l2_table->offset = offset;
 
-        qed_commit_l2_cache_entry(&s->l2_cache, l2_table);
+        qed_commit_l2_cache_entry(&s->l2_cache, request->l2_table);
 
         /* This is guaranteed to succeed because we just committed the entry
          * to the cache.
          */
-        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
+        request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
         assert(request->l2_table != NULL);
     }
 
-    gencb_complete(&read_l2_table_cb->gencb, ret);
-}
-
-void qed_read_l2_table(BDRVQEDState *s, QEDRequest *request, uint64_t offset,
-                       BlockDriverCompletionFunc *cb, void *opaque)
-{
-    QEDReadL2TableCB *read_l2_table_cb;
-
-    qed_unref_l2_cache_entry(request->l2_table);
-
-    /* Check for cached L2 entry */
-    request->l2_table = qed_find_l2_cache_entry(&s->l2_cache, offset);
-    if (request->l2_table) {
-        cb(opaque, 0);
-        return;
-    }
-
-    request->l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);
-    request->l2_table->table = qed_alloc_table(s);
-
-    read_l2_table_cb = gencb_alloc(sizeof(*read_l2_table_cb), cb, opaque);
-    read_l2_table_cb->s = s;
-    read_l2_table_cb->l2_offset = offset;
-    read_l2_table_cb->request = request;
-
-    BLKDBG_EVENT(s->bs->file, BLKDBG_L2_LOAD);
-    qed_read_table(s, offset, request->l2_table->table,
-                   qed_read_l2_table_cb, read_l2_table_cb);
+    return ret;
 }
 
 int qed_read_l2_table_sync(BDRVQEDState *s, QEDRequest *request, uint64_t offset)
 {
-    int ret = -EINPROGRESS;
-
-    qed_read_l2_table(s, request, offset, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
-    }
-
-    return ret;
+    return qed_read_l2_table(s, request, offset);
 }
 
-void qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
-                        unsigned int index, unsigned int n, bool flush,
-                        BlockDriverCompletionFunc *cb, void *opaque)
+/* Called with table_lock held. */
+int qed_write_l2_table(BDRVQEDState *s, QEDRequest *request,
+                       unsigned int index, unsigned int n, bool flush)
 {
     BLKDBG_EVENT(s->bs->file, BLKDBG_L2_UPDATE);
-    qed_write_table(s, request->l2_table->offset,
-                    request->l2_table->table, index, n, flush, cb, opaque);
+    return qed_write_table(s, request->l2_table->offset,
+                           request->l2_table->table, index, n, flush);
 }
 
 int qed_write_l2_table_sync(BDRVQEDState *s, QEDRequest *request,
                             unsigned int index, unsigned int n, bool flush)
 {
-    int ret = -EINPROGRESS;
-
-    qed_write_l2_table(s, request, index, n, flush, qed_sync_cb, &ret);
-    while (ret == -EINPROGRESS) {
-        qemu_aio_wait();
-    }
-
-    return ret;
+    return qed_write_l2_table(s, request, index, n, flush);
 }
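
Editor's note, not part of the patch above: a minimal, self-contained sketch of the calling-convention change this diff applies may help when reading the hunks. Helpers that used to report completion through a BlockDriverCompletionFunc callback (with qed_sync_cb polling for -EINPROGRESS) become plain functions returning 0 or -errno, so the *_sync wrappers collapse to direct calls; in the real patch the same shape appears as qed_read_table()/qed_write_table() returning int, with s->table_lock dropped around the blocking bdrv_preadv()/bdrv_pwritev() calls. Every identifier in the sketch (demo_read_table, demo_read_table_sync, demo_sync_cb, fake_read, CompletionFunc) is a hypothetical stand-in, not a QEMU API.

/*
 * Illustrative sketch only -- not QEMU code and not part of the patch above.
 * Shows the old callback-based contract next to the new return-value contract.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>

typedef void (*CompletionFunc)(void *opaque, int ret);

/* Stand-in for the actual table I/O; always succeeds in this sketch. */
static int fake_read(uint64_t offset, void *buf, size_t len)
{
    (void)offset; (void)buf; (void)len;
    return 0;
}

/* Old style: the result is only visible inside the completion callback. */
static void demo_read_table_old(uint64_t offset, void *buf, size_t len,
                                CompletionFunc cb, void *opaque)
{
    cb(opaque, fake_read(offset, buf, len));
}

/* Old style: propagate the callback's result into a caller-owned int,
 * as qed_sync_cb did. */
static void demo_sync_cb(void *opaque, int ret)
{
    *(int *)opaque = ret;
}

/* New style: the return value is the result. */
static int demo_read_table(uint64_t offset, void *buf, size_t len)
{
    return fake_read(offset, buf, len);
}

/* New style: the sync wrapper is just a direct call. */
static int demo_read_table_sync(uint64_t offset, void *buf, size_t len)
{
    return demo_read_table(offset, buf, len);
}

int main(void)
{
    uint64_t table[4];
    int ret = -EINPROGRESS;

    /* Old convention: seed ret, then wait for the callback to overwrite it
     * (QEMU would spin in qemu_aio_wait() between these two steps). */
    demo_read_table_old(0, table, sizeof(table), demo_sync_cb, &ret);
    printf("old style: ret = %d\n", ret);

    /* New convention: no callback, no polling. */
    ret = demo_read_table_sync(0, table, sizeof(table));
    printf("new style: ret = %d\n", ret);
    return 0;
}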