#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/option.h"
size_t doorbell_scale;
bool write_cache_supported;
EventNotifier irq_notifier;
+
uint64_t nsze; /* Namespace size reported by identify command */
int nsid; /* The namespace id to read/write data. */
+    int blkshift; /* Block size shift of the namespace (from its LBA format) */
+
uint64_t max_transfer;
bool plugged;
error_propagate(errp, local_err);
goto fail;
}
- q->cq.doorbell = &s->regs->doorbells[idx * 2 * s->doorbell_scale + 1];
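+    /* Doorbells come in SQ tail / CQ head pairs, with consecutive registers
+     * spaced doorbell_scale 32-bit words apart (CAP.DSTRD), so the CQ head
+     * doorbell of pair idx is slot (idx * 2 + 1) * doorbell_scale. */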
+ q->cq.doorbell = &s->regs->doorbells[(idx * 2 + 1) * s->doorbell_scale];
return q;
fail:
while (q->inflight) {
int16_t cid;
c = (NvmeCqe *)&q->cq.queue[q->cq.head * NVME_CQ_ENTRY_BYTES];
- if (!c->cid || (le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
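+        /* The Phase Tag bit flips on every pass the device makes through the
+         * queue, so an entry still carrying the previous phase has not been
+         * posted yet; there is no need to inspect or clear the CID field. */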
+ if ((le16_to_cpu(c->status) & 0x1) == q->cq_phase) {
break;
}
q->cq.head = (q->cq.head + 1) % NVME_QUEUE_SIZE;
qemu_mutex_unlock(&q->lock);
req.cb(req.opaque, nvme_translate_error(c));
qemu_mutex_lock(&q->lock);
- c->cid = cpu_to_le16(0);
q->inflight--;
- /* Flip Phase Tag bit. */
- c->status = cpu_to_le16(le16_to_cpu(c->status) ^ 0x1);
progress = true;
}
if (progress) {
BDRVNVMeState *s = bs->opaque;
NvmeIdCtrl *idctrl;
NvmeIdNs *idns;
+ NvmeLBAF *lbaf;
uint8_t *resp;
int r;
uint64_t iova;
}
s->nsze = le64_to_cpu(idns->nsze);
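+    /* FLBAS bits 0:3 index the LBA format currently in use by the namespace */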
+ lbaf = &idns->lbaf[NVME_ID_NS_FLBAS_INDEX(idns->flbas)];
+
+ if (lbaf->ms) {
+ error_setg(errp, "Namespaces with metadata are not yet supported");
+ goto out;
+ }
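+
+    /* Accept only block sizes between 512 bytes and 4K that also fit within
+     * the controller's page size */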
+ if (lbaf->ds < BDRV_SECTOR_BITS || lbaf->ds > 12 ||
+ (1 << lbaf->ds) > s->page_size)
+ {
+ error_setg(errp, "Namespace has unsupported block size (2^%d)",
+ lbaf->ds);
+ goto out;
+ }
+
+ s->blkshift = lbaf->ds;
out:
qemu_vfio_dma_unmap(s->vfio, resp);
qemu_vfree(resp);
/* Set up admin queue. */
s->queues = g_new(NVMeQueuePair *, 1);
- s->nr_queues = 1;
s->queues[0] = nvme_create_queue_pair(bs, 0, NVME_QUEUE_SIZE, errp);
if (!s->queues[0]) {
ret = -EINVAL;
goto out;
}
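+    /* Count the admin queue only once it has actually been created, so that
+     * cleanup never operates on a queue that was not allocated */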
+ s->nr_queues = 1;
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
s->regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << 16) | NVME_QUEUE_SIZE);
s->regs->asq = cpu_to_le64(s->queues[0]->sq.iova);
static int64_t nvme_getlength(BlockDriverState *bs)
{
BDRVNVMeState *s = bs->opaque;
-    return s->nsze << BDRV_SECTOR_BITS;
+    return s->nsze << s->blkshift;
+}
+
+static uint32_t nvme_get_blocksize(BlockDriverState *bs)
+{
+ BDRVNVMeState *s = bs->opaque;
+ assert(s->blkshift >= BDRV_SECTOR_BITS && s->blkshift <= 12);
+ return UINT32_C(1) << s->blkshift;
+}
+
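+/* Report the namespace block size as both the logical and physical block size */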
+static int nvme_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz)
+{
+ uint32_t blocksize = nvme_get_blocksize(bs);
+ bsz->phys = blocksize;
+ bsz->log = blocksize;
+ return 0;
}
/* Called with s->dma_map_lock */
BDRVNVMeState *s = bs->opaque;
NVMeQueuePair *ioq = s->queues[1];
NVMeRequest *req;
- uint32_t cdw12 = (((bytes >> BDRV_SECTOR_BITS) - 1) & 0xFFFF) |
+
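+    /* cdw12: 0-based number of logical blocks in bits 15:0, FUA flag in bit 30 */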
+ uint32_t cdw12 = (((bytes >> s->blkshift) - 1) & 0xFFFF) |
(flags & BDRV_REQ_FUA ? 1 << 30 : 0);
NvmeCmd cmd = {
.opcode = is_write ? NVME_CMD_WRITE : NVME_CMD_READ,
.nsid = cpu_to_le32(s->nsid),
- .cdw10 = cpu_to_le32((offset >> BDRV_SECTOR_BITS) & 0xFFFFFFFF),
- .cdw11 = cpu_to_le32(((offset >> BDRV_SECTOR_BITS) >> 32) & 0xFFFFFFFF),
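+        /* cdw10/11: the 64-bit starting LBA, in units of the namespace block size */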
+ .cdw10 = cpu_to_le32((offset >> s->blkshift) & 0xFFFFFFFF),
+ .cdw11 = cpu_to_le32(((offset >> s->blkshift) >> 32) & 0xFFFFFFFF),
.cdw12 = cpu_to_le32(cdw12),
};
NVMeCoData data = {
.bdrv_file_open = nvme_file_open,
.bdrv_close = nvme_close,
.bdrv_getlength = nvme_getlength,
+ .bdrv_probe_blocksizes = nvme_probe_blocksizes,
.bdrv_co_preadv = nvme_co_preadv,
.bdrv_co_pwritev = nvme_co_pwritev,