/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <[email protected]>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 * https://nvmexpress.org/developers/nvme-specification/
 */

/**
 * Usage: add options:
 *     -drive file=<file>,if=none,id=<drive_id>
 *     -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *             cmb_size_mb=<cmb_size_mb[optional]>, \
 *             [pmrdev=<mem_backend_file_id>,] \
 *             max_ioqpairs=<N[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 *
 * cmb_size_mb= and pmrdev= options are mutually exclusive due to limitation
 * in available BAR's. cmb_size_mb= will take precedence over pmrdev= when
 * both provided.
 * Enabling pmr emulation can be achieved by pointing to memory-backend-file.
 * For example:
 * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
 *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
 */

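/*
 * A complete invocation following the options above might look like the
 * sketch below; the image path, drive id and serial number are placeholders:
 *
 *     qemu-system-x86_64 ... \
 *         -drive file=nvme.img,if=none,id=nvm \
 *         -device nvme,drive=nvm,serial=deadbeef,max_ioqpairs=8
 */
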
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/hostmem.h"
#include "sysemu/block-backend.h"
#include "exec/memory.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"

#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_DB_SIZE 4
#define NVME_CMB_BIR 2
#define NVME_PMR_BIR 2

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static void nvme_process_sq(void *opaque);

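/*
 * Addresses that fall inside the Controller Memory Buffer window are
 * serviced from the controller's local cmbuf allocation instead of being
 * read through DMA from guest memory.
 */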
static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
{
    hwaddr low = n->ctrl_mem.addr;
    hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);

    return addr >= low && addr < hi;
}

static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
        return;
    }

    pci_dma_read(&n->parent_obj, addr, buf, size);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_pci_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_pci_nvme_irq_pin();
            assert(cq->vector < 32);
            n->irq_status |= 1 << cq->vector;
            nvme_irq_check(n);
        }
    } else {
        trace_pci_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->vector < 32);
            n->irq_status &= ~(1 << cq->vector);
            nvme_irq_check(n);
        }
    }
}

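/*
 * Map an NVMe PRP pair (PRP1/PRP2) covering 'len' bytes. PRP2 is either a
 * second data page or, for transfers spanning more than two pages, the
 * address of a PRP list of page-aligned entries. When PRP1 points into the
 * CMB the data is gathered into a QEMUIOVector backed by cmbuf; otherwise
 * the entries are accumulated in a QEMUSGList for DMA to guest memory.
 */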
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_pci_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                        prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_pci_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}

static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
    uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_pci_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_pci_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

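/*
 * Drain finished requests into the completion queue: each CQE carries the
 * request status, the originating SQ id, the current SQ head and the CQ
 * phase tag so the guest can spot new entries. An interrupt is asserted if
 * the queue is non-empty afterwards.
 */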
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                       BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}

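/*
 * Read/write path: the PRPs are mapped first, then the transfer is issued
 * either as a scatter/gather DMA request (dma_blk_read/dma_blk_write) when
 * the buffers live in guest memory, or as a vectored block request
 * (blk_aio_preadv/blk_aio_pwritev) when they were mapped from the CMB.
 */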
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                         nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_pci_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_pci_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

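/*
 * Create I/O Submission Queue: the target completion queue must already
 * exist, the SQ id must be unused, the size must not exceed CAP.MQES and
 * the base address must be page aligned; only physically contiguous queues
 * are supported.
 */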
static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_pci_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_pci_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_pci_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_pci_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    int ret;

    ret = msix_vector_use(&n->parent_obj, vector);
    assert(ret == 0);
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

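/*
 * Create I/O Completion Queue: the CQ id must be unused, the size must not
 * exceed CAP.MQES, the base address must be valid and the interrupt vector
 * must fit within the configured MSI-X table (msix_qsize); only physically
 * contiguous queues are supported.
 */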
static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                             NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_pci_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_pci_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(vector >= n->params.msix_qsize)) {
        trace_pci_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
        NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
        prp1, prp2);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_pci_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
        prp1, prp2);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = NVME_IDENTIFY_DATA_SIZE;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_pci_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}

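/*
 * Identify dispatch on the CNS field: namespace data structure, controller
 * data structure, or the list of active namespace ids.
 */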
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
        return nvme_identify_ns(n, c);
    case NVME_ID_CNS_CTRL:
        return nvme_identify_ctrl(n, c);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
        return nvme_identify_nslist(n, c);
    default:
        trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

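/*
 * Timestamp feature: the controller stores the host-supplied value together
 * with the virtual clock at the moment it was set; reads return the stored
 * value plus the elapsed time, truncated to 48 bits as required.
 */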
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_pci_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_pci_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
}

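/*
 * Get/Set Features: only Volatile Write Cache, Number of Queues and
 * Timestamp are implemented. Number of Queues always reports the fixed
 * max_ioqpairs value (0's based) regardless of what the host requests.
 */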
static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                             ((n->params.max_ioqpairs - 1) << 16));
        trace_pci_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
    default:
        trace_pci_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                             sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                    ((dw11 >> 16) & 0xFFFF) + 1,
                                    n->params.max_ioqpairs,
                                    n->params.max_ioqpairs);
        req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
                                      ((n->params.max_ioqpairs - 1) << 16));
        break;
    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
    default:
        trace_pci_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_pci_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

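/*
 * Fetch and execute submission queue entries: commands are read from the
 * queue at the current head (possibly from the CMB), dispatched to the
 * admin handler for queue 0 or the I/O handler otherwise, and completed
 * immediately unless the handler returns NVME_NO_COMPLETE.
 */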
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}

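/*
 * Controller enable (CC.EN 0 -> 1): validate the admin queue addresses and
 * sizes and the CC entry-size and page-size fields against CAP and the
 * identify data before bringing up the admin submission/completion queues.
 */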
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_pci_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_pci_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_pci_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_pci_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_pci_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_pci_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_pci_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_pci_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
        NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
        NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}

static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_pci_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_pci_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_pci_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_pci_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_pci_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_pci_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_pci_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_pci_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    case 0xE00: /* PMRCAP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
                       "invalid write to PMRCAP register, ignored");
        return;
    case 0xE04: /* TODO PMRCTL */
        break;
    case 0xE08: /* PMRSTS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
                       "invalid write to PMRSTS register, ignored");
        return;
    case 0xE0C: /* PMREBS */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
                       "invalid write to PMREBS register, ignored");
        return;
    case 0xE10: /* PMRSWTP */
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
                       "invalid write to PMRSWTP register, ignored");
        return;
    case 0xE14: /* TODO PMRMSC */
        break;
    default:
        NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        /*
         * When PMRWBM bit 1 is set then a read from PMRSTS should ensure
         * prior writes made it to persistent media
         */
        if (addr == 0xE08 &&
            (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
            memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
        }
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}

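/*
 * Doorbell writes land at offset 0x1000 and above with a stride of 4 bytes
 * (CAP.DSTRD is 0): even dwords are submission queue tail doorbells, odd
 * dwords are completion queue head doorbells.
 */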
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

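/*
 * Property validation at realize time: num_queues is deprecated in favour
 * of max_ioqpairs, a backing drive and serial number are mandatory, and a
 * pmrdev backend must be unused and power-of-two sized.
 */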
static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
    NvmeParams *params = &n->params;

    if (params->num_queues) {
        warn_report("num_queues is deprecated; please use max_ioqpairs "
                    "instead");

        params->max_ioqpairs = params->num_queues - 1;
    }

    if (params->max_ioqpairs < 1 ||
        params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
        error_setg(errp, "max_ioqpairs must be between 1 and %d",
                   NVME_MAX_IOQPAIRS);
        return;
    }

    if (params->msix_qsize < 1 ||
        params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "msix_qsize must be between 1 and %d",
                   PCI_MSIX_FLAGS_QSIZE + 1);
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!params->serial) {
        error_setg(errp, "serial property not set");
        return;
    }

    if (!n->params.cmb_size_mb && n->pmrdev) {
        if (host_memory_backend_is_mapped(n->pmrdev)) {
            error_setg(errp, "can't use already busy memdev: %s",
                       object_get_canonical_path_component(OBJECT(n->pmrdev)));
            return;
        }

        if (!is_power_of_2(n->pmrdev->size)) {
            error_setg(errp, "pmr backend size needs to be power of 2 in size");
            return;
        }

        host_memory_backend_set_mapped(n->pmrdev, true);
    }
}

static void nvme_init_state(NvmeCtrl *n)
{
    n->num_namespaces = 1;
    /* add one to max_ioqpairs to account for the admin queue pair */
    n->reg_size = pow2ceil(sizeof(NvmeBar) +
                           2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
    n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
}

static void nvme_init_blk(NvmeCtrl *n, Error **errp)
{
    if (!blkconf_blocksizes(&n->conf, errp)) {
        return;
    }
    blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                  false, errp);
}

static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
{
    int64_t bs_size;
    NvmeIdNs *id_ns = &ns->id_ns;

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg_errno(errp, -bs_size, "could not get backing file size");
        return;
    }

    n->ns_size = bs_size;

    id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
    id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(n, ns));

    /* no thin provisioning */
    id_ns->ncap = id_ns->nsze;
    id_ns->nuse = id_ns->ncap;
}

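/*
 * The CMB is exposed as a 64-bit prefetchable memory BAR (BAR 2) sized from
 * the cmb_size_mb property; only SQS, RDS and WDS are advertised in CMBSZ.
 */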
static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
    NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

    NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
    NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
    NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
    NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);

    n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                          "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
    pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}

static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
{
    /* Controller Capabilities register */
    NVME_CAP_SET_PMRS(n->bar.cap, 1);

    /* PMR Capabilities register */
    n->bar.pmrcap = 0;
    NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
    NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
    /* Turn on bit 1 support */
    NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
    NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
    NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);

    /* PMR Control register */
    n->bar.pmrctl = 0;
    NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);

    /* PMR Status register */
    n->bar.pmrsts = 0;
    NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
    NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);

    /* PMR Elasticity Buffer Size register */
    n->bar.pmrebs = 0;
    NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
    NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);

    /* PMR Sustained Write Throughput register */
    n->bar.pmrswtp = 0;
    NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
    NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);

    /* PMR Memory Space Control register */
    n->bar.pmrmsc = 0;
    NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
    NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);

    pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
                     PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64 |
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}

static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
    uint8_t *pci_conf = pci_dev->config;

    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_conf, 0x2);
    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
                          n->reg_size);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
                     PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
    if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
        return;
    }

    if (n->params.cmb_size_mb) {
        nvme_init_cmb(n, pci_dev);
    } else if (n->pmrdev) {
        nvme_init_pmr(n, pci_dev);
    }
}

static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
    NvmeIdCtrl *id = &n->id_ctrl;
    uint8_t *pci_conf = pci_dev->config;

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;
}

static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    Error *local_err = NULL;

    int i;

    nvme_check_constraints(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_state(n);
    nvme_init_blk(n, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_pci(n, pci_dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    nvme_init_ctrl(n, pci_dev);

    for (i = 0; i < n->num_namespaces; i++) {
        nvme_init_namespace(n, &n->namespaces[i], &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->params.cmb_size_mb) {
        g_free(n->cmbuf);
    }

    if (n->pmrdev) {
        host_memory_backend_set_mapped(n->pmrdev, false);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
                     HostMemoryBackend *),
    DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
    DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
    DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    device_class_set_props(dc, nvme_props);
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj));
}

static const TypeInfo nvme_info = {
    .name = TYPE_NVME,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)