1 /*
2  * QEMU NVM Express Controller
3  *
4  * Copyright (c) 2012, Intel Corporation
5  *
6  * Written by Keith Busch <[email protected]>
7  *
8  * This code is licensed under the GNU GPL v2 or later.
9  */
10
11 /**
12  * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
13  *
14  *  https://nvmexpress.org/developers/nvme-specification/
15  */
16
17 /**
18  * Usage: add options:
19  *      -drive file=<file>,if=none,id=<drive_id>
20  *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
21  *              cmb_size_mb=<cmb_size_mb[optional]>, \
22  *              [pmrdev=<mem_backend_file_id>,] \
23  *              max_ioqpairs=<N[optional]>, \
24  *              aerl=<N[optional]>, aer_max_queued=<N[optional]>, \
25  *              mdts=<N[optional]>
26  *
27  * Note that cmb_size_mb denotes the size of the CMB in MB. The CMB is assumed
28  * to be at offset 0 in BAR2 and currently supports only WDS, RDS and SQS.
29  *
30  * The cmb_size_mb= and pmrdev= options are mutually exclusive due to a
31  * limitation in the available BARs. cmb_size_mb= takes precedence over
32  * pmrdev= when both are provided.
33  * PMR emulation is enabled by pointing pmrdev= at a memory-backend-file object.
34  * For example:
35  * -object memory-backend-file,id=<mem_id>,share=on,mem-path=<file_path>, \
36  *  size=<size> .... -device nvme,...,pmrdev=<mem_id>
37  *
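 * Example invocation (values are illustrative only):
 *      -drive file=nvme.img,if=none,id=nvm \
 *      -device nvme,drive=nvm,serial=deadbeef,max_ioqpairs=8,aerl=3,mdts=7
 *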
38  *
39  * nvme device parameters
40  * ~~~~~~~~~~~~~~~~~~~~~~
41  * - `aerl`
42  *   The Asynchronous Event Request Limit (AERL). Indicates the maximum number
43  *   of concurrently outstanding Asynchronous Event Request commands supported
44  *   by the controller. This is a 0's based value.
45  *
46  * - `aer_max_queued`
47  *   This is the maximum number of events that the device will enqueue for
48  *   completion when there are no outstanding AERs. When the maximum number of
49  *   enqueued events is reached, subsequent events will be dropped.
50  *
51  */
52
53 #include "qemu/osdep.h"
54 #include "qemu/units.h"
55 #include "qemu/error-report.h"
56 #include "hw/block/block.h"
57 #include "hw/pci/msix.h"
58 #include "hw/pci/pci.h"
59 #include "hw/qdev-properties.h"
60 #include "migration/vmstate.h"
61 #include "sysemu/sysemu.h"
62 #include "qapi/error.h"
63 #include "qapi/visitor.h"
64 #include "sysemu/hostmem.h"
65 #include "sysemu/block-backend.h"
66 #include "exec/memory.h"
67 #include "qemu/log.h"
68 #include "qemu/module.h"
69 #include "qemu/cutils.h"
70 #include "trace.h"
71 #include "nvme.h"
72
73 #define NVME_MAX_IOQPAIRS 0xffff
74 #define NVME_DB_SIZE  4
75 #define NVME_SPEC_VER 0x00010300
76 #define NVME_CMB_BIR 2
77 #define NVME_PMR_BIR 2
78 #define NVME_TEMPERATURE 0x143
79 #define NVME_TEMPERATURE_WARNING 0x157
80 #define NVME_TEMPERATURE_CRITICAL 0x175
81 #define NVME_NUM_FW_SLOTS 1
82
83 #define NVME_GUEST_ERR(trace, fmt, ...) \
84     do { \
85         (trace_##trace)(__VA_ARGS__); \
86         qemu_log_mask(LOG_GUEST_ERROR, #trace \
87             " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
88     } while (0)
89
90 static const bool nvme_feature_support[NVME_FID_MAX] = {
91     [NVME_ARBITRATION]              = true,
92     [NVME_POWER_MANAGEMENT]         = true,
93     [NVME_TEMPERATURE_THRESHOLD]    = true,
94     [NVME_ERROR_RECOVERY]           = true,
95     [NVME_VOLATILE_WRITE_CACHE]     = true,
96     [NVME_NUMBER_OF_QUEUES]         = true,
97     [NVME_INTERRUPT_COALESCING]     = true,
98     [NVME_INTERRUPT_VECTOR_CONF]    = true,
99     [NVME_WRITE_ATOMICITY]          = true,
100     [NVME_ASYNCHRONOUS_EVENT_CONF]  = true,
101     [NVME_TIMESTAMP]                = true,
102 };
103
104 static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
105     [NVME_TEMPERATURE_THRESHOLD]    = NVME_FEAT_CAP_CHANGE,
106     [NVME_VOLATILE_WRITE_CACHE]     = NVME_FEAT_CAP_CHANGE,
107     [NVME_NUMBER_OF_QUEUES]         = NVME_FEAT_CAP_CHANGE,
108     [NVME_ASYNCHRONOUS_EVENT_CONF]  = NVME_FEAT_CAP_CHANGE,
109     [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
110 };
111
112 static void nvme_process_sq(void *opaque);
113
114 static uint16_t nvme_cid(NvmeRequest *req)
115 {
116     if (!req) {
117         return 0xffff;
118     }
119
120     return le16_to_cpu(req->cqe.cid);
121 }
122
123 static uint16_t nvme_sqid(NvmeRequest *req)
124 {
125     return le16_to_cpu(req->sq->sqid);
126 }
127
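/*
 * Return true if the guest physical address falls within the region backing
 * the Controller Memory Buffer (CMB).
 */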
128 static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
129 {
130     hwaddr low = n->ctrl_mem.addr;
131     hwaddr hi  = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);
132
133     return addr >= low && addr < hi;
134 }
135
136 static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
137 {
138     assert(nvme_addr_is_cmb(n, addr));
139
140     return &n->cmbuf[addr - n->ctrl_mem.addr];
141 }
142
143 static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
144 {
145     if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
146         memcpy(buf, nvme_addr_to_cmb(n, addr), size);
147         return;
148     }
149
150     pci_dma_read(&n->parent_obj, addr, buf, size);
151 }
152
153 static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
154 {
155     return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
156 }
157
158 static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
159 {
160     return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
161 }
162
163 static void nvme_inc_cq_tail(NvmeCQueue *cq)
164 {
165     cq->tail++;
166     if (cq->tail >= cq->size) {
167         cq->tail = 0;
168         cq->phase = !cq->phase;
169     }
170 }
171
172 static void nvme_inc_sq_head(NvmeSQueue *sq)
173 {
174     sq->head = (sq->head + 1) % sq->size;
175 }
176
177 static uint8_t nvme_cq_full(NvmeCQueue *cq)
178 {
179     return (cq->tail + 1) % cq->size == cq->head;
180 }
181
182 static uint8_t nvme_sq_empty(NvmeSQueue *sq)
183 {
184     return sq->head == sq->tail;
185 }
186
187 static void nvme_irq_check(NvmeCtrl *n)
188 {
189     if (msix_enabled(&(n->parent_obj))) {
190         return;
191     }
192     if (~n->bar.intms & n->irq_status) {
193         pci_irq_assert(&n->parent_obj);
194     } else {
195         pci_irq_deassert(&n->parent_obj);
196     }
197 }
198
199 static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
200 {
201     if (cq->irq_enabled) {
202         if (msix_enabled(&(n->parent_obj))) {
203             trace_pci_nvme_irq_msix(cq->vector);
204             msix_notify(&(n->parent_obj), cq->vector);
205         } else {
206             trace_pci_nvme_irq_pin();
207             assert(cq->vector < 32);
208             n->irq_status |= 1 << cq->vector;
209             nvme_irq_check(n);
210         }
211     } else {
212         trace_pci_nvme_irq_masked();
213     }
214 }
215
216 static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
217 {
218     if (cq->irq_enabled) {
219         if (msix_enabled(&(n->parent_obj))) {
220             return;
221         } else {
222             assert(cq->vector < 32);
223             n->irq_status &= ~(1 << cq->vector);
224             nvme_irq_check(n);
225         }
226     }
227 }
228
229 static void nvme_req_clear(NvmeRequest *req)
230 {
231     req->ns = NULL;
232     memset(&req->cqe, 0x0, sizeof(req->cqe));
233 }
234
235 static void nvme_req_exit(NvmeRequest *req)
236 {
237     if (req->qsg.sg) {
238         qemu_sglist_destroy(&req->qsg);
239     }
240
241     if (req->iov.iov) {
242         qemu_iovec_destroy(&req->iov);
243     }
244 }
245
246 static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr,
247                                   size_t len)
248 {
249     if (!len) {
250         return NVME_SUCCESS;
251     }
252
253     trace_pci_nvme_map_addr_cmb(addr, len);
254
255     if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len - 1)) {
256         return NVME_DATA_TRAS_ERROR;
257     }
258
259     qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len);
260
261     return NVME_SUCCESS;
262 }
263
264 static uint16_t nvme_map_addr(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov,
265                               hwaddr addr, size_t len)
266 {
267     if (!len) {
268         return NVME_SUCCESS;
269     }
270
271     trace_pci_nvme_map_addr(addr, len);
272
273     if (nvme_addr_is_cmb(n, addr)) {
274         if (qsg && qsg->sg) {
275             return NVME_INVALID_USE_OF_CMB | NVME_DNR;
276         }
277
278         assert(iov);
279
280         if (!iov->iov) {
281             qemu_iovec_init(iov, 1);
282         }
283
284         return nvme_map_addr_cmb(n, iov, addr, len);
285     }
286
287     if (iov && iov->iov) {
288         return NVME_INVALID_USE_OF_CMB | NVME_DNR;
289     }
290
291     assert(qsg);
292
293     if (!qsg->sg) {
294         pci_dma_sglist_init(qsg, &n->parent_obj, 1);
295     }
296
297     qemu_sglist_add(qsg, addr, len);
298
299     return NVME_SUCCESS;
300 }
301
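/*
 * Map a PRP1/PRP2 pair describing a transfer of 'len' bytes into either
 * req->iov (when the pages live in the CMB) or req->qsg (host memory DMA).
 * Transfers larger than two pages are described by a (possibly chained)
 * PRP list pointed to by PRP2.
 */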
302 static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
303                              uint32_t len, NvmeRequest *req)
304 {
305     hwaddr trans_len = n->page_size - (prp1 % n->page_size);
306     trans_len = MIN(len, trans_len);
307     int num_prps = (len >> n->page_bits) + 1;
308     uint16_t status;
309     bool prp_list_in_cmb = false;
310
311     QEMUSGList *qsg = &req->qsg;
312     QEMUIOVector *iov = &req->iov;
313
314     trace_pci_nvme_map_prp(trans_len, len, prp1, prp2, num_prps);
315
316     if (unlikely(!prp1)) {
317         trace_pci_nvme_err_invalid_prp();
318         return NVME_INVALID_FIELD | NVME_DNR;
319     }
320
321     if (nvme_addr_is_cmb(n, prp1)) {
322         qemu_iovec_init(iov, num_prps);
323     } else {
324         pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
325     }
326
327     status = nvme_map_addr(n, qsg, iov, prp1, trans_len);
328     if (status) {
329         return status;
330     }
331
332     len -= trans_len;
333     if (len) {
334         if (unlikely(!prp2)) {
335             trace_pci_nvme_err_invalid_prp2_missing();
336             return NVME_INVALID_FIELD | NVME_DNR;
337         }
338
339         if (len > n->page_size) {
340             uint64_t prp_list[n->max_prp_ents];
341             uint32_t nents, prp_trans;
342             int i = 0;
343
344             if (nvme_addr_is_cmb(n, prp2)) {
345                 prp_list_in_cmb = true;
346             }
347
348             nents = (len + n->page_size - 1) >> n->page_bits;
349             prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
350             nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
351             while (len != 0) {
352                 uint64_t prp_ent = le64_to_cpu(prp_list[i]);
353
354                 if (i == n->max_prp_ents - 1 && len > n->page_size) {
355                     if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
356                         trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
357                         return NVME_INVALID_FIELD | NVME_DNR;
358                     }
359
360                     if (prp_list_in_cmb != nvme_addr_is_cmb(n, prp_ent)) {
361                         return NVME_INVALID_USE_OF_CMB | NVME_DNR;
362                     }
363
364                     i = 0;
365                     nents = (len + n->page_size - 1) >> n->page_bits;
366                     prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
367                     nvme_addr_read(n, prp_ent, (void *)prp_list,
368                         prp_trans);
369                     prp_ent = le64_to_cpu(prp_list[i]);
370                 }
371
372                 if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
373                     trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
374                     return NVME_INVALID_FIELD | NVME_DNR;
375                 }
376
377                 trans_len = MIN(len, n->page_size);
378                 status = nvme_map_addr(n, qsg, iov, prp_ent, trans_len);
379                 if (status) {
380                     return status;
381                 }
382
383                 len -= trans_len;
384                 i++;
385             }
386         } else {
387             if (unlikely(prp2 & (n->page_size - 1))) {
388                 trace_pci_nvme_err_invalid_prp2_align(prp2);
389                 return NVME_INVALID_FIELD | NVME_DNR;
390             }
391             status = nvme_map_addr(n, qsg, iov, prp2, len);
392             if (status) {
393                 return status;
394             }
395         }
396     }
397
398     return NVME_SUCCESS;
399 }
400
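/*
 * Map the given PRPs and copy 'len' bytes between the host buffer 'ptr' and
 * guest memory, in the direction given by 'dir'.
 */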
401 static uint16_t nvme_dma_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
402                              uint64_t prp1, uint64_t prp2, DMADirection dir,
403                              NvmeRequest *req)
404 {
405     uint16_t status = NVME_SUCCESS;
406
407     status = nvme_map_prp(n, prp1, prp2, len, req);
408     if (status) {
409         return status;
410     }
411
412     /* assert that only one of qsg and iov carries data */
413     assert((req->qsg.nsg > 0) != (req->iov.niov > 0));
414
415     if (req->qsg.nsg > 0) {
416         uint64_t residual;
417
418         if (dir == DMA_DIRECTION_TO_DEVICE) {
419             residual = dma_buf_write(ptr, len, &req->qsg);
420         } else {
421             residual = dma_buf_read(ptr, len, &req->qsg);
422         }
423
424         if (unlikely(residual)) {
425             trace_pci_nvme_err_invalid_dma();
426             status = NVME_INVALID_FIELD | NVME_DNR;
427         }
428     } else {
429         size_t bytes;
430
431         if (dir == DMA_DIRECTION_TO_DEVICE) {
432             bytes = qemu_iovec_to_buf(&req->iov, 0, ptr, len);
433         } else {
434             bytes = qemu_iovec_from_buf(&req->iov, 0, ptr, len);
435         }
436
437         if (unlikely(bytes != len)) {
438             trace_pci_nvme_err_invalid_dma();
439             status = NVME_INVALID_FIELD | NVME_DNR;
440         }
441     }
442
443     return status;
444 }
445
446 static uint16_t nvme_map_dptr(NvmeCtrl *n, size_t len, NvmeRequest *req)
447 {
448     NvmeCmd *cmd = &req->cmd;
449     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
450     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
451
452     return nvme_map_prp(n, prp1, prp2, len, req);
453 }
454
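/*
 * Completion queue timer callback: write pending completion entries to the
 * guest CQ ring and assert the queue's interrupt if the queue is non-empty.
 */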
455 static void nvme_post_cqes(void *opaque)
456 {
457     NvmeCQueue *cq = opaque;
458     NvmeCtrl *n = cq->ctrl;
459     NvmeRequest *req, *next;
460
461     QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
462         NvmeSQueue *sq;
463         hwaddr addr;
464
465         if (nvme_cq_full(cq)) {
466             break;
467         }
468
469         QTAILQ_REMOVE(&cq->req_list, req, entry);
470         sq = req->sq;
471         req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
472         req->cqe.sq_id = cpu_to_le16(sq->sqid);
473         req->cqe.sq_head = cpu_to_le16(sq->head);
474         addr = cq->dma_addr + cq->tail * n->cqe_size;
475         nvme_inc_cq_tail(cq);
476         pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
477             sizeof(req->cqe));
478         nvme_req_exit(req);
479         QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
480     }
481     if (cq->tail != cq->head) {
482         nvme_irq_assert(n, cq);
483     }
484 }
485
486 static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
487 {
488     assert(cq->cqid == req->sq->cqid);
489     trace_pci_nvme_enqueue_req_completion(nvme_cid(req), cq->cqid,
490                                           req->status);
491     QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
492     QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
493     timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
494 }
495
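/*
 * Complete outstanding Asynchronous Event Request commands with any queued
 * events whose event type is not currently masked.
 */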
496 static void nvme_process_aers(void *opaque)
497 {
498     NvmeCtrl *n = opaque;
499     NvmeAsyncEvent *event, *next;
500
501     trace_pci_nvme_process_aers(n->aer_queued);
502
503     QTAILQ_FOREACH_SAFE(event, &n->aer_queue, entry, next) {
504         NvmeRequest *req;
505         NvmeAerResult *result;
506
507         /* can't post cqe if there is nothing to complete */
508         if (!n->outstanding_aers) {
509             trace_pci_nvme_no_outstanding_aers();
510             break;
511         }
512
513         /* ignore if masked (cqe posted, but event not cleared) */
514         if (n->aer_mask & (1 << event->result.event_type)) {
515             trace_pci_nvme_aer_masked(event->result.event_type, n->aer_mask);
516             continue;
517         }
518
519         QTAILQ_REMOVE(&n->aer_queue, event, entry);
520         n->aer_queued--;
521
522         n->aer_mask |= 1 << event->result.event_type;
523         n->outstanding_aers--;
524
525         req = n->aer_reqs[n->outstanding_aers];
526
527         result = (NvmeAerResult *) &req->cqe.result;
528         result->event_type = event->result.event_type;
529         result->event_info = event->result.event_info;
530         result->log_page = event->result.log_page;
531         g_free(event);
532
533         req->status = NVME_SUCCESS;
534
535         trace_pci_nvme_aer_post_cqe(result->event_type, result->event_info,
536                                     result->log_page);
537
538         nvme_enqueue_req_completion(&n->admin_cq, req);
539     }
540 }
541
542 static void nvme_enqueue_event(NvmeCtrl *n, uint8_t event_type,
543                                uint8_t event_info, uint8_t log_page)
544 {
545     NvmeAsyncEvent *event;
546
547     trace_pci_nvme_enqueue_event(event_type, event_info, log_page);
548
549     if (n->aer_queued == n->params.aer_max_queued) {
550         trace_pci_nvme_enqueue_event_noqueue(n->aer_queued);
551         return;
552     }
553
554     event = g_new(NvmeAsyncEvent, 1);
555     event->result = (NvmeAerResult) {
556         .event_type = event_type,
557         .event_info = event_info,
558         .log_page   = log_page,
559     };
560
561     QTAILQ_INSERT_TAIL(&n->aer_queue, event, entry);
562     n->aer_queued++;
563
564     nvme_process_aers(n);
565 }
566
567 static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
568 {
569     n->aer_mask &= ~(1 << event_type);
570     if (!QTAILQ_EMPTY(&n->aer_queue)) {
571         nvme_process_aers(n);
572     }
573 }
574
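/*
 * Check a transfer length against the Maximum Data Transfer Size (MDTS);
 * an mdts value of zero means no limit.
 */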
575 static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
576 {
577     uint8_t mdts = n->params.mdts;
578
579     if (mdts && len > n->page_size << mdts) {
580         return NVME_INVALID_FIELD | NVME_DNR;
581     }
582
583     return NVME_SUCCESS;
584 }
585
586 static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
587                                          uint64_t slba, uint32_t nlb)
588 {
589     uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
590
591     if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
592         return NVME_LBA_RANGE | NVME_DNR;
593     }
594
595     return NVME_SUCCESS;
596 }
597
598 static void nvme_rw_cb(void *opaque, int ret)
599 {
600     NvmeRequest *req = opaque;
601     NvmeSQueue *sq = req->sq;
602     NvmeCtrl *n = sq->ctrl;
603     NvmeCQueue *cq = n->cq[sq->cqid];
604
605     trace_pci_nvme_rw_cb(nvme_cid(req));
606
607     if (!ret) {
608         block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
609         req->status = NVME_SUCCESS;
610     } else {
611         block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
612         req->status = NVME_INTERNAL_DEV_ERROR;
613     }
614
615     nvme_enqueue_req_completion(cq, req);
616 }
617
618 static uint16_t nvme_flush(NvmeCtrl *n, NvmeRequest *req)
619 {
620     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
621          BLOCK_ACCT_FLUSH);
622     req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
623
624     return NVME_NO_COMPLETE;
625 }
626
627 static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
628 {
629     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
630     NvmeNamespace *ns = req->ns;
631     const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
632     const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
633     uint64_t slba = le64_to_cpu(rw->slba);
634     uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
635     uint64_t offset = slba << data_shift;
636     uint32_t count = nlb << data_shift;
637     uint16_t status;
638
639     trace_pci_nvme_write_zeroes(nvme_cid(req), slba, nlb);
640
641     status = nvme_check_bounds(n, ns, slba, nlb);
642     if (status) {
643         trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
644         return status;
645     }
646
647     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
648                      BLOCK_ACCT_WRITE);
649     req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
650                                         BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
651     return NVME_NO_COMPLETE;
652 }
653
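/*
 * Handle an NVMe Read or Write command: check MDTS and LBA bounds, map the
 * data pointer and submit the transfer either as scatter/gather DMA or as an
 * iovec-based request when the data resides in the CMB.
 */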
654 static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
655 {
656     NvmeRwCmd *rw = (NvmeRwCmd *)&req->cmd;
657     NvmeNamespace *ns = req->ns;
658     uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
659     uint64_t slba = le64_to_cpu(rw->slba);
660
661     uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
662     uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
663     uint64_t data_size = (uint64_t)nlb << data_shift;
664     uint64_t data_offset = slba << data_shift;
665     int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
666     enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
667     uint16_t status;
668
669     trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
670
671     status = nvme_check_mdts(n, data_size);
672     if (status) {
673         trace_pci_nvme_err_mdts(nvme_cid(req), data_size);
674         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
675         return status;
676     }
677
678     status = nvme_check_bounds(n, ns, slba, nlb);
679     if (status) {
680         trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
681         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
682         return status;
683     }
684
685     if (nvme_map_dptr(n, data_size, req)) {
686         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
687         return NVME_INVALID_FIELD | NVME_DNR;
688     }
689
690     if (req->qsg.nsg > 0) {
691         block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->qsg.size,
692                          acct);
693         req->aiocb = is_write ?
694             dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
695                           nvme_rw_cb, req) :
696             dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
697                          nvme_rw_cb, req);
698     } else {
699         block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->iov.size,
700                          acct);
701         req->aiocb = is_write ?
702             blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
703                             req) :
704             blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
705                            req);
706     }
707
708     return NVME_NO_COMPLETE;
709 }
710
711 static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
712 {
713     uint32_t nsid = le32_to_cpu(req->cmd.nsid);
714
715     trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
716                           req->cmd.opcode);
717
718     if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
719         trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
720         return NVME_INVALID_NSID | NVME_DNR;
721     }
722
723     req->ns = &n->namespaces[nsid - 1];
724     switch (req->cmd.opcode) {
725     case NVME_CMD_FLUSH:
726         return nvme_flush(n, req);
727     case NVME_CMD_WRITE_ZEROES:
728         return nvme_write_zeroes(n, req);
729     case NVME_CMD_WRITE:
730     case NVME_CMD_READ:
731         return nvme_rw(n, req);
732     default:
733         trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
734         return NVME_INVALID_OPCODE | NVME_DNR;
735     }
736 }
737
738 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
739 {
740     n->sq[sq->sqid] = NULL;
741     timer_del(sq->timer);
742     timer_free(sq->timer);
743     g_free(sq->io_req);
744     if (sq->sqid) {
745         g_free(sq);
746     }
747 }
748
749 static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeRequest *req)
750 {
751     NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
752     NvmeRequest *r, *next;
753     NvmeSQueue *sq;
754     NvmeCQueue *cq;
755     uint16_t qid = le16_to_cpu(c->qid);
756
757     if (unlikely(!qid || nvme_check_sqid(n, qid))) {
758         trace_pci_nvme_err_invalid_del_sq(qid);
759         return NVME_INVALID_QID | NVME_DNR;
760     }
761
762     trace_pci_nvme_del_sq(qid);
763
764     sq = n->sq[qid];
765     while (!QTAILQ_EMPTY(&sq->out_req_list)) {
766         r = QTAILQ_FIRST(&sq->out_req_list);
767         assert(r->aiocb);
768         blk_aio_cancel(r->aiocb);
769     }
770     if (!nvme_check_cqid(n, sq->cqid)) {
771         cq = n->cq[sq->cqid];
772         QTAILQ_REMOVE(&cq->sq_list, sq, entry);
773
774         nvme_post_cqes(cq);
775         QTAILQ_FOREACH_SAFE(r, &cq->req_list, entry, next) {
776             if (r->sq == sq) {
777                 QTAILQ_REMOVE(&cq->req_list, r, entry);
778                 QTAILQ_INSERT_TAIL(&sq->req_list, r, entry);
779             }
780         }
781     }
782
783     nvme_free_sq(sq, n);
784     return NVME_SUCCESS;
785 }
786
787 static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
788     uint16_t sqid, uint16_t cqid, uint16_t size)
789 {
790     int i;
791     NvmeCQueue *cq;
792
793     sq->ctrl = n;
794     sq->dma_addr = dma_addr;
795     sq->sqid = sqid;
796     sq->size = size;
797     sq->cqid = cqid;
798     sq->head = sq->tail = 0;
799     sq->io_req = g_new0(NvmeRequest, sq->size);
800
801     QTAILQ_INIT(&sq->req_list);
802     QTAILQ_INIT(&sq->out_req_list);
803     for (i = 0; i < sq->size; i++) {
804         sq->io_req[i].sq = sq;
805         QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
806     }
807     sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
808
809     assert(n->cq[cqid]);
810     cq = n->cq[cqid];
811     QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
812     n->sq[sqid] = sq;
813 }
814
815 static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
816 {
817     NvmeSQueue *sq;
818     NvmeCreateSq *c = (NvmeCreateSq *)&req->cmd;
819
820     uint16_t cqid = le16_to_cpu(c->cqid);
821     uint16_t sqid = le16_to_cpu(c->sqid);
822     uint16_t qsize = le16_to_cpu(c->qsize);
823     uint16_t qflags = le16_to_cpu(c->sq_flags);
824     uint64_t prp1 = le64_to_cpu(c->prp1);
825
826     trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
827
828     if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
829         trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
830         return NVME_INVALID_CQID | NVME_DNR;
831     }
832     if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
833         trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
834         return NVME_INVALID_QID | NVME_DNR;
835     }
836     if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
837         trace_pci_nvme_err_invalid_create_sq_size(qsize);
838         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
839     }
840     if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
841         trace_pci_nvme_err_invalid_create_sq_addr(prp1);
842         return NVME_INVALID_FIELD | NVME_DNR;
843     }
844     if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
845         trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
846         return NVME_INVALID_FIELD | NVME_DNR;
847     }
848     sq = g_malloc0(sizeof(*sq));
849     nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
850     return NVME_SUCCESS;
851 }
852
853 static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
854                                 uint64_t off, NvmeRequest *req)
855 {
856     NvmeCmd *cmd = &req->cmd;
857     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
858     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
859     uint32_t nsid = le32_to_cpu(cmd->nsid);
860
861     uint32_t trans_len;
862     time_t current_ms;
863     uint64_t units_read = 0, units_written = 0;
864     uint64_t read_commands = 0, write_commands = 0;
865     NvmeSmartLog smart;
866     BlockAcctStats *s;
867
868     if (nsid && nsid != 0xffffffff) {
869         return NVME_INVALID_FIELD | NVME_DNR;
870     }
871
872     s = blk_get_stats(n->conf.blk);
873
874     units_read = s->nr_bytes[BLOCK_ACCT_READ] >> BDRV_SECTOR_BITS;
875     units_written = s->nr_bytes[BLOCK_ACCT_WRITE] >> BDRV_SECTOR_BITS;
876     read_commands = s->nr_ops[BLOCK_ACCT_READ];
877     write_commands = s->nr_ops[BLOCK_ACCT_WRITE];
878
879     if (off > sizeof(smart)) {
880         return NVME_INVALID_FIELD | NVME_DNR;
881     }
882
883     trans_len = MIN(sizeof(smart) - off, buf_len);
884
885     memset(&smart, 0x0, sizeof(smart));
886
887     smart.data_units_read[0] = cpu_to_le64(DIV_ROUND_UP(units_read, 1000));
888     smart.data_units_written[0] = cpu_to_le64(DIV_ROUND_UP(units_written,
889                                                            1000));
890     smart.host_read_commands[0] = cpu_to_le64(read_commands);
891     smart.host_write_commands[0] = cpu_to_le64(write_commands);
892
893     smart.temperature = cpu_to_le16(n->temperature);
894
895     if ((n->temperature >= n->features.temp_thresh_hi) ||
896         (n->temperature <= n->features.temp_thresh_low)) {
897         smart.critical_warning |= NVME_SMART_TEMPERATURE;
898     }
899
900     current_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
901     smart.power_on_hours[0] =
902         cpu_to_le64((((current_ms - n->starttime_ms) / 1000) / 60) / 60);
903
904     if (!rae) {
905         nvme_clear_events(n, NVME_AER_TYPE_SMART);
906     }
907
908     return nvme_dma_prp(n, (uint8_t *) &smart + off, trans_len, prp1, prp2,
909                         DMA_DIRECTION_FROM_DEVICE, req);
910 }
911
912 static uint16_t nvme_fw_log_info(NvmeCtrl *n, uint32_t buf_len, uint64_t off,
913                                  NvmeRequest *req)
914 {
915     uint32_t trans_len;
916     NvmeCmd *cmd = &req->cmd;
917     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
918     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
919     NvmeFwSlotInfoLog fw_log = {
920         .afi = 0x1,
921     };
922
923     strpadcpy((char *)&fw_log.frs1, sizeof(fw_log.frs1), "1.0", ' ');
924
925     if (off > sizeof(fw_log)) {
926         return NVME_INVALID_FIELD | NVME_DNR;
927     }
928
929     trans_len = MIN(sizeof(fw_log) - off, buf_len);
930
931     return nvme_dma_prp(n, (uint8_t *) &fw_log + off, trans_len, prp1, prp2,
932                         DMA_DIRECTION_FROM_DEVICE, req);
933 }
934
935 static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
936                                 uint64_t off, NvmeRequest *req)
937 {
938     uint32_t trans_len;
939     NvmeCmd *cmd = &req->cmd;
940     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
941     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
942     NvmeErrorLog errlog;
943
944     if (!rae) {
945         nvme_clear_events(n, NVME_AER_TYPE_ERROR);
946     }
947
948     if (off > sizeof(errlog)) {
949         return NVME_INVALID_FIELD | NVME_DNR;
950     }
951
952     memset(&errlog, 0x0, sizeof(errlog));
953
954     trans_len = MIN(sizeof(errlog) - off, buf_len);
955
956     return nvme_dma_prp(n, (uint8_t *)&errlog, trans_len, prp1, prp2,
957                         DMA_DIRECTION_FROM_DEVICE, req);
958 }
959
960 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
961 {
962     NvmeCmd *cmd = &req->cmd;
963
964     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
965     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
966     uint32_t dw12 = le32_to_cpu(cmd->cdw12);
967     uint32_t dw13 = le32_to_cpu(cmd->cdw13);
968     uint8_t  lid = dw10 & 0xff;
969     uint8_t  lsp = (dw10 >> 8) & 0xf;
970     uint8_t  rae = (dw10 >> 15) & 0x1;
971     uint32_t numdl, numdu;
972     uint64_t off, lpol, lpou;
973     size_t   len;
974     uint16_t status;
975
976     numdl = (dw10 >> 16);
977     numdu = (dw11 & 0xffff);
978     lpol = dw12;
979     lpou = dw13;
980
981     len = (((numdu << 16) | numdl) + 1) << 2;
982     off = (lpou << 32ULL) | lpol;
983
984     if (off & 0x3) {
985         return NVME_INVALID_FIELD | NVME_DNR;
986     }
987
988     trace_pci_nvme_get_log(nvme_cid(req), lid, lsp, rae, len, off);
989
990     status = nvme_check_mdts(n, len);
991     if (status) {
992         trace_pci_nvme_err_mdts(nvme_cid(req), len);
993         return status;
994     }
995
996     switch (lid) {
997     case NVME_LOG_ERROR_INFO:
998         return nvme_error_info(n, rae, len, off, req);
999     case NVME_LOG_SMART_INFO:
1000         return nvme_smart_info(n, rae, len, off, req);
1001     case NVME_LOG_FW_SLOT_INFO:
1002         return nvme_fw_log_info(n, len, off, req);
1003     default:
1004         trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
1005         return NVME_INVALID_FIELD | NVME_DNR;
1006     }
1007 }
1008
1009 static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
1010 {
1011     n->cq[cq->cqid] = NULL;
1012     timer_del(cq->timer);
1013     timer_free(cq->timer);
1014     msix_vector_unuse(&n->parent_obj, cq->vector);
1015     if (cq->cqid) {
1016         g_free(cq);
1017     }
1018 }
1019
1020 static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeRequest *req)
1021 {
1022     NvmeDeleteQ *c = (NvmeDeleteQ *)&req->cmd;
1023     NvmeCQueue *cq;
1024     uint16_t qid = le16_to_cpu(c->qid);
1025
1026     if (unlikely(!qid || nvme_check_cqid(n, qid))) {
1027         trace_pci_nvme_err_invalid_del_cq_cqid(qid);
1028         return NVME_INVALID_CQID | NVME_DNR;
1029     }
1030
1031     cq = n->cq[qid];
1032     if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
1033         trace_pci_nvme_err_invalid_del_cq_notempty(qid);
1034         return NVME_INVALID_QUEUE_DEL;
1035     }
1036     nvme_irq_deassert(n, cq);
1037     trace_pci_nvme_del_cq(qid);
1038     nvme_free_cq(cq, n);
1039     return NVME_SUCCESS;
1040 }
1041
1042 static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
1043     uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
1044 {
1045     int ret;
1046
1047     ret = msix_vector_use(&n->parent_obj, vector);
1048     assert(ret == 0);
1049     cq->ctrl = n;
1050     cq->cqid = cqid;
1051     cq->size = size;
1052     cq->dma_addr = dma_addr;
1053     cq->phase = 1;
1054     cq->irq_enabled = irq_enabled;
1055     cq->vector = vector;
1056     cq->head = cq->tail = 0;
1057     QTAILQ_INIT(&cq->req_list);
1058     QTAILQ_INIT(&cq->sq_list);
1059     n->cq[cqid] = cq;
1060     cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
1061 }
1062
1063 static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
1064 {
1065     NvmeCQueue *cq;
1066     NvmeCreateCq *c = (NvmeCreateCq *)&req->cmd;
1067     uint16_t cqid = le16_to_cpu(c->cqid);
1068     uint16_t vector = le16_to_cpu(c->irq_vector);
1069     uint16_t qsize = le16_to_cpu(c->qsize);
1070     uint16_t qflags = le16_to_cpu(c->cq_flags);
1071     uint64_t prp1 = le64_to_cpu(c->prp1);
1072
1073     trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
1074                              NVME_CQ_FLAGS_IEN(qflags) != 0);
1075
1076     if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
1077         trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
1078         return NVME_INVALID_CQID | NVME_DNR;
1079     }
1080     if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
1081         trace_pci_nvme_err_invalid_create_cq_size(qsize);
1082         return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
1083     }
1084     if (unlikely(!prp1)) {
1085         trace_pci_nvme_err_invalid_create_cq_addr(prp1);
1086         return NVME_INVALID_FIELD | NVME_DNR;
1087     }
1088     if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
1089         trace_pci_nvme_err_invalid_create_cq_vector(vector);
1090         return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
1091     }
1092     if (unlikely(vector >= n->params.msix_qsize)) {
1093         trace_pci_nvme_err_invalid_create_cq_vector(vector);
1094         return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
1095     }
1096     if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
1097         trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
1098         return NVME_INVALID_FIELD | NVME_DNR;
1099     }
1100
1101     cq = g_malloc0(sizeof(*cq));
1102     nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
1103         NVME_CQ_FLAGS_IEN(qflags));
1104
1105     /*
1106      * It is only required to set qs_created when creating a completion queue;
1107      * creating a submission queue without a matching completion queue will
1108      * fail.
1109      */
1110     n->qs_created = true;
1111     return NVME_SUCCESS;
1112 }
1113
1114 static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeRequest *req)
1115 {
1116     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
1117     uint64_t prp1 = le64_to_cpu(c->prp1);
1118     uint64_t prp2 = le64_to_cpu(c->prp2);
1119
1120     trace_pci_nvme_identify_ctrl();
1121
1122     return nvme_dma_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl), prp1,
1123                         prp2, DMA_DIRECTION_FROM_DEVICE, req);
1124 }
1125
1126 static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
1127 {
1128     NvmeNamespace *ns;
1129     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
1130     uint32_t nsid = le32_to_cpu(c->nsid);
1131     uint64_t prp1 = le64_to_cpu(c->prp1);
1132     uint64_t prp2 = le64_to_cpu(c->prp2);
1133
1134     trace_pci_nvme_identify_ns(nsid);
1135
1136     if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
1137         trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
1138         return NVME_INVALID_NSID | NVME_DNR;
1139     }
1140
1141     ns = &n->namespaces[nsid - 1];
1142
1143     return nvme_dma_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns), prp1,
1144                         prp2, DMA_DIRECTION_FROM_DEVICE, req);
1145 }
1146
1147 static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
1148 {
1149     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
1150     static const int data_len = NVME_IDENTIFY_DATA_SIZE;
1151     uint32_t min_nsid = le32_to_cpu(c->nsid);
1152     uint64_t prp1 = le64_to_cpu(c->prp1);
1153     uint64_t prp2 = le64_to_cpu(c->prp2);
1154     uint32_t *list;
1155     uint16_t ret;
1156     int i, j = 0;
1157
1158     trace_pci_nvme_identify_nslist(min_nsid);
1159
1160     /*
1161      * Both 0xffffffff (NVME_NSID_BROADCAST) and 0xfffffffe are invalid values
1162      * since the Active Namespace ID List should return namespaces with ids
1163      * *higher* than the NSID specified in the command. This is also specified
1164      * in the spec (NVM Express v1.3d, Section 5.15.4).
1165      */
1166     if (min_nsid >= NVME_NSID_BROADCAST - 1) {
1167         return NVME_INVALID_NSID | NVME_DNR;
1168     }
1169
1170     list = g_malloc0(data_len);
1171     for (i = 0; i < n->num_namespaces; i++) {
1172         if (i < min_nsid) {
1173             continue;
1174         }
1175         list[j++] = cpu_to_le32(i + 1);
1176         if (j == data_len / sizeof(uint32_t)) {
1177             break;
1178         }
1179     }
1180     ret = nvme_dma_prp(n, (uint8_t *)list, data_len, prp1, prp2,
1181                        DMA_DIRECTION_FROM_DEVICE, req);
1182     g_free(list);
1183     return ret;
1184 }
1185
1186 static uint16_t nvme_identify_ns_descr_list(NvmeCtrl *n, NvmeRequest *req)
1187 {
1188     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
1189     uint32_t nsid = le32_to_cpu(c->nsid);
1190     uint64_t prp1 = le64_to_cpu(c->prp1);
1191     uint64_t prp2 = le64_to_cpu(c->prp2);
1192
1193     uint8_t list[NVME_IDENTIFY_DATA_SIZE];
1194
1195     struct data {
1196         struct {
1197             NvmeIdNsDescr hdr;
1198             uint8_t v[16];
1199         } uuid;
1200     };
1201
1202     struct data *ns_descrs = (struct data *)list;
1203
1204     trace_pci_nvme_identify_ns_descr_list(nsid);
1205
1206     if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
1207         trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
1208         return NVME_INVALID_NSID | NVME_DNR;
1209     }
1210
1211     memset(list, 0x0, sizeof(list));
1212
1213     /*
1214      * Because the NGUID and EUI64 fields are 0 in the Identify Namespace data
1215      * structure, a Namespace UUID (nidt = 0x3) must be reported in the
1216      * Namespace Identification Descriptor. Add a very basic Namespace UUID
1217      * here.
1218      */
1219     ns_descrs->uuid.hdr.nidt = NVME_NIDT_UUID;
1220     ns_descrs->uuid.hdr.nidl = NVME_NIDT_UUID_LEN;
1221     stl_be_p(&ns_descrs->uuid.v, nsid);
1222
1223     return nvme_dma_prp(n, list, NVME_IDENTIFY_DATA_SIZE, prp1, prp2,
1224                         DMA_DIRECTION_FROM_DEVICE, req);
1225 }
1226
1227 static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
1228 {
1229     NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
1230
1231     switch (le32_to_cpu(c->cns)) {
1232     case NVME_ID_CNS_NS:
1233         return nvme_identify_ns(n, req);
1234     case NVME_ID_CNS_CTRL:
1235         return nvme_identify_ctrl(n, req);
1236     case NVME_ID_CNS_NS_ACTIVE_LIST:
1237         return nvme_identify_nslist(n, req);
1238     case NVME_ID_CNS_NS_DESCR_LIST:
1239         return nvme_identify_ns_descr_list(n, req);
1240     default:
1241         trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
1242         return NVME_INVALID_FIELD | NVME_DNR;
1243     }
1244 }
1245
1246 static uint16_t nvme_abort(NvmeCtrl *n, NvmeRequest *req)
1247 {
1248     uint16_t sqid = le32_to_cpu(req->cmd.cdw10) & 0xffff;
1249
1250     req->cqe.result = 1;
1251     if (nvme_check_sqid(n, sqid)) {
1252         return NVME_INVALID_FIELD | NVME_DNR;
1253     }
1254
1255     return NVME_SUCCESS;
1256 }
1257
1258 static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
1259 {
1260     trace_pci_nvme_setfeat_timestamp(ts);
1261
1262     n->host_timestamp = le64_to_cpu(ts);
1263     n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
1264 }
1265
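/*
 * Compute the current Timestamp feature value: the host-programmed timestamp
 * advanced by the virtual time elapsed since it was set, truncated to 48 bits,
 * with the origin field indicating whether the host ever set it.
 */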
1266 static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
1267 {
1268     uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
1269     uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;
1270
1271     union nvme_timestamp {
1272         struct {
1273             uint64_t timestamp:48;
1274             uint64_t sync:1;
1275             uint64_t origin:3;
1276             uint64_t rsvd1:12;
1277         };
1278         uint64_t all;
1279     };
1280
1281     union nvme_timestamp ts;
1282     ts.all = 0;
1283
1284     /*
1285      * If the sum of the Timestamp value set by the host and the elapsed
1286      * time exceeds 2^48, the value returned should be reduced modulo 2^48.
1287      */
1288     ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;
1289
1290     /* If the host timestamp is non-zero, set the timestamp origin */
1291     ts.origin = n->host_timestamp ? 0x01 : 0x00;
1292
1293     trace_pci_nvme_getfeat_timestamp(ts.all);
1294
1295     return cpu_to_le64(ts.all);
1296 }
1297
1298 static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
1299 {
1300     NvmeCmd *cmd = &req->cmd;
1301     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
1302     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
1303
1304     uint64_t timestamp = nvme_get_timestamp(n);
1305
1306     return nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
1307                         prp2, DMA_DIRECTION_FROM_DEVICE, req);
1308 }
1309
1310 static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeRequest *req)
1311 {
1312     NvmeCmd *cmd = &req->cmd;
1313     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
1314     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
1315     uint32_t nsid = le32_to_cpu(cmd->nsid);
1316     uint32_t result;
1317     uint8_t fid = NVME_GETSETFEAT_FID(dw10);
1318     NvmeGetFeatureSelect sel = NVME_GETFEAT_SELECT(dw10);
1319     uint16_t iv;
1320
1321     static const uint32_t nvme_feature_default[NVME_FID_MAX] = {
1322         [NVME_ARBITRATION] = NVME_ARB_AB_NOLIMIT,
1323     };
1324
1325     trace_pci_nvme_getfeat(nvme_cid(req), fid, sel, dw11);
1326
1327     if (!nvme_feature_support[fid]) {
1328         return NVME_INVALID_FIELD | NVME_DNR;
1329     }
1330
1331     if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
1332         if (!nsid || nsid > n->num_namespaces) {
1333             /*
1334              * The Reservation Notification Mask and Reservation Persistence
1335              * features require a status code of Invalid Field in Command when
1336              * NSID is 0xFFFFFFFF. Since the device does not support those
1337              * features we can always return Invalid Namespace or Format as we
1338              * should do for all other features.
1339              */
1340             return NVME_INVALID_NSID | NVME_DNR;
1341         }
1342     }
1343
1344     switch (sel) {
1345     case NVME_GETFEAT_SELECT_CURRENT:
1346         break;
1347     case NVME_GETFEAT_SELECT_SAVED:
1348         /* no features are saveable by the controller; fallthrough */
1349     case NVME_GETFEAT_SELECT_DEFAULT:
1350         goto defaults;
1351     case NVME_GETFEAT_SELECT_CAP:
1352         result = nvme_feature_cap[fid];
1353         goto out;
1354     }
1355
1356     switch (fid) {
1357     case NVME_TEMPERATURE_THRESHOLD:
1358         result = 0;
1359
1360         /*
1361          * The controller only implements the Composite Temperature sensor, so
1362          * return 0 for all other sensors.
1363          */
1364         if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
1365             goto out;
1366         }
1367
1368         switch (NVME_TEMP_THSEL(dw11)) {
1369         case NVME_TEMP_THSEL_OVER:
1370             result = n->features.temp_thresh_hi;
1371             goto out;
1372         case NVME_TEMP_THSEL_UNDER:
1373             result = n->features.temp_thresh_low;
1374             goto out;
1375         }
1376
1377         return NVME_INVALID_FIELD | NVME_DNR;
1378     case NVME_VOLATILE_WRITE_CACHE:
1379         result = blk_enable_write_cache(n->conf.blk);
1380         trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
1381         goto out;
1382     case NVME_ASYNCHRONOUS_EVENT_CONF:
1383         result = n->features.async_config;
1384         goto out;
1385     case NVME_TIMESTAMP:
1386         return nvme_get_feature_timestamp(n, req);
1387     default:
1388         break;
1389     }
1390
1391 defaults:
1392     switch (fid) {
1393     case NVME_TEMPERATURE_THRESHOLD:
1394         result = 0;
1395
1396         if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
1397             break;
1398         }
1399
1400         if (NVME_TEMP_THSEL(dw11) == NVME_TEMP_THSEL_OVER) {
1401             result = NVME_TEMPERATURE_WARNING;
1402         }
1403
1404         break;
1405     case NVME_NUMBER_OF_QUEUES:
1406         result = (n->params.max_ioqpairs - 1) |
1407             ((n->params.max_ioqpairs - 1) << 16);
1408         trace_pci_nvme_getfeat_numq(result);
1409         break;
1410     case NVME_INTERRUPT_VECTOR_CONF:
1411         iv = dw11 & 0xffff;
1412         if (iv >= n->params.max_ioqpairs + 1) {
1413             return NVME_INVALID_FIELD | NVME_DNR;
1414         }
1415
1416         result = iv;
1417         if (iv == n->admin_cq.vector) {
1418             result |= NVME_INTVC_NOCOALESCING;
1419         }
1420
1421         break;
1422     default:
1423         result = nvme_feature_default[fid];
1424         break;
1425     }
1426
1427 out:
1428     req->cqe.result = cpu_to_le32(result);
1429     return NVME_SUCCESS;
1430 }
1431
1432 static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeRequest *req)
1433 {
1434     uint16_t ret;
1435     uint64_t timestamp;
1436     NvmeCmd *cmd = &req->cmd;
1437     uint64_t prp1 = le64_to_cpu(cmd->dptr.prp1);
1438     uint64_t prp2 = le64_to_cpu(cmd->dptr.prp2);
1439
1440     ret = nvme_dma_prp(n, (uint8_t *)&timestamp, sizeof(timestamp), prp1,
1441                        prp2, DMA_DIRECTION_TO_DEVICE, req);
1442     if (ret != NVME_SUCCESS) {
1443         return ret;
1444     }
1445
1446     nvme_set_timestamp(n, timestamp);
1447
1448     return NVME_SUCCESS;
1449 }
1450
1451 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
1452 {
1453     NvmeCmd *cmd = &req->cmd;
1454     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
1455     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
1456     uint32_t nsid = le32_to_cpu(cmd->nsid);
1457     uint8_t fid = NVME_GETSETFEAT_FID(dw10);
1458     uint8_t save = NVME_SETFEAT_SAVE(dw10);
1459
1460     trace_pci_nvme_setfeat(nvme_cid(req), fid, save, dw11);
1461
1462     if (save) {
1463         return NVME_FID_NOT_SAVEABLE | NVME_DNR;
1464     }
1465
1466     if (!nvme_feature_support[fid]) {
1467         return NVME_INVALID_FIELD | NVME_DNR;
1468     }
1469
1470     if (nvme_feature_cap[fid] & NVME_FEAT_CAP_NS) {
1471         if (!nsid || (nsid != NVME_NSID_BROADCAST &&
1472                       nsid > n->num_namespaces)) {
1473             return NVME_INVALID_NSID | NVME_DNR;
1474         }
1475     } else if (nsid && nsid != NVME_NSID_BROADCAST) {
1476         if (nsid > n->num_namespaces) {
1477             return NVME_INVALID_NSID | NVME_DNR;
1478         }
1479
1480         return NVME_FEAT_NOT_NS_SPEC | NVME_DNR;
1481     }
1482
1483     if (!(nvme_feature_cap[fid] & NVME_FEAT_CAP_CHANGE)) {
1484         return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
1485     }
1486
1487     switch (fid) {
1488     case NVME_TEMPERATURE_THRESHOLD:
1489         if (NVME_TEMP_TMPSEL(dw11) != NVME_TEMP_TMPSEL_COMPOSITE) {
1490             break;
1491         }
1492
1493         switch (NVME_TEMP_THSEL(dw11)) {
1494         case NVME_TEMP_THSEL_OVER:
1495             n->features.temp_thresh_hi = NVME_TEMP_TMPTH(dw11);
1496             break;
1497         case NVME_TEMP_THSEL_UNDER:
1498             n->features.temp_thresh_low = NVME_TEMP_TMPTH(dw11);
1499             break;
1500         default:
1501             return NVME_INVALID_FIELD | NVME_DNR;
1502         }
1503
1504         if (((n->temperature >= n->features.temp_thresh_hi) ||
1505             (n->temperature <= n->features.temp_thresh_low)) &&
1506             NVME_AEC_SMART(n->features.async_config) & NVME_SMART_TEMPERATURE) {
1507             nvme_enqueue_event(n, NVME_AER_TYPE_SMART,
1508                                NVME_AER_INFO_SMART_TEMP_THRESH,
1509                                NVME_LOG_SMART_INFO);
1510         }
1511
1512         break;
1513     case NVME_VOLATILE_WRITE_CACHE:
1514         if (!(dw11 & 0x1) && blk_enable_write_cache(n->conf.blk)) {
1515             blk_flush(n->conf.blk);
1516         }
1517
1518         blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
1519         break;
1520     case NVME_NUMBER_OF_QUEUES:
1521         if (n->qs_created) {
1522             return NVME_CMD_SEQ_ERROR | NVME_DNR;
1523         }
1524
1525         /*
1526          * NVMe v1.3, Section 5.21.1.7: 0xffff is not an allowed value for NCQR
1527          * and NSQR.
1528          */
1529         if ((dw11 & 0xffff) == 0xffff || ((dw11 >> 16) & 0xffff) == 0xffff) {
1530             return NVME_INVALID_FIELD | NVME_DNR;
1531         }
1532
1533         trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
1534                                     ((dw11 >> 16) & 0xFFFF) + 1,
1535                                     n->params.max_ioqpairs,
1536                                     n->params.max_ioqpairs);
1537         req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
1538                                       ((n->params.max_ioqpairs - 1) << 16));
1539         break;
1540     case NVME_ASYNCHRONOUS_EVENT_CONF:
1541         n->features.async_config = dw11;
1542         break;
1543     case NVME_TIMESTAMP:
1544         return nvme_set_feature_timestamp(n, req);
1545     default:
1546         return NVME_FEAT_NOT_CHANGEABLE | NVME_DNR;
1547     }
1548     return NVME_SUCCESS;
1549 }
1550
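/*
 * Handle an Asynchronous Event Request command: the request is held by the
 * controller until an event is available; if the AER limit is exceeded the
 * command fails immediately.
 */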
1551 static uint16_t nvme_aer(NvmeCtrl *n, NvmeRequest *req)
1552 {
1553     trace_pci_nvme_aer(nvme_cid(req));
1554
1555     if (n->outstanding_aers > n->params.aerl) {
1556         trace_pci_nvme_aer_aerl_exceeded();
1557         return NVME_AER_LIMIT_EXCEEDED;
1558     }
1559
1560     n->aer_reqs[n->outstanding_aers] = req;
1561     n->outstanding_aers++;
1562
1563     if (!QTAILQ_EMPTY(&n->aer_queue)) {
1564         nvme_process_aers(n);
1565     }
1566
1567     return NVME_NO_COMPLETE;
1568 }
1569
1570 static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
1571 {
1572     trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode);
1573
1574     switch (req->cmd.opcode) {
1575     case NVME_ADM_CMD_DELETE_SQ:
1576         return nvme_del_sq(n, req);
1577     case NVME_ADM_CMD_CREATE_SQ:
1578         return nvme_create_sq(n, req);
1579     case NVME_ADM_CMD_GET_LOG_PAGE:
1580         return nvme_get_log(n, req);
1581     case NVME_ADM_CMD_DELETE_CQ:
1582         return nvme_del_cq(n, req);
1583     case NVME_ADM_CMD_CREATE_CQ:
1584         return nvme_create_cq(n, req);
1585     case NVME_ADM_CMD_IDENTIFY:
1586         return nvme_identify(n, req);
1587     case NVME_ADM_CMD_ABORT:
1588         return nvme_abort(n, req);
1589     case NVME_ADM_CMD_SET_FEATURES:
1590         return nvme_set_feature(n, req);
1591     case NVME_ADM_CMD_GET_FEATURES:
1592         return nvme_get_feature(n, req);
1593     case NVME_ADM_CMD_ASYNC_EV_REQ:
1594         return nvme_aer(n, req);
1595     default:
1596         trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
1597         return NVME_INVALID_OPCODE | NVME_DNR;
1598     }
1599 }
1600
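/*
 * Submission queue timer callback: fetch commands from the guest SQ ring,
 * dispatch them to the admin or I/O handler and enqueue completions for
 * commands that finished synchronously.
 */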
1601 static void nvme_process_sq(void *opaque)
1602 {
1603     NvmeSQueue *sq = opaque;
1604     NvmeCtrl *n = sq->ctrl;
1605     NvmeCQueue *cq = n->cq[sq->cqid];
1606
1607     uint16_t status;
1608     hwaddr addr;
1609     NvmeCmd cmd;
1610     NvmeRequest *req;
1611
1612     while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
1613         addr = sq->dma_addr + sq->head * n->sqe_size;
1614         nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
1615         nvme_inc_sq_head(sq);
1616
1617         req = QTAILQ_FIRST(&sq->req_list);
1618         QTAILQ_REMOVE(&sq->req_list, req, entry);
1619         QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
1620         nvme_req_clear(req);
1621         req->cqe.cid = cmd.cid;
1622         memcpy(&req->cmd, &cmd, sizeof(NvmeCmd));
1623
1624         status = sq->sqid ? nvme_io_cmd(n, req) :
1625             nvme_admin_cmd(n, req);
1626         if (status != NVME_NO_COMPLETE) {
1627             req->status = status;
1628             nvme_enqueue_req_completion(cq, req);
1629         }
1630     }
1631 }
1632
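/*
 * Controller reset: drain outstanding block I/O, free all submission and
 * completion queues, drop pending asynchronous events and clear CC.
 */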
1633 static void nvme_clear_ctrl(NvmeCtrl *n)
1634 {
1635     int i;
1636
1637     blk_drain(n->conf.blk);
1638
1639     for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
1640         if (n->sq[i] != NULL) {
1641             nvme_free_sq(n->sq[i], n);
1642         }
1643     }
1644     for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
1645         if (n->cq[i] != NULL) {
1646             nvme_free_cq(n->cq[i], n);
1647         }
1648     }
1649
1650     while (!QTAILQ_EMPTY(&n->aer_queue)) {
1651         NvmeAsyncEvent *event = QTAILQ_FIRST(&n->aer_queue);
1652         QTAILQ_REMOVE(&n->aer_queue, event, entry);
1653         g_free(event);
1654     }
1655
1656     n->aer_queued = 0;
1657     n->outstanding_aers = 0;
1658     n->qs_created = false;
1659
1660     blk_flush(n->conf.blk);
1661     n->bar.cc = 0;
1662 }
1663
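/*
 * Called when the controller is enabled: validate the admin queue and CC
 * configuration against CAP and the identify data, then set up the admin
 * submission and completion queues.
 */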
1664 static int nvme_start_ctrl(NvmeCtrl *n)
1665 {
1666     uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
1667     uint32_t page_size = 1 << page_bits;
1668
1669     if (unlikely(n->cq[0])) {
1670         trace_pci_nvme_err_startfail_cq();
1671         return -1;
1672     }
1673     if (unlikely(n->sq[0])) {
1674         trace_pci_nvme_err_startfail_sq();
1675         return -1;
1676     }
1677     if (unlikely(!n->bar.asq)) {
1678         trace_pci_nvme_err_startfail_nbarasq();
1679         return -1;
1680     }
1681     if (unlikely(!n->bar.acq)) {
1682         trace_pci_nvme_err_startfail_nbaracq();
1683         return -1;
1684     }
1685     if (unlikely(n->bar.asq & (page_size - 1))) {
1686         trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
1687         return -1;
1688     }
1689     if (unlikely(n->bar.acq & (page_size - 1))) {
1690         trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
1691         return -1;
1692     }
1693     if (unlikely(NVME_CC_MPS(n->bar.cc) <
1694                  NVME_CAP_MPSMIN(n->bar.cap))) {
1695         trace_pci_nvme_err_startfail_page_too_small(
1696                     NVME_CC_MPS(n->bar.cc),
1697                     NVME_CAP_MPSMIN(n->bar.cap));
1698         return -1;
1699     }
1700     if (unlikely(NVME_CC_MPS(n->bar.cc) >
1701                  NVME_CAP_MPSMAX(n->bar.cap))) {
1702         trace_pci_nvme_err_startfail_page_too_large(
1703                     NVME_CC_MPS(n->bar.cc),
1704                     NVME_CAP_MPSMAX(n->bar.cap));
1705         return -1;
1706     }
1707     if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
1708                  NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
1709         trace_pci_nvme_err_startfail_cqent_too_small(
1710                     NVME_CC_IOCQES(n->bar.cc),
1711                     NVME_CTRL_CQES_MIN(n->id_ctrl.cqes));
1712         return -1;
1713     }
1714     if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
1715                  NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
1716         trace_pci_nvme_err_startfail_cqent_too_large(
1717                     NVME_CC_IOCQES(n->bar.cc),
1718                     NVME_CTRL_CQES_MAX(n->id_ctrl.cqes));
1719         return -1;
1720     }
1721     if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
1722                  NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
1723         trace_pci_nvme_err_startfail_sqent_too_small(
1724                     NVME_CC_IOSQES(n->bar.cc),
1725                     NVME_CTRL_SQES_MIN(n->id_ctrl.sqes));
1726         return -1;
1727     }
1728     if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
1729                  NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
1730         trace_pci_nvme_err_startfail_sqent_too_large(
1731                     NVME_CC_IOSQES(n->bar.cc),
1732                     NVME_CTRL_SQES_MAX(n->id_ctrl.sqes));
1733         return -1;
1734     }
1735     if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
1736         trace_pci_nvme_err_startfail_asqent_sz_zero();
1737         return -1;
1738     }
1739     if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
1740         trace_pci_nvme_err_startfail_acqent_sz_zero();
1741         return -1;
1742     }
1743
1744     n->page_bits = page_bits;
1745     n->page_size = page_size;
1746     n->max_prp_ents = n->page_size / sizeof(uint64_t);
1747     n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
1748     n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
1749     nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
1750         NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
1751     nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
1752         NVME_AQA_ASQS(n->bar.aqa) + 1);
1753
1754     nvme_set_timestamp(n, 0ULL);
1755
1756     QTAILQ_INIT(&n->aer_queue);
1757
1758     return 0;
1759 }
1760
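     /*
      * Handle writes to the controller register file (offsets below the
      * doorbell area). Doorbell writes are routed to nvme_process_db() by
      * nvme_mmio_write().
      */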
1761 static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
1762     unsigned size)
1763 {
1764     if (unlikely(offset & (sizeof(uint32_t) - 1))) {
1765         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
1766                        "MMIO write not 32-bit aligned,"
1767                        " offset=0x%"PRIx64"", offset);
1768         /* should be ignored, fall through for now */
1769     }
1770
1771     if (unlikely(size < sizeof(uint32_t))) {
1772         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
1773                        "MMIO write smaller than 32-bits,"
1774                        " offset=0x%"PRIx64", size=%u",
1775                        offset, size);
1776         /* should be ignored, fall through for now */
1777     }
1778
1779     switch (offset) {
1780     case 0xc:   /* INTMS */
1781         if (unlikely(msix_enabled(&(n->parent_obj)))) {
1782             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
1783                            "undefined access to interrupt mask set"
1784                            " when MSI-X is enabled");
1785             /* should be ignored, fall through for now */
1786         }
1787         n->bar.intms |= data & 0xffffffff;
1788         n->bar.intmc = n->bar.intms;
1789         trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
1790         nvme_irq_check(n);
1791         break;
1792     case 0x10:  /* INTMC */
1793         if (unlikely(msix_enabled(&(n->parent_obj)))) {
1794             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
1795                            "undefined access to interrupt mask clr"
1796                            " when MSI-X is enabled");
1797             /* should be ignored, fall through for now */
1798         }
1799         n->bar.intms &= ~(data & 0xffffffff);
1800         n->bar.intmc = n->bar.intms;
1801         trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
1802         nvme_irq_check(n);
1803         break;
1804     case 0x14:  /* CC */
1805         trace_pci_nvme_mmio_cfg(data & 0xffffffff);
1806         /* Windows first sends data, then sends enable bit */
1807         if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
1808             !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
1809         {
1810             n->bar.cc = data;
1811         }
1812
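             /*
              * CC.EN 0 -> 1 starts the controller and 1 -> 0 resets it;
              * a non-zero CC.SHN initiates shutdown processing.
              */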
1813         if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
1814             n->bar.cc = data;
1815             if (unlikely(nvme_start_ctrl(n))) {
1816                 trace_pci_nvme_err_startfail();
1817                 n->bar.csts = NVME_CSTS_FAILED;
1818             } else {
1819                 trace_pci_nvme_mmio_start_success();
1820                 n->bar.csts = NVME_CSTS_READY;
1821             }
1822         } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
1823             trace_pci_nvme_mmio_stopped();
1824             nvme_clear_ctrl(n);
1825             n->bar.csts &= ~NVME_CSTS_READY;
1826         }
1827         if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
1828             trace_pci_nvme_mmio_shutdown_set();
1829             nvme_clear_ctrl(n);
1830             n->bar.cc = data;
1831             n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
1832         } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
1833             trace_pci_nvme_mmio_shutdown_cleared();
1834             n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
1835             n->bar.cc = data;
1836         }
1837         break;
1838     case 0x1C:  /* CSTS */
1839         if (data & (1 << 4)) {
1840             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
1841                            "attempted to W1C CSTS.NSSRO"
1842                            " but CAP.NSSRS is zero (not supported)");
1843         } else if (data != 0) {
1844             NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
1845                            "attempted to set a read only bit"
1846                            " of controller status");
1847         }
1848         break;
1849     case 0x20:  /* NSSR */
1850         if (data == 0x4E564D65) {
1851             trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
1852         } else {
1853             /* The spec says that writes of other values have no effect */
1854             return;
1855         }
1856         break;
1857     case 0x24:  /* AQA */
1858         n->bar.aqa = data & 0xffffffff;
1859         trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
1860         break;
1861     case 0x28:  /* ASQ */
1862         n->bar.asq = data;
1863         trace_pci_nvme_mmio_asqaddr(data);
1864         break;
1865     case 0x2c:  /* ASQ hi */
1866         n->bar.asq |= data << 32;
1867         trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
1868         break;
1869     case 0x30:  /* ACQ */
1870         trace_pci_nvme_mmio_acqaddr(data);
1871         n->bar.acq = data;
1872         break;
1873     case 0x34:  /* ACQ hi */
1874         n->bar.acq |= data << 32;
1875         trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
1876         break;
1877     case 0x38:  /* CMBLOC */
1878         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
1879                        "invalid write to reserved CMBLOC"
1880                        " when CMBSZ is zero, ignored");
1881         return;
1882     case 0x3C:  /* CMBSZ */
1883         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
1884                        "invalid write to read only CMBSZ, ignored");
1885         return;
1886     case 0xE00: /* PMRCAP */
1887         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
1888                        "invalid write to PMRCAP register, ignored");
1889         return;
1890     case 0xE04: /* TODO PMRCTL */
1891         break;
1892     case 0xE08: /* PMRSTS */
1893         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
1894                        "invalid write to PMRSTS register, ignored");
1895         return;
1896     case 0xE0C: /* PMREBS */
1897         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
1898                        "invalid write to PMREBS register, ignored");
1899         return;
1900     case 0xE10: /* PMRSWTP */
1901         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
1902                        "invalid write to PMRSWTP register, ignored");
1903         return;
1904     case 0xE14: /* TODO PMRMSC */
1905         break;
1906     default:
1907         NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
1908                        "invalid MMIO write,"
1909                        " offset=0x%"PRIx64", data=%"PRIx64"",
1910                        offset, data);
1911         break;
1912     }
1913 }
1914
1915 static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
1916 {
1917     NvmeCtrl *n = (NvmeCtrl *)opaque;
1918     uint8_t *ptr = (uint8_t *)&n->bar;
1919     uint64_t val = 0;
1920
1921     trace_pci_nvme_mmio_read(addr);
1922
1923     if (unlikely(addr & (sizeof(uint32_t) - 1))) {
1924         NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
1925                        "MMIO read not 32-bit aligned,"
1926                        " offset=0x%"PRIx64"", addr);
1927         /* should RAZ, fall through for now */
1928     } else if (unlikely(size < sizeof(uint32_t))) {
1929         NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
1930                        "MMIO read smaller than 32-bits,"
1931                        " offset=0x%"PRIx64"", addr);
1932         /* should RAZ, fall through for now */
1933     }
1934
1935     if (addr < sizeof(n->bar)) {
1936         /*
1937           * When PMRWBM bit 1 is set then a read from
1938           * PMRSTS should ensure prior writes
1939          * made it to persistent media
1940          */
1941         if (addr == 0xE08 &&
1942             (NVME_PMRCAP_PMRWBM(n->bar.pmrcap) & 0x02)) {
1943             memory_region_msync(&n->pmrdev->mr, 0, n->pmrdev->size);
1944         }
1945         memcpy(&val, ptr + addr, size);
1946     } else {
1947         NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
1948                        "MMIO read beyond last register,"
1949                        " offset=0x%"PRIx64", returning 0", addr);
1950     }
1951
1952     return val;
1953 }
1954
1955 static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
1956 {
1957     uint32_t qid;
1958
1959     if (unlikely(addr & ((1 << 2) - 1))) {
1960         NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
1961                        "doorbell write not 32-bit aligned,"
1962                        " offset=0x%"PRIx64", ignoring", addr);
1963         return;
1964     }
1965
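         /*
          * Doorbell registers start at offset 0x1000 with a stride of
          * 4 bytes (CAP.DSTRD is 0): SQ y tail at 0x1000 + (2 * y) * 4 and
          * CQ y head at 0x1000 + (2 * y + 1) * 4, so odd 32-bit slots are
          * completion queue head doorbells.
          */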
1966     if (((addr - 0x1000) >> 2) & 1) {
1967         /* Completion queue doorbell write */
1968
1969         uint16_t new_head = val & 0xffff;
1970         int start_sqs;
1971         NvmeCQueue *cq;
1972
1973         qid = (addr - (0x1000 + (1 << 2))) >> 3;
1974         if (unlikely(nvme_check_cqid(n, qid))) {
1975             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
1976                            "completion queue doorbell write"
1977                            " for nonexistent queue,"
1978                            " cqid=%"PRIu32", ignoring", qid);
1979
1980             /*
1981              * NVM Express v1.3d, Section 4.1 states: "If host software writes
1982              * an invalid value to the Submission Queue Tail Doorbell or
1983              * Completion Queue Head Doorbell register and an Asynchronous Event
1984              * Request command is outstanding, then an asynchronous event is
1985              * posted to the Admin Completion Queue with a status code of
1986              * Invalid Doorbell Write Value."
1987              *
1988              * Also note that the spec includes the "Invalid Doorbell Register"
1989              * status code, but nowhere does it specify when to use it.
1990              * However, it seems reasonable to use it here in a similar
1991              * fashion.
1992              */
1993             if (n->outstanding_aers) {
1994                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
1995                                    NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
1996                                    NVME_LOG_ERROR_INFO);
1997             }
1998
1999             return;
2000         }
2001
2002         cq = n->cq[qid];
2003         if (unlikely(new_head >= cq->size)) {
2004             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
2005                            "completion queue doorbell write value"
2006                            " beyond queue size, cqid=%"PRIu32","
2007                            " new_head=%"PRIu16", ignoring",
2008                            qid, new_head);
2009
2010             if (n->outstanding_aers) {
2011                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2012                                    NVME_AER_INFO_ERR_INVALID_DB_VALUE,
2013                                    NVME_LOG_ERROR_INFO);
2014             }
2015
2016             return;
2017         }
2018
2019         trace_pci_nvme_mmio_doorbell_cq(cq->cqid, new_head);
2020
2021         start_sqs = nvme_cq_full(cq) ? 1 : 0;
2022         cq->head = new_head;
2023         if (start_sqs) {
2024             NvmeSQueue *sq;
2025             QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
2026                 timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
2027             }
2028             timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
2029         }
2030
2031         if (cq->tail == cq->head) {
2032             nvme_irq_deassert(n, cq);
2033         }
2034     } else {
2035         /* Submission queue doorbell write */
2036
2037         uint16_t new_tail = val & 0xffff;
2038         NvmeSQueue *sq;
2039
2040         qid = (addr - 0x1000) >> 3;
2041         if (unlikely(nvme_check_sqid(n, qid))) {
2042             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
2043                            "submission queue doorbell write"
2044                            " for nonexistent queue,"
2045                            " sqid=%"PRIu32", ignoring", qid);
2046
2047             if (n->outstanding_aers) {
2048                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2049                                    NVME_AER_INFO_ERR_INVALID_DB_REGISTER,
2050                                    NVME_LOG_ERROR_INFO);
2051             }
2052
2053             return;
2054         }
2055
2056         sq = n->sq[qid];
2057         if (unlikely(new_tail >= sq->size)) {
2058             NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
2059                            "submission queue doorbell write value"
2060                            " beyond queue size, sqid=%"PRIu32","
2061                            " new_tail=%"PRIu16", ignoring",
2062                            qid, new_tail);
2063
2064             if (n->outstanding_aers) {
2065                 nvme_enqueue_event(n, NVME_AER_TYPE_ERROR,
2066                                    NVME_AER_INFO_ERR_INVALID_DB_VALUE,
2067                                    NVME_LOG_ERROR_INFO);
2068             }
2069
2070             return;
2071         }
2072
2073         trace_pci_nvme_mmio_doorbell_sq(sq->sqid, new_tail);
2074
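             /* update the tail and kick deferred processing of the queue */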
2075         sq->tail = new_tail;
2076         timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
2077     }
2078 }
2079
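     /* BAR0 writes: registers below sizeof(NvmeBar), doorbells above */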
2080 static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
2081     unsigned size)
2082 {
2083     NvmeCtrl *n = (NvmeCtrl *)opaque;
2084
2085     trace_pci_nvme_mmio_write(addr, data);
2086
2087     if (addr < sizeof(n->bar)) {
2088         nvme_write_bar(n, addr, data, size);
2089     } else {
2090         nvme_process_db(n, addr, data);
2091     }
2092 }
2093
2094 static const MemoryRegionOps nvme_mmio_ops = {
2095     .read = nvme_mmio_read,
2096     .write = nvme_mmio_write,
2097     .endianness = DEVICE_LITTLE_ENDIAN,
2098     .impl = {
2099         .min_access_size = 2,
2100         .max_access_size = 8,
2101     },
2102 };
2103
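     /*
      * The CMB is backed by a plain host buffer (n->cmbuf); accesses are
      * little-endian loads and stores at the given offset.
      */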
2104 static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
2105     unsigned size)
2106 {
2107     NvmeCtrl *n = (NvmeCtrl *)opaque;
2108     stn_le_p(&n->cmbuf[addr], size, data);
2109 }
2110
2111 static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
2112 {
2113     NvmeCtrl *n = (NvmeCtrl *)opaque;
2114     return ldn_le_p(&n->cmbuf[addr], size);
2115 }
2116
2117 static const MemoryRegionOps nvme_cmb_ops = {
2118     .read = nvme_cmb_read,
2119     .write = nvme_cmb_write,
2120     .endianness = DEVICE_LITTLE_ENDIAN,
2121     .impl = {
2122         .min_access_size = 1,
2123         .max_access_size = 8,
2124     },
2125 };
2126
2127 static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
2128 {
2129     NvmeParams *params = &n->params;
2130
2131     if (params->num_queues) {
2132         warn_report("num_queues is deprecated; please use max_ioqpairs "
2133                     "instead");
2134
2135         params->max_ioqpairs = params->num_queues - 1;
2136     }
2137
2138     if (params->max_ioqpairs < 1 ||
2139         params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
2140         error_setg(errp, "max_ioqpairs must be between 1 and %d",
2141                    NVME_MAX_IOQPAIRS);
2142         return;
2143     }
2144
2145     if (params->msix_qsize < 1 ||
2146         params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
2147         error_setg(errp, "msix_qsize must be between 1 and %d",
2148                    PCI_MSIX_FLAGS_QSIZE + 1);
2149         return;
2150     }
2151
2152     if (!n->conf.blk) {
2153         error_setg(errp, "drive property not set");
2154         return;
2155     }
2156
2157     if (!params->serial) {
2158         error_setg(errp, "serial property not set");
2159         return;
2160     }
2161
2162     if (!n->params.cmb_size_mb && n->pmrdev) {
2163         if (host_memory_backend_is_mapped(n->pmrdev)) {
2164             error_setg(errp, "can't use already busy memdev: %s",
2165                        object_get_canonical_path_component(OBJECT(n->pmrdev)));
2166             return;
2167         }
2168
2169         if (!is_power_of_2(n->pmrdev->size)) {
2170             error_setg(errp, "pmr backend size needs to be a power of 2");
2171             return;
2172         }
2173
2174         host_memory_backend_set_mapped(n->pmrdev, true);
2175     }
2176 }
2177
2178 static void nvme_init_state(NvmeCtrl *n)
2179 {
2180     n->num_namespaces = 1;
2181     /* add one to max_ioqpairs to account for the admin queue pair */
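         /* two doorbells (SQ tail, CQ head) of NVME_DB_SIZE bytes per queue pair */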
2182     n->reg_size = pow2ceil(sizeof(NvmeBar) +
2183                            2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
2184     n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
2185     n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
2186     n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
2187     n->temperature = NVME_TEMPERATURE;
2188     n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
2189     n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
2190     n->aer_reqs = g_new0(NvmeRequest *, n->params.aerl + 1);
2191 }
2192
2193 static void nvme_init_blk(NvmeCtrl *n, Error **errp)
2194 {
2195     if (!blkconf_blocksizes(&n->conf, errp)) {
2196         return;
2197     }
2198     blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
2199                                   false, errp);
2200 }
2201
2202 static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
2203 {
2204     int64_t bs_size;
2205     NvmeIdNs *id_ns = &ns->id_ns;
2206
2207     bs_size = blk_getlength(n->conf.blk);
2208     if (bs_size < 0) {
2209         error_setg_errno(errp, -bs_size, "could not get backing file size");
2210         return;
2211     }
2212
2213     n->ns_size = bs_size;
2214
2215     id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
2216     id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(n, ns));
2217
2218     /* no thin provisioning */
2219     id_ns->ncap = id_ns->nsze;
2220     id_ns->nuse = id_ns->ncap;
2221 }
2222
2223 static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
2224 {
2225     NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
2226     NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);
2227
2228     NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
2229     NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
2230     NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 1);
2231     NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
2232     NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
2233     NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
2234     NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
2235
2236     n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
2237     memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
2238                           "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
2239     pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
2240                      PCI_BASE_ADDRESS_SPACE_MEMORY |
2241                      PCI_BASE_ADDRESS_MEM_TYPE_64 |
2242                      PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
2243 }
2244
2245 static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
2246 {
2247     /* Controller Capabilities register */
2248     NVME_CAP_SET_PMRS(n->bar.cap, 1);
2249
2250     /* PMR Capabilities register */
2251     n->bar.pmrcap = 0;
2252     NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
2253     NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
2254     NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
2255     NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
2256     /* Turn on bit 1 support */
2257     NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
2258     NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
2259     NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);
2260
2261     /* PMR Control register */
2262     n->bar.pmrctl = 0;
2263     NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);
2264
2265     /* PMR Status register */
2266     n->bar.pmrsts = 0;
2267     NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
2268     NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
2269     NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
2270     NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);
2271
2272     /* PMR Elasticity Buffer Size register */
2273     n->bar.pmrebs = 0;
2274     NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
2275     NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
2276     NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);
2277
2278     /* PMR Sustained Write Throughput register */
2279     n->bar.pmrswtp = 0;
2280     NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
2281     NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);
2282
2283     /* PMR Memory Space Control register */
2284     n->bar.pmrmsc = 0;
2285     NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
2286     NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);
2287
2288     pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
2289                      PCI_BASE_ADDRESS_SPACE_MEMORY |
2290                      PCI_BASE_ADDRESS_MEM_TYPE_64 |
2291                      PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
2292 }
2293
2294 static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
2295 {
2296     uint8_t *pci_conf = pci_dev->config;
2297
2298     pci_conf[PCI_INTERRUPT_PIN] = 1;
2299     pci_config_set_prog_interface(pci_conf, 0x2);
2300     pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
2301     pcie_endpoint_cap_init(pci_dev, 0x80);
2302
2303     memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
2304                           n->reg_size);
2305     pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
2306                      PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
2307     if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
2308         return;
2309     }
2310
2311     if (n->params.cmb_size_mb) {
2312         nvme_init_cmb(n, pci_dev);
2313     } else if (n->pmrdev) {
2314         nvme_init_pmr(n, pci_dev);
2315     }
2316 }
2317
2318 static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
2319 {
2320     NvmeIdCtrl *id = &n->id_ctrl;
2321     uint8_t *pci_conf = pci_dev->config;
2322     char *subnqn;
2323
2324     id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
2325     id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
2326     strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
2327     strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
2328     strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
2329     id->rab = 6;
2330     id->ieee[0] = 0x00;
2331     id->ieee[1] = 0x02;
2332     id->ieee[2] = 0xb3;
2333     id->mdts = n->params.mdts;
2334     id->ver = cpu_to_le32(NVME_SPEC_VER);
2335     id->oacs = cpu_to_le16(0);
2336
2337     /*
2338      * Because the controller always completes the Abort command immediately,
2339      * there can never be more than one concurrently executing Abort command,
2340      * so this value is never used for anything. Note that there can easily be
2341      * many Abort commands in the queues, but they are not considered
2342      * "executing" until processed by nvme_abort.
2343      *
2344      * The specification recommends a value of 3 for Abort Command Limit (four
2345      * concurrently outstanding Abort commands), so let's use that, though it is
2346      * inconsequential.
2347      */
2348     id->acl = 3;
2349     id->aerl = n->params.aerl;
2350     id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
2351     id->lpa = NVME_LPA_EXTENDED;
2352
2353     /* recommended default value (~70 C) */
2354     id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
2355     id->cctemp = cpu_to_le16(NVME_TEMPERATURE_CRITICAL);
2356
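         /*
          * Queue entry sizes as log2, minimum in the low nibble and maximum
          * in the high nibble: 64 byte SQEs and 16 byte CQEs.
          */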
2357     id->sqes = (0x6 << 4) | 0x6;
2358     id->cqes = (0x4 << 4) | 0x4;
2359     id->nn = cpu_to_le32(n->num_namespaces);
2360     id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROES | NVME_ONCS_TIMESTAMP |
2361                            NVME_ONCS_FEATURES);
2362
2363     subnqn = g_strdup_printf("nqn.2019-08.org.qemu:%s", n->params.serial);
2364     strpadcpy((char *)id->subnqn, sizeof(id->subnqn), subnqn, '\0');
2365     g_free(subnqn);
2366
2367     id->psd[0].mp = cpu_to_le16(0x9c4);
2368     id->psd[0].enlat = cpu_to_le32(0x10);
2369     id->psd[0].exlat = cpu_to_le32(0x4);
2370     if (blk_enable_write_cache(n->conf.blk)) {
2371         id->vwc = 1;
2372     }
2373
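         /*
          * CAP: up to 2048 entries per queue (MQES is 0-based), contiguous
          * queues required, ready timeout of 7.5 s (TO is in 500 ms units),
          * NVM command set and a maximum page size of 2^(12 + 4) = 64 KiB.
          */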
2374     n->bar.cap = 0;
2375     NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
2376     NVME_CAP_SET_CQR(n->bar.cap, 1);
2377     NVME_CAP_SET_TO(n->bar.cap, 0xf);
2378     NVME_CAP_SET_CSS(n->bar.cap, 1);
2379     NVME_CAP_SET_MPSMAX(n->bar.cap, 4);
2380
2381     n->bar.vs = NVME_SPEC_VER;
2382     n->bar.intmc = n->bar.intms = 0;
2383 }
2384
2385 static void nvme_realize(PCIDevice *pci_dev, Error **errp)
2386 {
2387     NvmeCtrl *n = NVME(pci_dev);
2388     Error *local_err = NULL;
2389
2390     int i;
2391
2392     nvme_check_constraints(n, &local_err);
2393     if (local_err) {
2394         error_propagate(errp, local_err);
2395         return;
2396     }
2397
2398     nvme_init_state(n);
2399     nvme_init_blk(n, &local_err);
2400     if (local_err) {
2401         error_propagate(errp, local_err);
2402         return;
2403     }
2404
2405     nvme_init_pci(n, pci_dev, &local_err);
2406     if (local_err) {
2407         error_propagate(errp, local_err);
2408         return;
2409     }
2410
2411     nvme_init_ctrl(n, pci_dev);
2412
2413     for (i = 0; i < n->num_namespaces; i++) {
2414         nvme_init_namespace(n, &n->namespaces[i], &local_err);
2415         if (local_err) {
2416             error_propagate(errp, local_err);
2417             return;
2418         }
2419     }
2420 }
2421
2422 static void nvme_exit(PCIDevice *pci_dev)
2423 {
2424     NvmeCtrl *n = NVME(pci_dev);
2425
2426     nvme_clear_ctrl(n);
2427     g_free(n->namespaces);
2428     g_free(n->cq);
2429     g_free(n->sq);
2430     g_free(n->aer_reqs);
2431
2432     if (n->params.cmb_size_mb) {
2433         g_free(n->cmbuf);
2434     }
2435
2436     if (n->pmrdev) {
2437         host_memory_backend_set_mapped(n->pmrdev, false);
2438     }
2439     msix_uninit_exclusive_bar(pci_dev);
2440 }
2441
2442 static Property nvme_props[] = {
2443     DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
2444     DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
2445                      HostMemoryBackend *),
2446     DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
2447     DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
2448     DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
2449     DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
2450     DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
2451     DEFINE_PROP_UINT8("aerl", NvmeCtrl, params.aerl, 3),
2452     DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
2453     DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
2454     DEFINE_PROP_END_OF_LIST(),
2455 };
2456
2457 static const VMStateDescription nvme_vmstate = {
2458     .name = "nvme",
2459     .unmigratable = 1,
2460 };
2461
2462 static void nvme_class_init(ObjectClass *oc, void *data)
2463 {
2464     DeviceClass *dc = DEVICE_CLASS(oc);
2465     PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
2466
2467     pc->realize = nvme_realize;
2468     pc->exit = nvme_exit;
2469     pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
2470     pc->vendor_id = PCI_VENDOR_ID_INTEL;
2471     pc->device_id = 0x5845;
2472     pc->revision = 2;
2473
2474     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
2475     dc->desc = "Non-Volatile Memory Express";
2476     device_class_set_props(dc, nvme_props);
2477     dc->vmsd = &nvme_vmstate;
2478 }
2479
2480 static void nvme_instance_init(Object *obj)
2481 {
2482     NvmeCtrl *s = NVME(obj);
2483
2484     device_add_bootindex_property(obj, &s->conf.bootindex,
2485                                   "bootindex", "/namespace@1,0",
2486                                   DEVICE(obj));
2487 }
2488
2489 static const TypeInfo nvme_info = {
2490     .name          = TYPE_NVME,
2491     .parent        = TYPE_PCI_DEVICE,
2492     .instance_size = sizeof(NvmeCtrl),
2493     .class_init    = nvme_class_init,
2494     .instance_init = nvme_instance_init,
2495     .interfaces = (InterfaceInfo[]) {
2496         { INTERFACE_PCIE_DEVICE },
2497         { }
2498     },
2499 };
2500
2501 static void nvme_register_types(void)
2502 {
2503     type_register_static(&nvme_info);
2504 }
2505
2506 type_init(nvme_register_types)