/*
 * QEMU NVM Express Controller
 *
 * Copyright (c) 2012, Intel Corporation
 *
 * Written by Keith Busch <[email protected]>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

/**
 * Reference Specs: http://www.nvmexpress.org, 1.2, 1.1, 1.0e
 *
 * http://www.nvmexpress.org/resources/
 */

/**
 * Usage: add options:
 *      -drive file=<file>,if=none,id=<drive_id>
 *      -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
 *              cmb_size_mb=<cmb_size_mb[optional]>, \
 *              num_queues=<N[optional]>
 *
 * Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
 * offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
 */
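
/*
 * Example invocation (illustrative values only; any backing image and
 * serial string will do):
 *      -drive file=nvme.img,if=none,id=nvme0
 *      -device nvme,drive=nvme0,serial=deadbeef,cmb_size_mb=64
 */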

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "sysemu/block-backend.h"

#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "nvme.h"

#define NVME_GUEST_ERR(trace, fmt, ...) \
    do { \
        (trace_##trace)(__VA_ARGS__); \
        qemu_log_mask(LOG_GUEST_ERROR, #trace \
            " in %s: " fmt "\n", __func__, ## __VA_ARGS__); \
    } while (0)

static void nvme_process_sq(void *opaque);

static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
    if (n->cmbsz && addr >= n->ctrl_mem.addr &&
                addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
        memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
    } else {
        pci_dma_read(&n->parent_obj, addr, buf, size);
    }
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
    return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
}

static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
    return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
}

static void nvme_inc_cq_tail(NvmeCQueue *cq)
{
    cq->tail++;
    if (cq->tail >= cq->size) {
        cq->tail = 0;
        cq->phase = !cq->phase;
    }
}

static void nvme_inc_sq_head(NvmeSQueue *sq)
{
    sq->head = (sq->head + 1) % sq->size;
}

static uint8_t nvme_cq_full(NvmeCQueue *cq)
{
    return (cq->tail + 1) % cq->size == cq->head;
}

static uint8_t nvme_sq_empty(NvmeSQueue *sq)
{
    return sq->head == sq->tail;
}

static void nvme_irq_check(NvmeCtrl *n)
{
    if (msix_enabled(&(n->parent_obj))) {
        return;
    }
    if (~n->bar.intms & n->irq_status) {
        pci_irq_assert(&n->parent_obj);
    } else {
        pci_irq_deassert(&n->parent_obj);
    }
}

static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            trace_nvme_irq_msix(cq->vector);
            msix_notify(&(n->parent_obj), cq->vector);
        } else {
            trace_nvme_irq_pin();
            assert(cq->cqid < 64);
            n->irq_status |= 1 << cq->cqid;
            nvme_irq_check(n);
        }
    } else {
        trace_nvme_irq_masked();
    }
}

static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
{
    if (cq->irq_enabled) {
        if (msix_enabled(&(n->parent_obj))) {
            return;
        } else {
            assert(cq->cqid < 64);
            n->irq_status &= ~(1 << cq->cqid);
            nvme_irq_check(n);
        }
    }
}

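/*
 * Map a PRP1/PRP2 pair into either a scatter-gather list (qsg, for buffers
 * in host memory) or an iovec (iov, for buffers that live in the controller
 * memory buffer).  When the transfer does not fit in two pages, prp2 points
 * at a PRP list that is walked page by page.
 */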
static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
                             uint64_t prp2, uint32_t len, NvmeCtrl *n)
{
    hwaddr trans_len = n->page_size - (prp1 % n->page_size);
    trans_len = MIN(len, trans_len);
    int num_prps = (len >> n->page_bits) + 1;

    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_prp();
        return NVME_INVALID_FIELD | NVME_DNR;
    } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
               prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
        qsg->nsg = 0;
        qemu_iovec_init(iov, num_prps);
        qemu_iovec_add(iov, (void *)&n->cmbuf[prp1 - n->ctrl_mem.addr], trans_len);
    } else {
        pci_dma_sglist_init(qsg, &n->parent_obj, num_prps);
        qemu_sglist_add(qsg, prp1, trans_len);
    }
    len -= trans_len;
    if (len) {
        if (unlikely(!prp2)) {
            trace_nvme_err_invalid_prp2_missing();
            goto unmap;
        }
        if (len > n->page_size) {
            uint64_t prp_list[n->max_prp_ents];
            uint32_t nents, prp_trans;
            int i = 0;

            nents = (len + n->page_size - 1) >> n->page_bits;
            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
            nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
            while (len != 0) {
                uint64_t prp_ent = le64_to_cpu(prp_list[i]);

                if (i == n->max_prp_ents - 1 && len > n->page_size) {
                    if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                        trace_nvme_err_invalid_prplist_ent(prp_ent);
                        goto unmap;
                    }

                    i = 0;
                    nents = (len + n->page_size - 1) >> n->page_bits;
                    prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
                    nvme_addr_read(n, prp_ent, (void *)prp_list,
                        prp_trans);
                    prp_ent = le64_to_cpu(prp_list[i]);
                }

                if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
                    trace_nvme_err_invalid_prplist_ent(prp_ent);
                    goto unmap;
                }

                trans_len = MIN(len, n->page_size);
                if (qsg->nsg) {
                    qemu_sglist_add(qsg, prp_ent, trans_len);
                } else {
                    qemu_iovec_add(iov, (void *)&n->cmbuf[prp_ent - n->ctrl_mem.addr], trans_len);
                }
                len -= trans_len;
                i++;
            }
        } else {
            if (unlikely(prp2 & (n->page_size - 1))) {
                trace_nvme_err_invalid_prp2_align(prp2);
                goto unmap;
            }
            if (qsg->nsg) {
                qemu_sglist_add(qsg, prp2, len);
            } else {
                qemu_iovec_add(iov, (void *)&n->cmbuf[prp2 - n->ctrl_mem.addr], trans_len);
            }
        }
    }
    return NVME_SUCCESS;

 unmap:
    qemu_sglist_destroy(qsg);
    return NVME_INVALID_FIELD | NVME_DNR;
}

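/*
 * DMA helpers built on nvme_map_prp(): nvme_dma_write_prp() pulls the data
 * described by a PRP pair from the guest into a controller-side buffer
 * (used by Set Features), while nvme_dma_read_prp() pushes a controller-side
 * buffer out to the guest (used by Identify and Get Features).
 */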
static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                   uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (dma_buf_write(ptr, len, &qsg)) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) {
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
                                  uint64_t prp1, uint64_t prp2)
{
    QEMUSGList qsg;
    QEMUIOVector iov;
    uint16_t status = NVME_SUCCESS;

    trace_nvme_dma_read(prp1, prp2);

    if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (qsg.nsg > 0) {
        if (unlikely(dma_buf_read(ptr, len, &qsg))) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_sglist_destroy(&qsg);
    } else {
        if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
            trace_nvme_err_invalid_dma();
            status = NVME_INVALID_FIELD | NVME_DNR;
        }
        qemu_iovec_destroy(&iov);
    }
    return status;
}

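/*
 * Drain the completion queue's request list: write one completion entry per
 * finished request into guest memory (tagged with the current phase bit) and
 * assert the queue's interrupt while entries remain unconsumed.
 */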
static void nvme_post_cqes(void *opaque)
{
    NvmeCQueue *cq = opaque;
    NvmeCtrl *n = cq->ctrl;
    NvmeRequest *req, *next;

    QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
        NvmeSQueue *sq;
        hwaddr addr;

        if (nvme_cq_full(cq)) {
            break;
        }

        QTAILQ_REMOVE(&cq->req_list, req, entry);
        sq = req->sq;
        req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
        req->cqe.sq_id = cpu_to_le16(sq->sqid);
        req->cqe.sq_head = cpu_to_le16(sq->head);
        addr = cq->dma_addr + cq->tail * n->cqe_size;
        nvme_inc_cq_tail(cq);
        pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
            sizeof(req->cqe));
        QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
    }
    if (cq->tail != cq->head) {
        nvme_irq_assert(n, cq);
    }
}

static void nvme_enqueue_req_completion(NvmeCQueue *cq, NvmeRequest *req)
{
    assert(cq->cqid == req->sq->cqid);
    QTAILQ_REMOVE(&req->sq->out_req_list, req, entry);
    QTAILQ_INSERT_TAIL(&cq->req_list, req, entry);
    timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
}

static void nvme_rw_cb(void *opaque, int ret)
{
    NvmeRequest *req = opaque;
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    if (!ret) {
        block_acct_done(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_SUCCESS;
    } else {
        block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
        req->status = NVME_INTERNAL_DEV_ERROR;
    }
    if (req->has_sg) {
        qemu_sglist_destroy(&req->qsg);
    }
    nvme_enqueue_req_completion(cq, req);
}

static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
         BLOCK_ACCT_FLUSH);
    req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    const uint8_t lba_index = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    const uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint32_t nlb  = le16_to_cpu(rw->nlb) + 1;
    uint64_t offset = slba << data_shift;
    uint32_t count = nlb << data_shift;

    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    req->has_sg = false;
    block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                     BLOCK_ACCT_WRITE);
    req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
                                        BDRV_REQ_MAY_UNMAP, nvme_rw_cb, req);
    return NVME_NO_COMPLETE;
}

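/*
 * Read/Write path: validate the LBA range, map the PRPs, then submit the
 * request either through the scatter-gather DMA helpers (data in host
 * memory) or through plain vectored block I/O (data staged in the CMB).
 */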
static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
    NvmeRequest *req)
{
    NvmeRwCmd *rw = (NvmeRwCmd *)cmd;
    uint32_t nlb  = le32_to_cpu(rw->nlb) + 1;
    uint64_t slba = le64_to_cpu(rw->slba);
    uint64_t prp1 = le64_to_cpu(rw->prp1);
    uint64_t prp2 = le64_to_cpu(rw->prp2);

    uint8_t lba_index  = NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas);
    uint8_t data_shift = ns->id_ns.lbaf[lba_index].ds;
    uint64_t data_size = (uint64_t)nlb << data_shift;
    uint64_t data_offset = slba << data_shift;
    int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
    enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;

    trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);

    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
        return NVME_LBA_RANGE | NVME_DNR;
    }

    if (nvme_map_prp(&req->qsg, &req->iov, prp1, prp2, data_size, n)) {
        block_acct_invalid(blk_get_stats(n->conf.blk), acct);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    dma_acct_start(n->conf.blk, &req->acct, &req->qsg, acct);
    if (req->qsg.nsg > 0) {
        req->has_sg = true;
        req->aiocb = is_write ?
            dma_blk_write(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req) :
            dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                         nvme_rw_cb, req);
    } else {
        req->has_sg = false;
        req->aiocb = is_write ?
            blk_aio_pwritev(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                            req) :
            blk_aio_preadv(n->conf.blk, data_offset, &req->iov, 0, nvme_rw_cb,
                           req);
    }

    return NVME_NO_COMPLETE;
}

static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(cmd->nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];
    switch (cmd->opcode) {
    case NVME_CMD_FLUSH:
        return nvme_flush(n, ns, cmd, req);
    case NVME_CMD_WRITE_ZEROS:
        return nvme_write_zeros(n, ns, cmd, req);
    case NVME_CMD_WRITE:
    case NVME_CMD_READ:
        return nvme_rw(n, ns, cmd, req);
    default:
        trace_nvme_err_invalid_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
{
    n->sq[sq->sqid] = NULL;
    timer_del(sq->timer);
    timer_free(sq->timer);
    g_free(sq->io_req);
    if (sq->sqid) {
        g_free(sq);
    }
}

static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeRequest *req, *next;
    NvmeSQueue *sq;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_sqid(n, qid))) {
        trace_nvme_err_invalid_del_sq(qid);
        return NVME_INVALID_QID | NVME_DNR;
    }

    trace_nvme_del_sq(qid);

    sq = n->sq[qid];
    while (!QTAILQ_EMPTY(&sq->out_req_list)) {
        req = QTAILQ_FIRST(&sq->out_req_list);
        assert(req->aiocb);
        blk_aio_cancel(req->aiocb);
    }
    if (!nvme_check_cqid(n, sq->cqid)) {
        cq = n->cq[sq->cqid];
        QTAILQ_REMOVE(&cq->sq_list, sq, entry);

        nvme_post_cqes(cq);
        QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
            if (req->sq == sq) {
                QTAILQ_REMOVE(&cq->req_list, req, entry);
                QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
            }
        }
    }

    nvme_free_sq(sq, n);
    return NVME_SUCCESS;
}

static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t sqid, uint16_t cqid, uint16_t size)
{
    int i;
    NvmeCQueue *cq;

    sq->ctrl = n;
    sq->dma_addr = dma_addr;
    sq->sqid = sqid;
    sq->size = size;
    sq->cqid = cqid;
    sq->head = sq->tail = 0;
    sq->io_req = g_new(NvmeRequest, sq->size);

    QTAILQ_INIT(&sq->req_list);
    QTAILQ_INIT(&sq->out_req_list);
    for (i = 0; i < sq->size; i++) {
        sq->io_req[i].sq = sq;
        QTAILQ_INSERT_TAIL(&(sq->req_list), &sq->io_req[i], entry);
    }
    sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);

    assert(n->cq[cqid]);
    cq = n->cq[cqid];
    QTAILQ_INSERT_TAIL(&(cq->sq_list), sq, entry);
    n->sq[sqid] = sq;
}

static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeSQueue *sq;
    NvmeCreateSq *c = (NvmeCreateSq *)cmd;

    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t sqid = le16_to_cpu(c->sqid);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->sq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);

    if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_sq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
        trace_nvme_err_invalid_create_sq_sqid(sqid);
        return NVME_INVALID_QID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_sq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
        trace_nvme_err_invalid_create_sq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    sq = g_malloc0(sizeof(*sq));
    nvme_init_sq(sq, n, prp1, sqid, cqid, qsize + 1);
    return NVME_SUCCESS;
}

static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
{
    n->cq[cq->cqid] = NULL;
    timer_del(cq->timer);
    timer_free(cq->timer);
    msix_vector_unuse(&n->parent_obj, cq->vector);
    if (cq->cqid) {
        g_free(cq);
    }
}

static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeDeleteQ *c = (NvmeDeleteQ *)cmd;
    NvmeCQueue *cq;
    uint16_t qid = le16_to_cpu(c->qid);

    if (unlikely(!qid || nvme_check_cqid(n, qid))) {
        trace_nvme_err_invalid_del_cq_cqid(qid);
        return NVME_INVALID_CQID | NVME_DNR;
    }

    cq = n->cq[qid];
    if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
        trace_nvme_err_invalid_del_cq_notempty(qid);
        return NVME_INVALID_QUEUE_DEL;
    }
    nvme_irq_deassert(n, cq);
    trace_nvme_del_cq(qid);
    nvme_free_cq(cq, n);
    return NVME_SUCCESS;
}

static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
    uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
    cq->ctrl = n;
    cq->cqid = cqid;
    cq->size = size;
    cq->dma_addr = dma_addr;
    cq->phase = 1;
    cq->irq_enabled = irq_enabled;
    cq->vector = vector;
    cq->head = cq->tail = 0;
    QTAILQ_INIT(&cq->req_list);
    QTAILQ_INIT(&cq->sq_list);
    msix_vector_use(&n->parent_obj, cq->vector);
    n->cq[cqid] = cq;
    cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}

static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeCQueue *cq;
    NvmeCreateCq *c = (NvmeCreateCq *)cmd;
    uint16_t cqid = le16_to_cpu(c->cqid);
    uint16_t vector = le16_to_cpu(c->irq_vector);
    uint16_t qsize = le16_to_cpu(c->qsize);
    uint16_t qflags = le16_to_cpu(c->cq_flags);
    uint64_t prp1 = le64_to_cpu(c->prp1);

    trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
                         NVME_CQ_FLAGS_IEN(qflags) != 0);

    if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
        trace_nvme_err_invalid_create_cq_cqid(cqid);
        return NVME_INVALID_CQID | NVME_DNR;
    }
    if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
        trace_nvme_err_invalid_create_cq_size(qsize);
        return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
    }
    if (unlikely(!prp1)) {
        trace_nvme_err_invalid_create_cq_addr(prp1);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    if (unlikely(vector > n->num_queues)) {
        trace_nvme_err_invalid_create_cq_vector(vector);
        return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
    }
    if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
        trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    cq = g_malloc0(sizeof(*cq));
    nvme_init_cq(cq, n, prp1, cqid, vector, qsize + 1,
        NVME_CQ_FLAGS_IEN(qflags));
    return NVME_SUCCESS;
}

static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
{
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ctrl();

    return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
        prp1, prp2);
}

static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
{
    NvmeNamespace *ns;
    uint32_t nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);

    trace_nvme_identify_ns(nsid);

    if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
        trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
        return NVME_INVALID_NSID | NVME_DNR;
    }

    ns = &n->namespaces[nsid - 1];

    return nvme_dma_read_prp(n, (uint8_t *)&ns->id_ns, sizeof(ns->id_ns),
        prp1, prp2);
}

static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
    static const int data_len = 4 * KiB;
    uint32_t min_nsid = le32_to_cpu(c->nsid);
    uint64_t prp1 = le64_to_cpu(c->prp1);
    uint64_t prp2 = le64_to_cpu(c->prp2);
    uint32_t *list;
    uint16_t ret;
    int i, j = 0;

    trace_nvme_identify_nslist(min_nsid);

    list = g_malloc0(data_len);
    for (i = 0; i < n->num_namespaces; i++) {
        if (i < min_nsid) {
            continue;
        }
        list[j++] = cpu_to_le32(i + 1);
        if (j == data_len / sizeof(uint32_t)) {
            break;
        }
    }
    ret = nvme_dma_read_prp(n, (uint8_t *)list, data_len, prp1, prp2);
    g_free(list);
    return ret;
}

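/*
 * Identify dispatch on CNS: 0x00 = Identify Namespace, 0x01 = Identify
 * Controller, 0x02 = Active Namespace ID list.
 */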
static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
{
    NvmeIdentify *c = (NvmeIdentify *)cmd;

    switch (le32_to_cpu(c->cns)) {
    case 0x00:
        return nvme_identify_ns(n, c);
    case 0x01:
        return nvme_identify_ctrl(n, c);
    case 0x02:
        return nvme_identify_nslist(n, c);
    default:
        trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}

static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
    trace_nvme_setfeat_timestamp(ts);

    n->host_timestamp = le64_to_cpu(ts);
    n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
}

static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
{
    uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms;

    union nvme_timestamp {
        struct {
            uint64_t timestamp:48;
            uint64_t sync:1;
            uint64_t origin:3;
            uint64_t rsvd1:12;
        };
        uint64_t all;
    };

    union nvme_timestamp ts;
    ts.all = 0;

    /*
     * If the sum of the Timestamp value set by the host and the elapsed
     * time exceeds 2^48, the value returned should be reduced modulo 2^48.
     */
    ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff;

    /* If the host timestamp is non-zero, set the timestamp origin */
    ts.origin = n->host_timestamp ? 0x01 : 0x00;

    trace_nvme_getfeat_timestamp(ts.all);

    return cpu_to_le64(ts.all);
}

static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    uint64_t timestamp = nvme_get_timestamp(n);

    return nvme_dma_read_prp(n, (uint8_t *)&timestamp,
                                 sizeof(timestamp), prp1, prp2);
}

static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t result;

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        result = blk_enable_write_cache(n->conf.blk);
        trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
        break;
    case NVME_NUMBER_OF_QUEUES:
        result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        trace_nvme_getfeat_numq(result);
        break;
    case NVME_TIMESTAMP:
        return nvme_get_feature_timestamp(n, cmd);
        break;
    default:
        trace_nvme_err_invalid_getfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    req->cqe.result = result;
    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd)
{
    uint16_t ret;
    uint64_t timestamp;
    uint64_t prp1 = le64_to_cpu(cmd->prp1);
    uint64_t prp2 = le64_to_cpu(cmd->prp2);

    ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp,
                                sizeof(timestamp), prp1, prp2);
    if (ret != NVME_SUCCESS) {
        return ret;
    }

    nvme_set_timestamp(n, timestamp);

    return NVME_SUCCESS;
}

static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    uint32_t dw10 = le32_to_cpu(cmd->cdw10);
    uint32_t dw11 = le32_to_cpu(cmd->cdw11);

    switch (dw10) {
    case NVME_VOLATILE_WRITE_CACHE:
        blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
        break;
    case NVME_NUMBER_OF_QUEUES:
        trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
                                ((dw11 >> 16) & 0xFFFF) + 1,
                                n->num_queues - 1, n->num_queues - 1);
        req->cqe.result =
            cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
        break;

    case NVME_TIMESTAMP:
        return nvme_set_feature_timestamp(n, cmd);
        break;

    default:
        trace_nvme_err_invalid_setfeat(dw10);
        return NVME_INVALID_FIELD | NVME_DNR;
    }
    return NVME_SUCCESS;
}

static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
{
    switch (cmd->opcode) {
    case NVME_ADM_CMD_DELETE_SQ:
        return nvme_del_sq(n, cmd);
    case NVME_ADM_CMD_CREATE_SQ:
        return nvme_create_sq(n, cmd);
    case NVME_ADM_CMD_DELETE_CQ:
        return nvme_del_cq(n, cmd);
    case NVME_ADM_CMD_CREATE_CQ:
        return nvme_create_cq(n, cmd);
    case NVME_ADM_CMD_IDENTIFY:
        return nvme_identify(n, cmd);
    case NVME_ADM_CMD_SET_FEATURES:
        return nvme_set_feature(n, cmd, req);
    case NVME_ADM_CMD_GET_FEATURES:
        return nvme_get_feature(n, cmd, req);
    default:
        trace_nvme_err_invalid_admin_opc(cmd->opcode);
        return NVME_INVALID_OPCODE | NVME_DNR;
    }
}

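/*
 * Fetch commands from a submission queue and dispatch them: queue 0 carries
 * admin commands, all other queues carry I/O commands.  Commands that do not
 * complete immediately are finished later from their AIO callback.
 */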
static void nvme_process_sq(void *opaque)
{
    NvmeSQueue *sq = opaque;
    NvmeCtrl *n = sq->ctrl;
    NvmeCQueue *cq = n->cq[sq->cqid];

    uint16_t status;
    hwaddr addr;
    NvmeCmd cmd;
    NvmeRequest *req;

    while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
        addr = sq->dma_addr + sq->head * n->sqe_size;
        nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
        nvme_inc_sq_head(sq);

        req = QTAILQ_FIRST(&sq->req_list);
        QTAILQ_REMOVE(&sq->req_list, req, entry);
        QTAILQ_INSERT_TAIL(&sq->out_req_list, req, entry);
        memset(&req->cqe, 0, sizeof(req->cqe));
        req->cqe.cid = cmd.cid;

        status = sq->sqid ? nvme_io_cmd(n, &cmd, req) :
            nvme_admin_cmd(n, &cmd, req);
        if (status != NVME_NO_COMPLETE) {
            req->status = status;
            nvme_enqueue_req_completion(cq, req);
        }
    }
}

static void nvme_clear_ctrl(NvmeCtrl *n)
{
    int i;

    blk_drain(n->conf.blk);

    for (i = 0; i < n->num_queues; i++) {
        if (n->sq[i] != NULL) {
            nvme_free_sq(n->sq[i], n);
        }
    }
    for (i = 0; i < n->num_queues; i++) {
        if (n->cq[i] != NULL) {
            nvme_free_cq(n->cq[i], n);
        }
    }

    blk_flush(n->conf.blk);
    n->bar.cc = 0;
}

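/*
 * Called on the CC.EN 0 -> 1 transition: sanity-check the admin queue
 * addresses and the CC fields against CAP before creating the admin
 * queue pair.
 */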
static int nvme_start_ctrl(NvmeCtrl *n)
{
    uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
    uint32_t page_size = 1 << page_bits;

    if (unlikely(n->cq[0])) {
        trace_nvme_err_startfail_cq();
        return -1;
    }
    if (unlikely(n->sq[0])) {
        trace_nvme_err_startfail_sq();
        return -1;
    }
    if (unlikely(!n->bar.asq)) {
        trace_nvme_err_startfail_nbarasq();
        return -1;
    }
    if (unlikely(!n->bar.acq)) {
        trace_nvme_err_startfail_nbaracq();
        return -1;
    }
    if (unlikely(n->bar.asq & (page_size - 1))) {
        trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
        return -1;
    }
    if (unlikely(n->bar.acq & (page_size - 1))) {
        trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) <
                 NVME_CAP_MPSMIN(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_small(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_MPS(n->bar.cc) >
                 NVME_CAP_MPSMAX(n->bar.cap))) {
        trace_nvme_err_startfail_page_too_large(
                    NVME_CC_MPS(n->bar.cc),
                    NVME_CAP_MPSMAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
                 NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_small(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
                 NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
        trace_nvme_err_startfail_cqent_too_large(
                    NVME_CC_IOCQES(n->bar.cc),
                    NVME_CTRL_CQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
                 NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_small(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MIN(n->bar.cap));
        return -1;
    }
    if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
                 NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
        trace_nvme_err_startfail_sqent_too_large(
                    NVME_CC_IOSQES(n->bar.cc),
                    NVME_CTRL_SQES_MAX(n->bar.cap));
        return -1;
    }
    if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
        trace_nvme_err_startfail_asqent_sz_zero();
        return -1;
    }
    if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
        trace_nvme_err_startfail_acqent_sz_zero();
        return -1;
    }

    n->page_bits = page_bits;
    n->page_size = page_size;
    n->max_prp_ents = n->page_size / sizeof(uint64_t);
    n->cqe_size = 1 << NVME_CC_IOCQES(n->bar.cc);
    n->sqe_size = 1 << NVME_CC_IOSQES(n->bar.cc);
    nvme_init_cq(&n->admin_cq, n, n->bar.acq, 0, 0,
        NVME_AQA_ACQS(n->bar.aqa) + 1, 1);
    nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0,
        NVME_AQA_ASQS(n->bar.aqa) + 1);

    nvme_set_timestamp(n, 0ULL);

    return 0;
}

static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
    unsigned size)
{
    if (unlikely(offset & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
                       "MMIO write not 32-bit aligned,"
                       " offset=0x%"PRIx64"", offset);
        /* should be ignored, fall through for now */
    }

    if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
                       "MMIO write smaller than 32-bits,"
                       " offset=0x%"PRIx64", size=%u",
                       offset, size);
        /* should be ignored, fall through for now */
    }

    switch (offset) {
    case 0xc:   /* INTMS */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask set"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms |= data & 0xffffffff;
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_set(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x10:  /* INTMC */
        if (unlikely(msix_enabled(&(n->parent_obj)))) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
                           "undefined access to interrupt mask clr"
                           " when MSI-X is enabled");
            /* should be ignored, fall through for now */
        }
        n->bar.intms &= ~(data & 0xffffffff);
        n->bar.intmc = n->bar.intms;
        trace_nvme_mmio_intm_clr(data & 0xffffffff,
                                 n->bar.intmc);
        nvme_irq_check(n);
        break;
    case 0x14:  /* CC */
        trace_nvme_mmio_cfg(data & 0xffffffff);
        /* Windows first sends data, then sends enable bit */
        if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
            !NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
        {
            n->bar.cc = data;
        }

        if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
            n->bar.cc = data;
            if (unlikely(nvme_start_ctrl(n))) {
                trace_nvme_err_startfail();
                n->bar.csts = NVME_CSTS_FAILED;
            } else {
                trace_nvme_mmio_start_success();
                n->bar.csts = NVME_CSTS_READY;
            }
        } else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
            trace_nvme_mmio_stopped();
            nvme_clear_ctrl(n);
            n->bar.csts &= ~NVME_CSTS_READY;
        }
        if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
            trace_nvme_mmio_shutdown_set();
            nvme_clear_ctrl(n);
            n->bar.cc = data;
            n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
        } else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
            trace_nvme_mmio_shutdown_cleared();
            n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
            n->bar.cc = data;
        }
        break;
    case 0x1C:  /* CSTS */
        if (data & (1 << 4)) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
                           "attempted to W1C CSTS.NSSRO"
                           " but CAP.NSSRS is zero (not supported)");
        } else if (data != 0) {
            NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
                           "attempted to set a read only bit"
                           " of controller status");
        }
        break;
    case 0x20:  /* NSSR */
        if (data == 0x4E564D65) {
            trace_nvme_ub_mmiowr_ssreset_unsupported();
        } else {
            /* The spec says that writes of other values have no effect */
            return;
        }
        break;
    case 0x24:  /* AQA */
        n->bar.aqa = data & 0xffffffff;
        trace_nvme_mmio_aqattr(data & 0xffffffff);
        break;
    case 0x28:  /* ASQ */
        n->bar.asq = data;
        trace_nvme_mmio_asqaddr(data);
        break;
    case 0x2c:  /* ASQ hi */
        n->bar.asq |= data << 32;
        trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
        break;
    case 0x30:  /* ACQ */
        trace_nvme_mmio_acqaddr(data);
        n->bar.acq = data;
        break;
    case 0x34:  /* ACQ hi */
        n->bar.acq |= data << 32;
        trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
        break;
    case 0x38:  /* CMBLOC */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
                       "invalid write to reserved CMBLOC"
                       " when CMBSZ is zero, ignored");
        return;
    case 0x3C:  /* CMBSZ */
        NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
                       "invalid write to read only CMBSZ, ignored");
        return;
    default:
        NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
                       "invalid MMIO write,"
                       " offset=0x%"PRIx64", data=%"PRIx64"",
                       offset, data);
        break;
    }
}

static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    uint8_t *ptr = (uint8_t *)&n->bar;
    uint64_t val = 0;

    if (unlikely(addr & (sizeof(uint32_t) - 1))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
                       "MMIO read not 32-bit aligned,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    } else if (unlikely(size < sizeof(uint32_t))) {
        NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
                       "MMIO read smaller than 32-bits,"
                       " offset=0x%"PRIx64"", addr);
        /* should RAZ, fall through for now */
    }

    if (addr < sizeof(n->bar)) {
        memcpy(&val, ptr + addr, size);
    } else {
        NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
                       "MMIO read beyond last register,"
                       " offset=0x%"PRIx64", returning 0", addr);
    }

    return val;
}

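/*
 * Doorbell writes land at BAR0 offset 0x1000 + qid * 8 for submission queue
 * tails and 0x1000 + qid * 8 + 4 for completion queue heads, which is what
 * the (addr - 0x1000) arithmetic below decodes.
 */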
static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
{
    uint32_t qid;

    if (unlikely(addr & ((1 << 2) - 1))) {
        NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
                       "doorbell write not 32-bit aligned,"
                       " offset=0x%"PRIx64", ignoring", addr);
        return;
    }

    if (((addr - 0x1000) >> 2) & 1) {
        /* Completion queue doorbell write */

        uint16_t new_head = val & 0xffff;
        int start_sqs;
        NvmeCQueue *cq;

        qid = (addr - (0x1000 + (1 << 2))) >> 3;
        if (unlikely(nvme_check_cqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
                           "completion queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        cq = n->cq[qid];
        if (unlikely(new_head >= cq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
                           "completion queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_head=%"PRIu16", ignoring",
                           qid, new_head);
            return;
        }

        start_sqs = nvme_cq_full(cq) ? 1 : 0;
        cq->head = new_head;
        if (start_sqs) {
            NvmeSQueue *sq;
            QTAILQ_FOREACH(sq, &cq->sq_list, entry) {
                timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
            }
            timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
        }

        if (cq->tail == cq->head) {
            nvme_irq_deassert(n, cq);
        }
    } else {
        /* Submission queue doorbell write */

        uint16_t new_tail = val & 0xffff;
        NvmeSQueue *sq;

        qid = (addr - 0x1000) >> 3;
        if (unlikely(nvme_check_sqid(n, qid))) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
                           "submission queue doorbell write"
                           " for nonexistent queue,"
                           " sqid=%"PRIu32", ignoring", qid);
            return;
        }

        sq = n->sq[qid];
        if (unlikely(new_tail >= sq->size)) {
            NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
                           "submission queue doorbell write value"
                           " beyond queue size, sqid=%"PRIu32","
                           " new_tail=%"PRIu16", ignoring",
                           qid, new_tail);
            return;
        }

        sq->tail = new_tail;
        timer_mod(sq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 500);
    }
}

static void nvme_mmio_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    if (addr < sizeof(n->bar)) {
        nvme_write_bar(n, addr, data, size);
    } else if (addr >= 0x1000) {
        nvme_process_db(n, addr, data);
    }
}

static const MemoryRegionOps nvme_mmio_ops = {
    .read = nvme_mmio_read,
    .write = nvme_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};

static void nvme_cmb_write(void *opaque, hwaddr addr, uint64_t data,
    unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    stn_le_p(&n->cmbuf[addr], size, data);
}

static uint64_t nvme_cmb_read(void *opaque, hwaddr addr, unsigned size)
{
    NvmeCtrl *n = (NvmeCtrl *)opaque;
    return ldn_le_p(&n->cmbuf[addr], size);
}

static const MemoryRegionOps nvme_cmb_ops = {
    .read = nvme_cmb_read,
    .write = nvme_cmb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

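/*
 * Device realization: validate the drive/serial/num_queues properties, set
 * up the register BAR, MSI-X, the optional CMB BAR, and the Identify
 * Controller / Identify Namespace data structures.
 */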
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
    NvmeCtrl *n = NVME(pci_dev);
    NvmeIdCtrl *id = &n->id_ctrl;

    int i;
    int64_t bs_size;
    uint8_t *pci_conf;

    if (!n->num_queues) {
        error_setg(errp, "num_queues can't be zero");
        return;
    }

    if (!n->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    bs_size = blk_getlength(n->conf.blk);
    if (bs_size < 0) {
        error_setg(errp, "could not get backing file size");
        return;
    }

    if (!n->serial) {
        error_setg(errp, "serial property not set");
        return;
    }
    blkconf_blocksizes(&n->conf);
    if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
                                       false, errp)) {
        return;
    }

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;
    pci_config_set_prog_interface(pci_dev->config, 0x2);
    pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
    pcie_endpoint_cap_init(pci_dev, 0x80);

    n->num_namespaces = 1;
    n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
    n->ns_size = bs_size / (uint64_t)n->num_namespaces;

    n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
    n->sq = g_new0(NvmeSQueue *, n->num_queues);
    n->cq = g_new0(NvmeCQueue *, n->num_queues);

    memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
                          "nvme", n->reg_size);
    pci_register_bar(pci_dev, 0,
        PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
        &n->iomem);
    msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);

    id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
    id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
    strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
    strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
    strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
    id->rab = 6;
    id->ieee[0] = 0x00;
    id->ieee[1] = 0x02;
    id->ieee[2] = 0xb3;
    id->oacs = cpu_to_le16(0);
    id->frmw = 7 << 1;
    id->lpa = 1 << 0;
    id->sqes = (0x6 << 4) | 0x6;
    id->cqes = (0x4 << 4) | 0x4;
    id->nn = cpu_to_le32(n->num_namespaces);
    id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP);
    id->psd[0].mp = cpu_to_le16(0x9c4);
    id->psd[0].enlat = cpu_to_le32(0x10);
    id->psd[0].exlat = cpu_to_le32(0x4);
    if (blk_enable_write_cache(n->conf.blk)) {
        id->vwc = 1;
    }

    n->bar.cap = 0;
    NVME_CAP_SET_MQES(n->bar.cap, 0x7ff);
    NVME_CAP_SET_CQR(n->bar.cap, 1);
    NVME_CAP_SET_TO(n->bar.cap, 0xf);
    NVME_CAP_SET_CSS(n->bar.cap, 1);
    NVME_CAP_SET_MPSMAX(n->bar.cap, 4);

    n->bar.vs = 0x00010200;
    n->bar.intmc = n->bar.intms = 0;

    if (n->cmb_size_mb) {

        NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
        NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);

        NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
        NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
        NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
        NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);

        n->cmbloc = n->bar.cmbloc;
        n->cmbsz = n->bar.cmbsz;

        n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
                              "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
        pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
            PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
            PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);

    }

    for (i = 0; i < n->num_namespaces; i++) {
        NvmeNamespace *ns = &n->namespaces[i];
        NvmeIdNs *id_ns = &ns->id_ns;
        id_ns->nsfeat = 0;
        id_ns->nlbaf = 0;
        id_ns->flbas = 0;
        id_ns->mc = 0;
        id_ns->dpc = 0;
        id_ns->dps = 0;
        id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
        id_ns->ncap  = id_ns->nuse = id_ns->nsze =
            cpu_to_le64(n->ns_size >>
                id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
    }
}

static void nvme_exit(PCIDevice *pci_dev)
{
    NvmeCtrl *n = NVME(pci_dev);

    nvme_clear_ctrl(n);
    g_free(n->namespaces);
    g_free(n->cq);
    g_free(n->sq);

    if (n->cmb_size_mb) {
        g_free(n->cmbuf);
    }
    msix_uninit_exclusive_bar(pci_dev);
}

static Property nvme_props[] = {
    DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
    DEFINE_PROP_STRING("serial", NvmeCtrl, serial),
    DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, cmb_size_mb, 0),
    DEFINE_PROP_UINT32("num_queues", NvmeCtrl, num_queues, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription nvme_vmstate = {
    .name = "nvme",
    .unmigratable = 1,
};

static void nvme_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);

    pc->realize = nvme_realize;
    pc->exit = nvme_exit;
    pc->class_id = PCI_CLASS_STORAGE_EXPRESS;
    pc->vendor_id = PCI_VENDOR_ID_INTEL;
    pc->device_id = 0x5845;
    pc->revision = 2;

    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    dc->desc = "Non-Volatile Memory Express";
    dc->props = nvme_props;
    dc->vmsd = &nvme_vmstate;
}

static void nvme_instance_init(Object *obj)
{
    NvmeCtrl *s = NVME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/namespace@1,0",
                                  DEVICE(obj), &error_abort);
}

static const TypeInfo nvme_info = {
    .name          = TYPE_NVME,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(NvmeCtrl),
    .class_init    = nvme_class_init,
    .instance_init = nvme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

static void nvme_register_types(void)
{
    type_register_static(&nvme_info);
}

type_init(nvme_register_types)