/*
 * Copyright (C) 2017 NXP Semiconductors
 * Copyright (C) 2017 Bin Meng <[email protected]>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <pci.h>
#include <dm/device-internal.h>
#include "nvme.h"

struct nvme_info *nvme_info;

#define NVME_Q_DEPTH		2
#define NVME_AQ_DEPTH		2
#define NVME_SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
#define NVME_CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
#define ADMIN_TIMEOUT		60
#define IO_TIMEOUT		30
#define MAX_PRP_POOL		512

/*
 * An NVM Express queue. Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	struct nvme_command *sq_cmds;
	struct nvme_completion *cqes;
	wait_queue_head_t sq_full;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	unsigned long cmdid_data[];
};

static int nvme_wait_ready(struct nvme_dev *dev, bool enabled)
{
	u32 bit = enabled ? NVME_CSTS_RDY : 0;
	int timeout;
	ulong start;

	/* Timeout field in the CAP register is in 500 millisecond units */
	timeout = NVME_CAP_TIMEOUT(dev->cap) * 500;

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if ((readl(&dev->bar->csts) & NVME_CSTS_RDY) == bit)
			return 0;
	}

	return -ETIME;
}

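/**
 * nvme_setup_prps() - build the PRP entries for a data transfer
 *
 * PRP1 in the command covers the transfer up to the end of the first
 * memory page; this helper fills @prp2 for whatever remains. If the
 * remainder fits in one more page, @prp2 points at it directly;
 * otherwise @prp2 points at a PRP list built in dev->prp_pool, one
 * 64-bit entry per page, with the last entry of each pool page
 * chaining to the next page of the list.
 *
 * For example, a 16 KiB transfer from a page-aligned buffer with a
 * 4 KiB page size uses PRP1 for the first page and a three-entry PRP
 * list for the remaining three pages.
 *
 * @dev: NVMe controller the transfer is for
 * @prp2: returns the value to place in the command's PRP2 field
 * @total_len: total transfer length in bytes
 * @dma_addr: bus address of the data buffer
 * @return 0 on success, -ENOMEM if the PRP pool cannot be grown
 */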
static int nvme_setup_prps(struct nvme_dev *dev, u64 *prp2,
			   int total_len, u64 dma_addr)
{
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	u64 *prp_pool;
	int length = total_len;
	int i, nprps;
	length -= (page_size - offset);

	if (length <= 0) {
		*prp2 = 0;
		return 0;
	}

	if (length)
		dma_addr += (page_size - offset);

	if (length <= page_size) {
		*prp2 = dma_addr;
		return 0;
	}

	nprps = DIV_ROUND_UP(length, page_size);

	if (nprps > dev->prp_entry_num) {
		free(dev->prp_pool);
		dev->prp_pool = malloc(nprps << 3);
		if (!dev->prp_pool) {
			printf("Error: malloc prp_pool fail\n");
			return -ENOMEM;
		}
		dev->prp_entry_num = nprps;
	}

	prp_pool = dev->prp_pool;
	i = 0;
	while (nprps) {
		if (i == ((page_size >> 3) - 1)) {
			*(prp_pool + i) = cpu_to_le64((ulong)prp_pool +
					page_size);
			i = 0;
			prp_pool += page_size;
		}
		*(prp_pool + i++) = cpu_to_le64(dma_addr);
		dma_addr += page_size;
		nprps--;
	}
	*prp2 = (ulong)dev->prp_pool;

	return 0;
}

static __le16 nvme_get_cmd_id(void)
{
	static unsigned short cmdid;

	return cpu_to_le16((cmdid < USHRT_MAX) ? cmdid++ : 0);
}

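/**
 * nvme_read_completion_status() - read the status field of a CQ entry
 *
 * The completion queue is written by the controller, so the entry's
 * cache lines are invalidated before the status word is read.
 *
 * @nvmeq: completion queue to read from
 * @index: index of the completion entry
 * @return the 16-bit status field (phase tag in bit 0)
 */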
static u16 nvme_read_completion_status(struct nvme_queue *nvmeq, u16 index)
{
	u64 start = (ulong)&nvmeq->cqes[index];
	u64 stop = start + sizeof(struct nvme_completion);

	invalidate_dcache_range(start, stop);

	return le16_to_cpu(readw(&(nvmeq->cqes[index].status)));
}

/**
 * nvme_submit_cmd() - copy a command into a queue and ring the doorbell
 *
 * @nvmeq: The queue to use
 * @cmd: The command to send
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
	flush_dcache_range((ulong)&nvmeq->sq_cmds[tail],
			   (ulong)&nvmeq->sq_cmds[tail] + sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

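/**
 * nvme_submit_sync_cmd() - submit a command and poll for its completion
 *
 * The command is placed on the submission queue and the completion
 * queue is polled until the phase bit of the next entry flips, or
 * until the timeout expires. On completion the CQ head doorbell is
 * advanced.
 *
 * @nvmeq: queue pair to use
 * @cmd: command to send
 * @result: if non-NULL, returns dword 0 of the completion entry
 * @timeout: timeout for the command
 * @return 0 on success, -EIO if the controller reports an error
 *	   status, -ETIMEDOUT if no completion arrived in time
 */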
static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
				struct nvme_command *cmd,
				u32 *result, unsigned timeout)
{
	u16 head = nvmeq->cq_head;
	u16 phase = nvmeq->cq_phase;
	u16 status;
	ulong start_time;
	ulong timeout_us = timeout * 100000;

	cmd->common.command_id = nvme_get_cmd_id();
	nvme_submit_cmd(nvmeq, cmd);

	start_time = timer_get_us();

	for (;;) {
		status = nvme_read_completion_status(nvmeq, head);
		if ((status & 0x01) == phase)
			break;
		if (timeout_us > 0 && (timer_get_us() - start_time)
		    >= timeout_us)
			return -ETIMEDOUT;
	}

	status >>= 1;
	if (status) {
		printf("ERROR: status = %x, phase = %d, head = %d\n",
		       status, phase, head);
		status = 0;
		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
		nvmeq->cq_head = head;
		nvmeq->cq_phase = phase;

		return -EIO;
	}

	if (result)
		*result = le32_to_cpu(readl(&(nvmeq->cqes[head].result)));

	if (++head == nvmeq->q_depth) {
		head = 0;
		phase = !phase;
	}
	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	return status;
}

static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
				 u32 *result)
{
	return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}

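/**
 * nvme_alloc_queue() - allocate a submission/completion queue pair
 *
 * Both rings are allocated 4 KiB aligned and zeroed, and the queue is
 * registered in dev->queues[qid] with its doorbell pointer derived
 * from the controller's doorbell stride.
 *
 * @dev: controller to attach the queue to
 * @qid: queue identifier (0 is the admin queue)
 * @depth: number of entries in each ring
 * @return the new queue, or NULL on allocation failure
 */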
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev,
					   int qid, int depth)
{
	struct nvme_queue *nvmeq = malloc(sizeof(*nvmeq));
	if (!nvmeq)
		return NULL;
	memset(nvmeq, 0, sizeof(*nvmeq));

	nvmeq->cqes = (void *)memalign(4096, NVME_CQ_SIZE(depth));
	if (!nvmeq->cqes)
		goto free_nvmeq;
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(depth));

	nvmeq->sq_cmds = (void *)memalign(4096, NVME_SQ_SIZE(depth));
	if (!nvmeq->sq_cmds)
		goto free_queue;
	memset((void *)nvmeq->sq_cmds, 0, NVME_SQ_SIZE(depth));

	nvmeq->dev = dev;

	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	dev->queue_count++;
	dev->queues[qid] = nvmeq;

	return nvmeq;

free_queue:
	free((void *)nvmeq->cqes);
free_nvmeq:
	free(nvmeq);

	return NULL;
}

static int nvme_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

static int nvme_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return nvme_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

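/*
 * nvme_enable_ctrl()/nvme_disable_ctrl() - set or clear CC.EN and wait
 * for CSTS.RDY to reflect the new state via nvme_wait_ready().
 */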
static int nvme_enable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config |= NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, true);
}

static int nvme_disable_ctrl(struct nvme_dev *dev)
{
	dev->ctrl_config &= ~NVME_CC_SHN_MASK;
	dev->ctrl_config &= ~NVME_CC_ENABLE;
	writel(cpu_to_le32(dev->ctrl_config), &dev->bar->cc);

	return nvme_wait_ready(dev, false);
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	free((void *)nvmeq->cqes);
	free(nvmeq->sq_cmds);
	free(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, NVME_CQ_SIZE(nvmeq->q_depth));
	flush_dcache_range((ulong)nvmeq->cqes,
			   (ulong)nvmeq->cqes + NVME_CQ_SIZE(nvmeq->q_depth));
	dev->online_queues++;
}

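/**
 * nvme_configure_admin_queue() - bring up the admin queue pair
 *
 * Negotiates the memory page size against CAP.MPSMIN/MPSMAX, disables
 * the controller, allocates the admin SQ/CQ, programs AQA/ASQ/ACQ and
 * CC, and re-enables the controller.
 *
 * @dev: controller to configure
 * @return 0 on success, negative error code otherwise
 */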
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
	int result;
	u32 aqa;
	u64 cap = dev->cap;
	struct nvme_queue *nvmeq;
	/* most architectures use 4KB as the page size */
	unsigned page_shift = 12;
	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;

	if (page_shift < dev_page_min) {
		debug("Device minimum page size (%u) too large for host (%u)\n",
		      1 << dev_page_min, 1 << page_shift);
		return -ENODEV;
	}

	if (page_shift > dev_page_max) {
		debug("Device maximum page size (%u) smaller than host (%u)\n",
		      1 << dev_page_max, 1 << page_shift);
		page_shift = dev_page_max;
	}

	result = nvme_disable_ctrl(dev);
	if (result < 0)
		return result;

	nvmeq = dev->queues[0];
	if (!nvmeq) {
		nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
		if (!nvmeq)
			return -ENOMEM;
	}

	aqa = nvmeq->q_depth - 1;
	aqa |= aqa << 16;

	dev->page_size = 1 << page_shift;

	dev->ctrl_config = NVME_CC_CSS_NVM;
	dev->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT;
	dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
	dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;

	writel(aqa, &dev->bar->aqa);
	nvme_writeq((ulong)nvmeq->sq_cmds, &dev->bar->asq);
	nvme_writeq((ulong)nvmeq->cqes, &dev->bar->acq);

	result = nvme_enable_ctrl(dev);
	if (result)
		goto free_nvmeq;

	nvmeq->cq_vector = 0;

	nvme_init_queue(dev->queues[0], 0);

	return result;

free_nvmeq:
	nvme_free_queues(dev, 0);

	return result;
}

static int nvme_alloc_cq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64((ulong)nvmeq->cqes);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

static int nvme_alloc_sq(struct nvme_dev *dev, u16 qid,
			 struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;

	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64((ulong)nvmeq->sq_cmds);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

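/**
 * nvme_identify() - issue an Identify admin command
 *
 * The caller provides a DMA buffer large enough for the requested
 * data structure; PRP2 is set up here when that buffer crosses a
 * memory page boundary.
 *
 * @dev: controller to query
 * @nsid: namespace identifier (for CNS values that need one)
 * @cns: Controller or Namespace Structure selector
 * @dma_addr: bus address of the destination buffer
 * @return 0 on success, negative error code otherwise
 */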
int nvme_identify(struct nvme_dev *dev, unsigned nsid,
		  unsigned cns, dma_addr_t dma_addr)
{
	struct nvme_command c;
	u32 page_size = dev->page_size;
	int offset = dma_addr & (page_size - 1);
	int length = sizeof(struct nvme_id_ctrl);

	memset(&c, 0, sizeof(c));
	c.identify.opcode = nvme_admin_identify;
	c.identify.nsid = cpu_to_le32(nsid);
	c.identify.prp1 = cpu_to_le64(dma_addr);

	length -= (page_size - offset);
	if (length <= 0) {
		c.identify.prp2 = 0;
	} else {
		dma_addr += (page_size - offset);
		c.identify.prp2 = cpu_to_le64(dma_addr);
	}

	c.identify.cns = cpu_to_le32(cns);

	return nvme_submit_admin_cmd(dev, &c, NULL);
}

int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_get_features;
	c.features.nsid = cpu_to_le32(nsid);
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);

	return nvme_submit_admin_cmd(dev, &c, result);
}

int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		      dma_addr_t dma_addr, u32 *result)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.features.opcode = nvme_admin_set_features;
	c.features.prp1 = cpu_to_le64(dma_addr);
	c.features.fid = cpu_to_le32(fid);
	c.features.dword11 = cpu_to_le32(dword11);

	return nvme_submit_admin_cmd(dev, &c, result);
}

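/**
 * nvme_create_queue() - create an I/O queue pair on the controller
 *
 * The completion queue is created first, then the submission queue
 * that posts to it; on failure the corresponding delete commands are
 * issued to clean up.
 *
 * @nvmeq: previously allocated queue (see nvme_alloc_queue())
 * @qid: I/O queue identifier (1-based)
 * @return 0 on success, negative error code otherwise
 */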
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
{
	struct nvme_dev *dev = nvmeq->dev;
	int result;

	nvmeq->cq_vector = qid - 1;
	result = nvme_alloc_cq(dev, qid, nvmeq);
	if (result < 0)
		goto release_cq;

	result = nvme_alloc_sq(dev, qid, nvmeq);
	if (result < 0)
		goto release_sq;

	nvme_init_queue(nvmeq, qid);

	return result;

release_sq:
	nvme_delete_sq(dev, qid);
release_cq:
	nvme_delete_cq(dev, qid);

	return result;
}

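/**
 * nvme_set_queue_count() - request a number of I/O queues
 *
 * Issues Set Features (Number of Queues) with the requested count
 * encoded in both the submission and completion queue fields, and
 * returns the number of queue pairs the controller granted (the
 * smaller of the two values it reports, plus one).
 *
 * @dev: controller to configure
 * @count: number of I/O queue pairs requested
 * @return the granted queue count, 0 or a negative error code on failure
 */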
static int nvme_set_queue_count(struct nvme_dev *dev, int count)
{
	int status;
	u32 result;
	u32 q_count = (count - 1) | ((count - 1) << 16);

	status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES,
			q_count, 0, &result);

	if (status < 0)
		return status;
	if (status > 1)
		return 0;

	return min(result & 0xffff, result >> 16) + 1;
}

static void nvme_create_io_queues(struct nvme_dev *dev)
{
	unsigned int i;

	for (i = dev->queue_count; i <= dev->max_qid; i++)
		if (!nvme_alloc_queue(dev, i, dev->q_depth))
			break;

	for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
		if (nvme_create_queue(dev->queues[i], i))
			break;
}

static int nvme_setup_io_queues(struct nvme_dev *dev)
{
	int nr_io_queues;
	int result;

	nr_io_queues = 1;
	result = nvme_set_queue_count(dev, nr_io_queues);
	if (result <= 0)
		return result;

	if (result < nr_io_queues)
		nr_io_queues = result;

	dev->max_qid = nr_io_queues;

	/* Free previously allocated queues */
	nvme_free_queues(dev, nr_io_queues + 1);
	nvme_create_io_queues(dev);

	return 0;
}

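/**
 * nvme_get_info_from_identify() - cache controller data from Identify
 *
 * Reads the Identify Controller structure and stores the namespace
 * count, volatile write cache flag, serial number, model, firmware
 * revision and maximum transfer size in the driver's nvme_dev.
 *
 * @dev: controller to query
 * @return 0 on success, -EIO if the Identify command fails
 */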
static int nvme_get_info_from_identify(struct nvme_dev *dev)
{
	struct nvme_id_ctrl buf, *ctrl = &buf;
	int ret;
	int shift = NVME_CAP_MPSMIN(dev->cap) + 12;

	ret = nvme_identify(dev, 0, 1, (dma_addr_t)ctrl);
	if (ret)
		return -EIO;

	dev->nn = le32_to_cpu(ctrl->nn);
	dev->vwc = ctrl->vwc;
	memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
	memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
	memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
	if (ctrl->mdts)
		dev->max_transfer_shift = (ctrl->mdts + shift);
	else {
		/*
		 * Maximum Data Transfer Size (MDTS) field indicates the maximum
		 * data transfer size between the host and the controller. The
		 * host should not submit a command that exceeds this transfer
		 * size. The value is in units of the minimum memory page size
		 * and is reported as a power of two (2^n).
		 *
		 * The spec also says: a value of 0h indicates no restrictions
		 * on transfer size. But in nvme_blk_read/write() below we have
		 * the following algorithm for maximum number of logical blocks
		 * per transfer:
		 *
		 * u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
		 *
		 * In order for lbas not to overflow, the maximum number is 15
		 * which means dev->max_transfer_shift = 15 + 9 (ns->lba_shift).
		 * Let's use 20 which provides 1MB size.
		 */
		dev->max_transfer_shift = 20;
	}

	return 0;
}

int nvme_scan_namespace(void)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	ret = uclass_get(UCLASS_NVME, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			return ret;
	}

	return 0;
}

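/**
 * nvme_blk_probe() - probe one namespace as a block device
 *
 * Identifies the namespace that corresponds to this block device,
 * records its LBA format, and fills in the blk_desc (capacity, block
 * size, vendor/product/revision strings) before scanning partitions.
 *
 * @udev: block device whose parent is the NVMe controller
 * @return 0 on success, -EIO if the Identify Namespace command fails
 */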
static int nvme_blk_probe(struct udevice *udev)
{
	struct nvme_dev *ndev = dev_get_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct nvme_ns *ns = dev_get_priv(udev);
	u8 flbas;
	struct nvme_id_ns buf, *id = &buf;
	struct pci_child_platdata *pplat;

	memset(ns, 0, sizeof(*ns));
	ns->dev = ndev;
	ns->ns_id = desc->devnum - ndev->blk_dev_start + 1;
	if (nvme_identify(ndev, ns->ns_id, 0, (dma_addr_t)id))
		return -EIO;

	flbas = id->flbas & NVME_NS_FLBAS_LBA_MASK;
	ns->flbas = flbas;
	ns->lba_shift = id->lbaf[flbas].ds;
	ns->mode_select_num_blocks = le64_to_cpu(id->nsze);
	ns->mode_select_block_len = 1 << ns->lba_shift;
	list_add(&ns->list, &ndev->namespaces);

	desc->lba = ns->mode_select_num_blocks;
	desc->log2blksz = ns->lba_shift;
	desc->blksz = 1 << ns->lba_shift;
	desc->bdev = udev;
	pplat = dev_get_parent_platdata(udev->parent);
	sprintf(desc->vendor, "0x%.4x", pplat->vendor);
	memcpy(desc->product, ndev->serial, sizeof(ndev->serial));
	memcpy(desc->revision, ndev->firmware_rev, sizeof(ndev->firmware_rev));
	part_init(desc);

	return 0;
}

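/**
 * nvme_blk_read() - read blocks from a namespace
 *
 * Large requests are split into chunks of at most
 * 1 << (max_transfer_shift - lba_shift) logical blocks; each chunk is
 * issued as one NVMe Read command on I/O queue 1 with PRPs built by
 * nvme_setup_prps(). nvme_blk_write() below is the mirror image using
 * the Write opcode.
 *
 * @udev: block device to read from
 * @blknr: first logical block
 * @blkcnt: number of logical blocks
 * @buffer: destination buffer
 * @return the number of blocks transferred
 */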
static ulong nvme_blk_read(struct udevice *udev, lbaint_t blknr,
			   lbaint_t blkcnt, void *buffer)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	c.rw.opcode = nvme_cmd_read;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps
		   (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[1],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	return (total_len - temp_len) >> desc->log2blksz;
}

static ulong nvme_blk_write(struct udevice *udev, lbaint_t blknr,
			    lbaint_t blkcnt, const void *buffer)
{
	struct nvme_ns *ns = dev_get_priv(udev);
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int status;
	u64 prp2;
	u64 total_len = blkcnt << desc->log2blksz;
	u64 temp_len = total_len;

	u64 slba = blknr;
	u16 lbas = 1 << (dev->max_transfer_shift - ns->lba_shift);
	u64 total_lbas = blkcnt;

	c.rw.opcode = nvme_cmd_write;
	c.rw.flags = 0;
	c.rw.nsid = cpu_to_le32(ns->ns_id);
	c.rw.control = 0;
	c.rw.dsmgmt = 0;
	c.rw.reftag = 0;
	c.rw.apptag = 0;
	c.rw.appmask = 0;
	c.rw.metadata = 0;

	while (total_lbas) {
		if (total_lbas < lbas) {
			lbas = (u16)total_lbas;
			total_lbas = 0;
		} else {
			total_lbas -= lbas;
		}

		if (nvme_setup_prps
		   (dev, &prp2, lbas << ns->lba_shift, (ulong)buffer))
			return -EIO;
		c.rw.slba = cpu_to_le64(slba);
		slba += lbas;
		c.rw.length = cpu_to_le16(lbas - 1);
		c.rw.prp1 = cpu_to_le64((ulong)buffer);
		c.rw.prp2 = cpu_to_le64(prp2);
		status = nvme_submit_sync_cmd(dev->queues[1],
				&c, NULL, IO_TIMEOUT);
		if (status)
			break;
		temp_len -= lbas << ns->lba_shift;
		buffer += lbas << ns->lba_shift;
	}

	return (total_len - temp_len) >> desc->log2blksz;
}

static const struct blk_ops nvme_blk_ops = {
	.read	= nvme_blk_read,
	.write	= nvme_blk_write,
};

U_BOOT_DRIVER(nvme_blk) = {
	.name	= "nvme-blk",
	.id	= UCLASS_BLK,
	.probe	= nvme_blk_probe,
	.ops	= &nvme_blk_ops,
	.priv_auto_alloc_size = sizeof(struct nvme_ns),
};

static int nvme_bind(struct udevice *udev)
{
	char name[20];
	sprintf(name, "nvme#%d", nvme_info->ndev_num++);

	return device_set_name(udev, name);
}

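/**
 * nvme_probe() - initialise an NVMe controller
 *
 * Maps BAR0, allocates the queue pointer array and PRP pool, reads
 * the CAP register, sets up the admin queue and one I/O queue, and
 * caches the Identify Controller data.
 *
 * @udev: NVMe controller (PCI device)
 * @return 0 on success, negative error code otherwise
 */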
static int nvme_probe(struct udevice *udev)
{
	int ret;
	struct nvme_dev *ndev = dev_get_priv(udev);

	ndev->instance = trailing_strtol(udev->name);

	INIT_LIST_HEAD(&ndev->namespaces);
	ndev->bar = dm_pci_map_bar(udev, PCI_BASE_ADDRESS_0,
			PCI_REGION_MEM);
	if (readl(&ndev->bar->csts) == -1) {
		ret = -ENODEV;
		printf("Error: %s: NVMe controller not accessible\n",
		       udev->name);
		goto free_nvme;
	}

	ndev->queues = malloc(2 * sizeof(struct nvme_queue *));
	if (!ndev->queues) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	memset(ndev->queues, 0, 2 * sizeof(struct nvme_queue *));

	ndev->prp_pool = malloc(MAX_PRP_POOL);
	if (!ndev->prp_pool) {
		ret = -ENOMEM;
		printf("Error: %s: Out of memory!\n", udev->name);
		goto free_nvme;
	}
	ndev->prp_entry_num = MAX_PRP_POOL >> 3;

	ndev->cap = nvme_readq(&ndev->bar->cap);
	ndev->q_depth = min_t(int, NVME_CAP_MQES(ndev->cap) + 1, NVME_Q_DEPTH);
	ndev->db_stride = 1 << NVME_CAP_STRIDE(ndev->cap);
	ndev->dbs = ((void __iomem *)ndev->bar) + 4096;

	ret = nvme_configure_admin_queue(ndev);
	if (ret)
		goto free_queue;

	ret = nvme_setup_io_queues(ndev);
	if (ret)
		goto free_queue;

	nvme_get_info_from_identify(ndev);
	ndev->blk_dev_start = nvme_info->ns_num;
	list_add(&ndev->node, &nvme_info->dev_list);

	return 0;

free_queue:
	free((void *)ndev->queues);
free_nvme:
	return ret;
}

U_BOOT_DRIVER(nvme) = {
	.name	= "nvme",
	.id	= UCLASS_NVME,
	.bind	= nvme_bind,
	.probe	= nvme_probe,
	.priv_auto_alloc_size = sizeof(struct nvme_dev),
};

struct pci_device_id nvme_supported[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ~0) },
	{}
};

U_BOOT_PCI_DEVICE(nvme, nvme_supported);