]>
Commit | Line | Data |
---|---|---|
bc50ad75 | 1 | // SPDX-License-Identifier: GPL-2.0 |
21d34711 CH |
2 | /* |
3 | * NVM Express device driver | |
4 | * Copyright (c) 2011-2014, Intel Corporation. | |
21d34711 CH |
5 | */ |
6 | ||
7 | #include <linux/blkdev.h> | |
8 | #include <linux/blk-mq.h> | |
5fd4ce1b | 9 | #include <linux/delay.h> |
21d34711 | 10 | #include <linux/errno.h> |
1673f1f0 | 11 | #include <linux/hdreg.h> |
21d34711 | 12 | #include <linux/kernel.h> |
5bae7f73 | 13 | #include <linux/module.h> |
958f2a0f | 14 | #include <linux/backing-dev.h> |
5bae7f73 | 15 | #include <linux/list_sort.h> |
21d34711 CH |
16 | #include <linux/slab.h> |
17 | #include <linux/types.h> | |
1673f1f0 CH |
18 | #include <linux/pr.h> |
19 | #include <linux/ptrace.h> | |
20 | #include <linux/nvme_ioctl.h> | |
21 | #include <linux/t10-pi.h> | |
c5552fde | 22 | #include <linux/pm_qos.h> |
1673f1f0 | 23 | #include <asm/unaligned.h> |
21d34711 CH |
24 | |
25 | #include "nvme.h" | |
038bd4cb | 26 | #include "fabrics.h" |
21d34711 | 27 | |
35fe0d12 HR |
28 | #define CREATE_TRACE_POINTS |
29 | #include "trace.h" | |
30 | ||
f3ca80fc CH |
31 | #define NVME_MINORS (1U << MINORBITS) |
32 | ||
8ae4e447 MO |
33 | unsigned int admin_timeout = 60; |
34 | module_param(admin_timeout, uint, 0644); | |
ba0ba7d3 | 35 | MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands"); |
576d55d6 | 36 | EXPORT_SYMBOL_GPL(admin_timeout); |
ba0ba7d3 | 37 | |
8ae4e447 MO |
38 | unsigned int nvme_io_timeout = 30; |
39 | module_param_named(io_timeout, nvme_io_timeout, uint, 0644); | |
ba0ba7d3 | 40 | MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); |
576d55d6 | 41 | EXPORT_SYMBOL_GPL(nvme_io_timeout); |
ba0ba7d3 | 42 | |
b3b1b0b0 | 43 | static unsigned char shutdown_timeout = 5; |
ba0ba7d3 ML |
44 | module_param(shutdown_timeout, byte, 0644); |
45 | MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); | |
46 | ||
44e44b29 CH |
47 | static u8 nvme_max_retries = 5; |
48 | module_param_named(max_retries, nvme_max_retries, byte, 0644); | |
f80ec966 | 49 | MODULE_PARM_DESC(max_retries, "max number of retries a command may have"); |
5bae7f73 | 50 | |
9947d6a0 | 51 | static unsigned long default_ps_max_latency_us = 100000; |
c5552fde AL |
52 | module_param(default_ps_max_latency_us, ulong, 0644); |
53 | MODULE_PARM_DESC(default_ps_max_latency_us, | |
54 | "max power saving latency for new devices; use PM QOS to change per device"); | |
55 | ||
c35e30b4 AL |
56 | static bool force_apst; |
57 | module_param(force_apst, bool, 0644); | |
58 | MODULE_PARM_DESC(force_apst, "allow APST for newly enumerated devices even if quirked off"); | |
59 | ||
f5d11840 JA |
60 | static bool streams; |
61 | module_param(streams, bool, 0644); | |
62 | MODULE_PARM_DESC(streams, "turn on support for Streams write directives"); | |
63 | ||
b227c59b RS |
64 | /* |
65 | * nvme_wq - hosts nvme related works that are not reset or delete | |
66 | * nvme_reset_wq - hosts nvme reset works | |
67 | * nvme_delete_wq - hosts nvme delete works | |
68 | * | |
69 | * nvme_wq will host works such are scan, aen handling, fw activation, | |
70 | * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq | |
71 | * runs reset works which also flush works hosted on nvme_wq for | |
72 | * serialization purposes. nvme_delete_wq host controller deletion | |
73 | * works which flush reset works for serialization. | |
74 | */ | |
9a6327d2 SG |
75 | struct workqueue_struct *nvme_wq; |
76 | EXPORT_SYMBOL_GPL(nvme_wq); | |
77 | ||
b227c59b RS |
78 | struct workqueue_struct *nvme_reset_wq; |
79 | EXPORT_SYMBOL_GPL(nvme_reset_wq); | |
80 | ||
81 | struct workqueue_struct *nvme_delete_wq; | |
82 | EXPORT_SYMBOL_GPL(nvme_delete_wq); | |
83 | ||
ab9e00cc CH |
84 | static DEFINE_IDA(nvme_subsystems_ida); |
85 | static LIST_HEAD(nvme_subsystems); | |
86 | static DEFINE_MUTEX(nvme_subsystems_lock); | |
1673f1f0 | 87 | |
9843f685 | 88 | static DEFINE_IDA(nvme_instance_ida); |
a6a5149b | 89 | static dev_t nvme_chr_devt; |
f3ca80fc | 90 | static struct class *nvme_class; |
ab9e00cc | 91 | static struct class *nvme_subsys_class; |
f3ca80fc | 92 | |
84fef62d | 93 | static int nvme_revalidate_disk(struct gendisk *disk); |
12d9f070 | 94 | static void nvme_put_subsystem(struct nvme_subsystem *subsys); |
cf39a6bc SB |
95 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, |
96 | unsigned nsid); | |
97 | ||
98 | static void nvme_set_queue_dying(struct nvme_ns *ns) | |
99 | { | |
100 | /* | |
101 | * Revalidating a dead namespace sets capacity to 0. This will end | |
102 | * buffered writers dirtying pages that can't be synced. | |
103 | */ | |
104 | if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) | |
105 | return; | |
106 | revalidate_disk(ns->disk); | |
107 | blk_set_queue_dying(ns->queue); | |
108 | /* Forcibly unquiesce queues to avoid blocking dispatch */ | |
109 | blk_mq_unquiesce_queue(ns->queue); | |
110 | } | |
f3ca80fc | 111 | |
50e8d8ee CH |
112 | static void nvme_queue_scan(struct nvme_ctrl *ctrl) |
113 | { | |
114 | /* | |
115 | * Only new queue scan work when admin and IO queues are both alive | |
116 | */ | |
117 | if (ctrl->state == NVME_CTRL_LIVE) | |
118 | queue_work(nvme_wq, &ctrl->scan_work); | |
119 | } | |
120 | ||
d86c4d8e CH |
121 | int nvme_reset_ctrl(struct nvme_ctrl *ctrl) |
122 | { | |
123 | if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) | |
124 | return -EBUSY; | |
b227c59b | 125 | if (!queue_work(nvme_reset_wq, &ctrl->reset_work)) |
d86c4d8e CH |
126 | return -EBUSY; |
127 | return 0; | |
128 | } | |
129 | EXPORT_SYMBOL_GPL(nvme_reset_ctrl); | |
130 | ||
79c48ccf | 131 | int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl) |
d86c4d8e CH |
132 | { |
133 | int ret; | |
134 | ||
135 | ret = nvme_reset_ctrl(ctrl); | |
8000d1fd | 136 | if (!ret) { |
d86c4d8e | 137 | flush_work(&ctrl->reset_work); |
4e50d9eb CM |
138 | if (ctrl->state != NVME_CTRL_LIVE && |
139 | ctrl->state != NVME_CTRL_ADMIN_ONLY) | |
8000d1fd NC |
140 | ret = -ENETRESET; |
141 | } | |
142 | ||
d86c4d8e CH |
143 | return ret; |
144 | } | |
79c48ccf | 145 | EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync); |
d86c4d8e | 146 | |
a686ed75 | 147 | static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl) |
c5017e85 | 148 | { |
77d0612d MG |
149 | dev_info(ctrl->device, |
150 | "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn); | |
151 | ||
4054637c | 152 | flush_work(&ctrl->reset_work); |
6cd53d14 CH |
153 | nvme_stop_ctrl(ctrl); |
154 | nvme_remove_namespaces(ctrl); | |
c5017e85 | 155 | ctrl->ops->delete_ctrl(ctrl); |
6cd53d14 CH |
156 | nvme_uninit_ctrl(ctrl); |
157 | nvme_put_ctrl(ctrl); | |
c5017e85 CH |
158 | } |
159 | ||
a686ed75 BVA |
160 | static void nvme_delete_ctrl_work(struct work_struct *work) |
161 | { | |
162 | struct nvme_ctrl *ctrl = | |
163 | container_of(work, struct nvme_ctrl, delete_work); | |
164 | ||
165 | nvme_do_delete_ctrl(ctrl); | |
166 | } | |
167 | ||
c5017e85 CH |
168 | int nvme_delete_ctrl(struct nvme_ctrl *ctrl) |
169 | { | |
170 | if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) | |
171 | return -EBUSY; | |
b227c59b | 172 | if (!queue_work(nvme_delete_wq, &ctrl->delete_work)) |
c5017e85 CH |
173 | return -EBUSY; |
174 | return 0; | |
175 | } | |
176 | EXPORT_SYMBOL_GPL(nvme_delete_ctrl); | |
177 | ||
d84c4b02 | 178 | static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) |
c5017e85 CH |
179 | { |
180 | int ret = 0; | |
181 | ||
182 | /* | |
01fc08ff YY |
183 | * Keep a reference until nvme_do_delete_ctrl() complete, |
184 | * since ->delete_ctrl can free the controller. | |
c5017e85 CH |
185 | */ |
186 | nvme_get_ctrl(ctrl); | |
b9c77583 BVA |
187 | if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) |
188 | ret = -EBUSY; | |
c5017e85 | 189 | if (!ret) |
b9c77583 | 190 | nvme_do_delete_ctrl(ctrl); |
c5017e85 CH |
191 | nvme_put_ctrl(ctrl); |
192 | return ret; | |
193 | } | |
c5017e85 | 194 | |
715ea9e0 CH |
195 | static inline bool nvme_ns_has_pi(struct nvme_ns *ns) |
196 | { | |
197 | return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); | |
198 | } | |
199 | ||
2f9c1736 | 200 | static blk_status_t nvme_error_status(u16 status) |
27fa9bc5 | 201 | { |
2f9c1736 | 202 | switch (status & 0x7ff) { |
27fa9bc5 | 203 | case NVME_SC_SUCCESS: |
2a842aca | 204 | return BLK_STS_OK; |
27fa9bc5 | 205 | case NVME_SC_CAP_EXCEEDED: |
2a842aca | 206 | return BLK_STS_NOSPC; |
e96fef2c KB |
207 | case NVME_SC_LBA_RANGE: |
208 | return BLK_STS_TARGET; | |
209 | case NVME_SC_BAD_ATTRIBUTES: | |
e02ab023 | 210 | case NVME_SC_ONCS_NOT_SUPPORTED: |
e96fef2c KB |
211 | case NVME_SC_INVALID_OPCODE: |
212 | case NVME_SC_INVALID_FIELD: | |
213 | case NVME_SC_INVALID_NS: | |
2a842aca | 214 | return BLK_STS_NOTSUPP; |
e02ab023 JG |
215 | case NVME_SC_WRITE_FAULT: |
216 | case NVME_SC_READ_ERROR: | |
217 | case NVME_SC_UNWRITTEN_BLOCK: | |
a751da33 CH |
218 | case NVME_SC_ACCESS_DENIED: |
219 | case NVME_SC_READ_ONLY: | |
e96fef2c | 220 | case NVME_SC_COMPARE_FAILED: |
2a842aca | 221 | return BLK_STS_MEDIUM; |
a751da33 CH |
222 | case NVME_SC_GUARD_CHECK: |
223 | case NVME_SC_APPTAG_CHECK: | |
224 | case NVME_SC_REFTAG_CHECK: | |
225 | case NVME_SC_INVALID_PI: | |
226 | return BLK_STS_PROTECTION; | |
227 | case NVME_SC_RESERVATION_CONFLICT: | |
228 | return BLK_STS_NEXUS; | |
1c0d12c0 SG |
229 | case NVME_SC_HOST_PATH_ERROR: |
230 | return BLK_STS_TRANSPORT; | |
2a842aca CH |
231 | default: |
232 | return BLK_STS_IOERR; | |
27fa9bc5 CH |
233 | } |
234 | } | |
27fa9bc5 | 235 | |
f6324b1b | 236 | static inline bool nvme_req_needs_retry(struct request *req) |
77f02a7a | 237 | { |
f6324b1b CH |
238 | if (blk_noretry_request(req)) |
239 | return false; | |
27fa9bc5 | 240 | if (nvme_req(req)->status & NVME_SC_DNR) |
f6324b1b | 241 | return false; |
44e44b29 | 242 | if (nvme_req(req)->retries >= nvme_max_retries) |
f6324b1b CH |
243 | return false; |
244 | return true; | |
77f02a7a CH |
245 | } |
246 | ||
49cd84b6 KB |
247 | static void nvme_retry_req(struct request *req) |
248 | { | |
249 | struct nvme_ns *ns = req->q->queuedata; | |
250 | unsigned long delay = 0; | |
251 | u16 crd; | |
252 | ||
253 | /* The mask and shift result must be <= 3 */ | |
254 | crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; | |
255 | if (ns && crd) | |
256 | delay = ns->ctrl->crdt[crd - 1] * 100; | |
257 | ||
258 | nvme_req(req)->retries++; | |
259 | blk_mq_requeue_request(req, false); | |
260 | blk_mq_delay_kick_requeue_list(req->q, delay); | |
261 | } | |
262 | ||
77f02a7a CH |
263 | void nvme_complete_rq(struct request *req) |
264 | { | |
2f9c1736 | 265 | blk_status_t status = nvme_error_status(nvme_req(req)->status); |
908e4564 | 266 | |
ca5554a6 JT |
267 | trace_nvme_complete_rq(req); |
268 | ||
6e3ca03e SG |
269 | if (nvme_req(req)->ctrl->kas) |
270 | nvme_req(req)->ctrl->comp_seen = true; | |
271 | ||
908e4564 | 272 | if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { |
8decf5d5 CH |
273 | if ((req->cmd_flags & REQ_NVME_MPATH) && |
274 | blk_path_error(status)) { | |
32acab31 CH |
275 | nvme_failover_req(req); |
276 | return; | |
277 | } | |
278 | ||
279 | if (!blk_queue_dying(req->q)) { | |
49cd84b6 | 280 | nvme_retry_req(req); |
32acab31 CH |
281 | return; |
282 | } | |
77f02a7a | 283 | } |
35fe0d12 HR |
284 | |
285 | nvme_trace_bio_complete(req, status); | |
908e4564 | 286 | blk_mq_end_request(req, status); |
77f02a7a CH |
287 | } |
288 | EXPORT_SYMBOL_GPL(nvme_complete_rq); | |
289 | ||
7baa8572 | 290 | bool nvme_cancel_request(struct request *req, void *data, bool reserved) |
c55a2fd4 | 291 | { |
c55a2fd4 ML |
292 | dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device, |
293 | "Cancelling I/O %d", req->tag); | |
294 | ||
78ca4072 ML |
295 | /* don't abort one completed request */ |
296 | if (blk_mq_request_completed(req)) | |
297 | return true; | |
298 | ||
1c0d12c0 | 299 | nvme_req(req)->status = NVME_SC_HOST_PATH_ERROR; |
a87ccce0 | 300 | blk_mq_complete_request(req); |
7baa8572 | 301 | return true; |
c55a2fd4 ML |
302 | } |
303 | EXPORT_SYMBOL_GPL(nvme_cancel_request); | |
304 | ||
bb8d261e CH |
305 | bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, |
306 | enum nvme_ctrl_state new_state) | |
307 | { | |
f6b6a28e | 308 | enum nvme_ctrl_state old_state; |
0a72bbba | 309 | unsigned long flags; |
bb8d261e CH |
310 | bool changed = false; |
311 | ||
0a72bbba | 312 | spin_lock_irqsave(&ctrl->lock, flags); |
f6b6a28e GKB |
313 | |
314 | old_state = ctrl->state; | |
bb8d261e | 315 | switch (new_state) { |
2b1b7e78 JW |
316 | case NVME_CTRL_ADMIN_ONLY: |
317 | switch (old_state) { | |
ad6a0a52 | 318 | case NVME_CTRL_CONNECTING: |
2b1b7e78 JW |
319 | changed = true; |
320 | /* FALLTHRU */ | |
321 | default: | |
322 | break; | |
323 | } | |
324 | break; | |
bb8d261e CH |
325 | case NVME_CTRL_LIVE: |
326 | switch (old_state) { | |
7d2e8008 | 327 | case NVME_CTRL_NEW: |
bb8d261e | 328 | case NVME_CTRL_RESETTING: |
ad6a0a52 | 329 | case NVME_CTRL_CONNECTING: |
bb8d261e CH |
330 | changed = true; |
331 | /* FALLTHRU */ | |
332 | default: | |
333 | break; | |
334 | } | |
335 | break; | |
336 | case NVME_CTRL_RESETTING: | |
337 | switch (old_state) { | |
338 | case NVME_CTRL_NEW: | |
def61eca | 339 | case NVME_CTRL_LIVE: |
2b1b7e78 | 340 | case NVME_CTRL_ADMIN_ONLY: |
def61eca CH |
341 | changed = true; |
342 | /* FALLTHRU */ | |
343 | default: | |
344 | break; | |
345 | } | |
346 | break; | |
ad6a0a52 | 347 | case NVME_CTRL_CONNECTING: |
def61eca | 348 | switch (old_state) { |
b754a32c | 349 | case NVME_CTRL_NEW: |
3cec7f9d | 350 | case NVME_CTRL_RESETTING: |
bb8d261e CH |
351 | changed = true; |
352 | /* FALLTHRU */ | |
353 | default: | |
354 | break; | |
355 | } | |
356 | break; | |
357 | case NVME_CTRL_DELETING: | |
358 | switch (old_state) { | |
359 | case NVME_CTRL_LIVE: | |
2b1b7e78 | 360 | case NVME_CTRL_ADMIN_ONLY: |
bb8d261e | 361 | case NVME_CTRL_RESETTING: |
ad6a0a52 | 362 | case NVME_CTRL_CONNECTING: |
bb8d261e CH |
363 | changed = true; |
364 | /* FALLTHRU */ | |
365 | default: | |
366 | break; | |
367 | } | |
368 | break; | |
0ff9d4e1 KB |
369 | case NVME_CTRL_DEAD: |
370 | switch (old_state) { | |
371 | case NVME_CTRL_DELETING: | |
372 | changed = true; | |
373 | /* FALLTHRU */ | |
374 | default: | |
375 | break; | |
376 | } | |
377 | break; | |
bb8d261e CH |
378 | default: |
379 | break; | |
380 | } | |
bb8d261e CH |
381 | |
382 | if (changed) | |
383 | ctrl->state = new_state; | |
384 | ||
0a72bbba | 385 | spin_unlock_irqrestore(&ctrl->lock, flags); |
32acab31 CH |
386 | if (changed && ctrl->state == NVME_CTRL_LIVE) |
387 | nvme_kick_requeue_lists(ctrl); | |
bb8d261e CH |
388 | return changed; |
389 | } | |
390 | EXPORT_SYMBOL_GPL(nvme_change_ctrl_state); | |
391 | ||
ed754e5d CH |
392 | static void nvme_free_ns_head(struct kref *ref) |
393 | { | |
394 | struct nvme_ns_head *head = | |
395 | container_of(ref, struct nvme_ns_head, ref); | |
396 | ||
32acab31 | 397 | nvme_mpath_remove_disk(head); |
ed754e5d CH |
398 | ida_simple_remove(&head->subsys->ns_ida, head->instance); |
399 | list_del_init(&head->entry); | |
f5ad3991 | 400 | cleanup_srcu_struct(&head->srcu); |
12d9f070 | 401 | nvme_put_subsystem(head->subsys); |
ed754e5d CH |
402 | kfree(head); |
403 | } | |
404 | ||
405 | static void nvme_put_ns_head(struct nvme_ns_head *head) | |
406 | { | |
407 | kref_put(&head->ref, nvme_free_ns_head); | |
408 | } | |
409 | ||
1673f1f0 CH |
410 | static void nvme_free_ns(struct kref *kref) |
411 | { | |
412 | struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); | |
413 | ||
b0b4e09c MB |
414 | if (ns->ndev) |
415 | nvme_nvm_unregister(ns); | |
1673f1f0 | 416 | |
1673f1f0 | 417 | put_disk(ns->disk); |
ed754e5d | 418 | nvme_put_ns_head(ns->head); |
075790eb | 419 | nvme_put_ctrl(ns->ctrl); |
1673f1f0 CH |
420 | kfree(ns); |
421 | } | |
422 | ||
5bae7f73 | 423 | static void nvme_put_ns(struct nvme_ns *ns) |
1673f1f0 CH |
424 | { |
425 | kref_put(&ns->kref, nvme_free_ns); | |
426 | } | |
427 | ||
bb06ec31 JS |
428 | static inline void nvme_clear_nvme_request(struct request *req) |
429 | { | |
430 | if (!(req->rq_flags & RQF_DONTPREP)) { | |
431 | nvme_req(req)->retries = 0; | |
432 | nvme_req(req)->flags = 0; | |
433 | req->rq_flags |= RQF_DONTPREP; | |
434 | } | |
435 | } | |
436 | ||
4160982e | 437 | struct request *nvme_alloc_request(struct request_queue *q, |
9a95e4ef | 438 | struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid) |
21d34711 | 439 | { |
aebf526b | 440 | unsigned op = nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN; |
21d34711 | 441 | struct request *req; |
21d34711 | 442 | |
eb71f435 | 443 | if (qid == NVME_QID_ANY) { |
aebf526b | 444 | req = blk_mq_alloc_request(q, op, flags); |
eb71f435 | 445 | } else { |
aebf526b | 446 | req = blk_mq_alloc_request_hctx(q, op, flags, |
eb71f435 CH |
447 | qid ? qid - 1 : 0); |
448 | } | |
21d34711 | 449 | if (IS_ERR(req)) |
4160982e | 450 | return req; |
21d34711 | 451 | |
21d34711 | 452 | req->cmd_flags |= REQ_FAILFAST_DRIVER; |
bb06ec31 | 453 | nvme_clear_nvme_request(req); |
d49187e9 | 454 | nvme_req(req)->cmd = cmd; |
21d34711 | 455 | |
4160982e CH |
456 | return req; |
457 | } | |
576d55d6 | 458 | EXPORT_SYMBOL_GPL(nvme_alloc_request); |
4160982e | 459 | |
f5d11840 JA |
460 | static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable) |
461 | { | |
462 | struct nvme_command c; | |
463 | ||
464 | memset(&c, 0, sizeof(c)); | |
465 | ||
466 | c.directive.opcode = nvme_admin_directive_send; | |
62346eae | 467 | c.directive.nsid = cpu_to_le32(NVME_NSID_ALL); |
f5d11840 JA |
468 | c.directive.doper = NVME_DIR_SND_ID_OP_ENABLE; |
469 | c.directive.dtype = NVME_DIR_IDENTIFY; | |
470 | c.directive.tdtype = NVME_DIR_STREAMS; | |
471 | c.directive.endir = enable ? NVME_DIR_ENDIR : 0; | |
472 | ||
473 | return nvme_submit_sync_cmd(ctrl->admin_q, &c, NULL, 0); | |
474 | } | |
475 | ||
476 | static int nvme_disable_streams(struct nvme_ctrl *ctrl) | |
477 | { | |
478 | return nvme_toggle_streams(ctrl, false); | |
479 | } | |
480 | ||
481 | static int nvme_enable_streams(struct nvme_ctrl *ctrl) | |
482 | { | |
483 | return nvme_toggle_streams(ctrl, true); | |
484 | } | |
485 | ||
486 | static int nvme_get_stream_params(struct nvme_ctrl *ctrl, | |
487 | struct streams_directive_params *s, u32 nsid) | |
488 | { | |
489 | struct nvme_command c; | |
490 | ||
491 | memset(&c, 0, sizeof(c)); | |
492 | memset(s, 0, sizeof(*s)); | |
493 | ||
494 | c.directive.opcode = nvme_admin_directive_recv; | |
495 | c.directive.nsid = cpu_to_le32(nsid); | |
a082b426 | 496 | c.directive.numd = cpu_to_le32((sizeof(*s) >> 2) - 1); |
f5d11840 JA |
497 | c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; |
498 | c.directive.dtype = NVME_DIR_STREAMS; | |
499 | ||
500 | return nvme_submit_sync_cmd(ctrl->admin_q, &c, s, sizeof(*s)); | |
501 | } | |
502 | ||
503 | static int nvme_configure_directives(struct nvme_ctrl *ctrl) | |
504 | { | |
505 | struct streams_directive_params s; | |
506 | int ret; | |
507 | ||
508 | if (!(ctrl->oacs & NVME_CTRL_OACS_DIRECTIVES)) | |
509 | return 0; | |
510 | if (!streams) | |
511 | return 0; | |
512 | ||
513 | ret = nvme_enable_streams(ctrl); | |
514 | if (ret) | |
515 | return ret; | |
516 | ||
62346eae | 517 | ret = nvme_get_stream_params(ctrl, &s, NVME_NSID_ALL); |
f5d11840 JA |
518 | if (ret) |
519 | return ret; | |
520 | ||
521 | ctrl->nssa = le16_to_cpu(s.nssa); | |
522 | if (ctrl->nssa < BLK_MAX_WRITE_HINTS - 1) { | |
523 | dev_info(ctrl->device, "too few streams (%u) available\n", | |
524 | ctrl->nssa); | |
525 | nvme_disable_streams(ctrl); | |
526 | return 0; | |
527 | } | |
528 | ||
529 | ctrl->nr_streams = min_t(unsigned, ctrl->nssa, BLK_MAX_WRITE_HINTS - 1); | |
530 | dev_info(ctrl->device, "Using %u streams\n", ctrl->nr_streams); | |
531 | return 0; | |
532 | } | |
533 | ||
534 | /* | |
535 | * Check if 'req' has a write hint associated with it. If it does, assign | |
536 | * a valid namespace stream to the write. | |
537 | */ | |
538 | static void nvme_assign_write_stream(struct nvme_ctrl *ctrl, | |
539 | struct request *req, u16 *control, | |
540 | u32 *dsmgmt) | |
541 | { | |
542 | enum rw_hint streamid = req->write_hint; | |
543 | ||
544 | if (streamid == WRITE_LIFE_NOT_SET || streamid == WRITE_LIFE_NONE) | |
545 | streamid = 0; | |
546 | else { | |
547 | streamid--; | |
548 | if (WARN_ON_ONCE(streamid > ctrl->nr_streams)) | |
549 | return; | |
550 | ||
551 | *control |= NVME_RW_DTYPE_STREAMS; | |
552 | *dsmgmt |= streamid << 16; | |
553 | } | |
554 | ||
555 | if (streamid < ARRAY_SIZE(req->q->write_hints)) | |
556 | req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9; | |
557 | } | |
558 | ||
8093f7ca ML |
559 | static inline void nvme_setup_flush(struct nvme_ns *ns, |
560 | struct nvme_command *cmnd) | |
561 | { | |
8093f7ca | 562 | cmnd->common.opcode = nvme_cmd_flush; |
ed754e5d | 563 | cmnd->common.nsid = cpu_to_le32(ns->head->ns_id); |
8093f7ca ML |
564 | } |
565 | ||
fc17b653 | 566 | static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, |
8093f7ca ML |
567 | struct nvme_command *cmnd) |
568 | { | |
b35ba01e | 569 | unsigned short segments = blk_rq_nr_discard_segments(req), n = 0; |
8093f7ca | 570 | struct nvme_dsm_range *range; |
b35ba01e | 571 | struct bio *bio; |
8093f7ca | 572 | |
cb5b7262 JA |
573 | range = kmalloc_array(segments, sizeof(*range), |
574 | GFP_ATOMIC | __GFP_NOWARN); | |
575 | if (!range) { | |
576 | /* | |
577 | * If we fail allocation our range, fallback to the controller | |
578 | * discard page. If that's also busy, it's safe to return | |
579 | * busy, as we know we can make progress once that's freed. | |
580 | */ | |
581 | if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy)) | |
582 | return BLK_STS_RESOURCE; | |
583 | ||
584 | range = page_address(ns->ctrl->discard_page); | |
585 | } | |
8093f7ca | 586 | |
b35ba01e CH |
587 | __rq_for_each_bio(bio, req) { |
588 | u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector); | |
589 | u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; | |
590 | ||
8cb6af7b KB |
591 | if (n < segments) { |
592 | range[n].cattr = cpu_to_le32(0); | |
593 | range[n].nlb = cpu_to_le32(nlb); | |
594 | range[n].slba = cpu_to_le64(slba); | |
595 | } | |
b35ba01e CH |
596 | n++; |
597 | } | |
598 | ||
599 | if (WARN_ON_ONCE(n != segments)) { | |
cb5b7262 JA |
600 | if (virt_to_page(range) == ns->ctrl->discard_page) |
601 | clear_bit_unlock(0, &ns->ctrl->discard_page_busy); | |
602 | else | |
603 | kfree(range); | |
fc17b653 | 604 | return BLK_STS_IOERR; |
b35ba01e | 605 | } |
8093f7ca | 606 | |
8093f7ca | 607 | cmnd->dsm.opcode = nvme_cmd_dsm; |
ed754e5d | 608 | cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id); |
f1dd03a8 | 609 | cmnd->dsm.nr = cpu_to_le32(segments - 1); |
8093f7ca ML |
610 | cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); |
611 | ||
f9d03f96 CH |
612 | req->special_vec.bv_page = virt_to_page(range); |
613 | req->special_vec.bv_offset = offset_in_page(range); | |
b35ba01e | 614 | req->special_vec.bv_len = sizeof(*range) * segments; |
f9d03f96 | 615 | req->rq_flags |= RQF_SPECIAL_PAYLOAD; |
8093f7ca | 616 | |
fc17b653 | 617 | return BLK_STS_OK; |
8093f7ca | 618 | } |
8093f7ca | 619 | |
6e02318e CK |
620 | static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, |
621 | struct request *req, struct nvme_command *cmnd) | |
622 | { | |
623 | if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) | |
624 | return nvme_setup_discard(ns, req, cmnd); | |
625 | ||
626 | cmnd->write_zeroes.opcode = nvme_cmd_write_zeroes; | |
627 | cmnd->write_zeroes.nsid = cpu_to_le32(ns->head->ns_id); | |
628 | cmnd->write_zeroes.slba = | |
629 | cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); | |
630 | cmnd->write_zeroes.length = | |
631 | cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); | |
632 | cmnd->write_zeroes.control = 0; | |
633 | return BLK_STS_OK; | |
634 | } | |
635 | ||
ebe6d874 CH |
636 | static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, |
637 | struct request *req, struct nvme_command *cmnd) | |
8093f7ca | 638 | { |
f5d11840 | 639 | struct nvme_ctrl *ctrl = ns->ctrl; |
8093f7ca ML |
640 | u16 control = 0; |
641 | u32 dsmgmt = 0; | |
642 | ||
643 | if (req->cmd_flags & REQ_FUA) | |
644 | control |= NVME_RW_FUA; | |
645 | if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD)) | |
646 | control |= NVME_RW_LR; | |
647 | ||
648 | if (req->cmd_flags & REQ_RAHEAD) | |
649 | dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; | |
650 | ||
8093f7ca | 651 | cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read); |
ed754e5d | 652 | cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); |
8093f7ca ML |
653 | cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); |
654 | cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); | |
655 | ||
f5d11840 JA |
656 | if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams) |
657 | nvme_assign_write_stream(ctrl, req, &control, &dsmgmt); | |
658 | ||
8093f7ca | 659 | if (ns->ms) { |
715ea9e0 CH |
660 | /* |
661 | * If formated with metadata, the block layer always provides a | |
662 | * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else | |
663 | * we enable the PRACT bit for protection information or set the | |
664 | * namespace capacity to zero to prevent any I/O. | |
665 | */ | |
666 | if (!blk_integrity_rq(req)) { | |
667 | if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) | |
668 | return BLK_STS_NOTSUPP; | |
669 | control |= NVME_RW_PRINFO_PRACT; | |
f7f1fc36 MG |
670 | } else if (req_op(req) == REQ_OP_WRITE) { |
671 | t10_pi_prepare(req, ns->pi_type); | |
715ea9e0 CH |
672 | } |
673 | ||
8093f7ca ML |
674 | switch (ns->pi_type) { |
675 | case NVME_NS_DPS_PI_TYPE3: | |
676 | control |= NVME_RW_PRINFO_PRCHK_GUARD; | |
677 | break; | |
678 | case NVME_NS_DPS_PI_TYPE1: | |
679 | case NVME_NS_DPS_PI_TYPE2: | |
680 | control |= NVME_RW_PRINFO_PRCHK_GUARD | | |
681 | NVME_RW_PRINFO_PRCHK_REF; | |
ddd0bc75 | 682 | cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); |
8093f7ca ML |
683 | break; |
684 | } | |
8093f7ca ML |
685 | } |
686 | ||
687 | cmnd->rw.control = cpu_to_le16(control); | |
688 | cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); | |
ebe6d874 | 689 | return 0; |
8093f7ca ML |
690 | } |
691 | ||
f7f1fc36 MG |
692 | void nvme_cleanup_cmd(struct request *req) |
693 | { | |
694 | if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && | |
695 | nvme_req(req)->status == 0) { | |
696 | struct nvme_ns *ns = req->rq_disk->private_data; | |
697 | ||
698 | t10_pi_complete(req, ns->pi_type, | |
699 | blk_rq_bytes(req) >> ns->lba_shift); | |
700 | } | |
701 | if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { | |
cb5b7262 JA |
702 | struct nvme_ns *ns = req->rq_disk->private_data; |
703 | struct page *page = req->special_vec.bv_page; | |
704 | ||
705 | if (page == ns->ctrl->discard_page) | |
706 | clear_bit_unlock(0, &ns->ctrl->discard_page_busy); | |
707 | else | |
708 | kfree(page_address(page) + req->special_vec.bv_offset); | |
f7f1fc36 MG |
709 | } |
710 | } | |
711 | EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); | |
712 | ||
fc17b653 | 713 | blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, |
8093f7ca ML |
714 | struct nvme_command *cmd) |
715 | { | |
fc17b653 | 716 | blk_status_t ret = BLK_STS_OK; |
8093f7ca | 717 | |
bb06ec31 | 718 | nvme_clear_nvme_request(req); |
987f699a | 719 | |
11902035 | 720 | memset(cmd, 0, sizeof(*cmd)); |
aebf526b CH |
721 | switch (req_op(req)) { |
722 | case REQ_OP_DRV_IN: | |
723 | case REQ_OP_DRV_OUT: | |
d49187e9 | 724 | memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd)); |
aebf526b CH |
725 | break; |
726 | case REQ_OP_FLUSH: | |
8093f7ca | 727 | nvme_setup_flush(ns, cmd); |
aebf526b | 728 | break; |
e850fd16 | 729 | case REQ_OP_WRITE_ZEROES: |
6e02318e CK |
730 | ret = nvme_setup_write_zeroes(ns, req, cmd); |
731 | break; | |
aebf526b | 732 | case REQ_OP_DISCARD: |
8093f7ca | 733 | ret = nvme_setup_discard(ns, req, cmd); |
aebf526b CH |
734 | break; |
735 | case REQ_OP_READ: | |
736 | case REQ_OP_WRITE: | |
ebe6d874 | 737 | ret = nvme_setup_rw(ns, req, cmd); |
aebf526b CH |
738 | break; |
739 | default: | |
740 | WARN_ON_ONCE(1); | |
fc17b653 | 741 | return BLK_STS_IOERR; |
aebf526b | 742 | } |
8093f7ca | 743 | |
721b3917 | 744 | cmd->common.command_id = req->tag; |
5d87eb94 | 745 | trace_nvme_setup_cmd(req, cmd); |
8093f7ca ML |
746 | return ret; |
747 | } | |
748 | EXPORT_SYMBOL_GPL(nvme_setup_cmd); | |
749 | ||
6287b51c SG |
750 | static void nvme_end_sync_rq(struct request *rq, blk_status_t error) |
751 | { | |
752 | struct completion *waiting = rq->end_io_data; | |
753 | ||
754 | rq->end_io_data = NULL; | |
755 | complete(waiting); | |
756 | } | |
757 | ||
758 | static void nvme_execute_rq_polled(struct request_queue *q, | |
759 | struct gendisk *bd_disk, struct request *rq, int at_head) | |
760 | { | |
761 | DECLARE_COMPLETION_ONSTACK(wait); | |
762 | ||
763 | WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)); | |
764 | ||
765 | rq->cmd_flags |= REQ_HIPRI; | |
766 | rq->end_io_data = &wait; | |
767 | blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq); | |
768 | ||
769 | while (!completion_done(&wait)) { | |
770 | blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true); | |
771 | cond_resched(); | |
772 | } | |
773 | } | |
774 | ||
4160982e CH |
775 | /* |
776 | * Returns 0 on success. If the result is negative, it's a Linux error code; | |
777 | * if the result is positive, it's an NVM Express status code | |
778 | */ | |
779 | int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, | |
d49187e9 | 780 | union nvme_result *result, void *buffer, unsigned bufflen, |
9a95e4ef | 781 | unsigned timeout, int qid, int at_head, |
6287b51c | 782 | blk_mq_req_flags_t flags, bool poll) |
4160982e CH |
783 | { |
784 | struct request *req; | |
785 | int ret; | |
786 | ||
eb71f435 | 787 | req = nvme_alloc_request(q, cmd, flags, qid); |
4160982e CH |
788 | if (IS_ERR(req)) |
789 | return PTR_ERR(req); | |
790 | ||
791 | req->timeout = timeout ? timeout : ADMIN_TIMEOUT; | |
792 | ||
21d34711 CH |
793 | if (buffer && bufflen) { |
794 | ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); | |
795 | if (ret) | |
796 | goto out; | |
4160982e CH |
797 | } |
798 | ||
6287b51c SG |
799 | if (poll) |
800 | nvme_execute_rq_polled(req->q, NULL, req, at_head); | |
801 | else | |
802 | blk_execute_rq(req->q, NULL, req, at_head); | |
d49187e9 CH |
803 | if (result) |
804 | *result = nvme_req(req)->result; | |
27fa9bc5 CH |
805 | if (nvme_req(req)->flags & NVME_REQ_CANCELLED) |
806 | ret = -EINTR; | |
807 | else | |
808 | ret = nvme_req(req)->status; | |
4160982e CH |
809 | out: |
810 | blk_mq_free_request(req); | |
811 | return ret; | |
812 | } | |
eb71f435 | 813 | EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd); |
4160982e CH |
814 | |
815 | int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, | |
816 | void *buffer, unsigned bufflen) | |
817 | { | |
eb71f435 | 818 | return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0, |
6287b51c | 819 | NVME_QID_ANY, 0, 0, false); |
4160982e | 820 | } |
576d55d6 | 821 | EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd); |
4160982e | 822 | |
1cad6562 CH |
823 | static void *nvme_add_user_metadata(struct bio *bio, void __user *ubuf, |
824 | unsigned len, u32 seed, bool write) | |
825 | { | |
826 | struct bio_integrity_payload *bip; | |
827 | int ret = -ENOMEM; | |
828 | void *buf; | |
829 | ||
830 | buf = kmalloc(len, GFP_KERNEL); | |
831 | if (!buf) | |
832 | goto out; | |
833 | ||
834 | ret = -EFAULT; | |
835 | if (write && copy_from_user(buf, ubuf, len)) | |
836 | goto out_free_meta; | |
837 | ||
838 | bip = bio_integrity_alloc(bio, GFP_KERNEL, 1); | |
839 | if (IS_ERR(bip)) { | |
840 | ret = PTR_ERR(bip); | |
841 | goto out_free_meta; | |
842 | } | |
843 | ||
844 | bip->bip_iter.bi_size = len; | |
845 | bip->bip_iter.bi_sector = seed; | |
846 | ret = bio_integrity_add_page(bio, virt_to_page(buf), len, | |
847 | offset_in_page(buf)); | |
848 | if (ret == len) | |
849 | return buf; | |
850 | ret = -ENOMEM; | |
851 | out_free_meta: | |
852 | kfree(buf); | |
853 | out: | |
854 | return ERR_PTR(ret); | |
855 | } | |
856 | ||
63263d60 | 857 | static int nvme_submit_user_cmd(struct request_queue *q, |
485783ca KB |
858 | struct nvme_command *cmd, void __user *ubuffer, |
859 | unsigned bufflen, void __user *meta_buffer, unsigned meta_len, | |
860 | u32 meta_seed, u32 *result, unsigned timeout) | |
4160982e | 861 | { |
7a5abb4b | 862 | bool write = nvme_is_write(cmd); |
0b7f1f26 KB |
863 | struct nvme_ns *ns = q->queuedata; |
864 | struct gendisk *disk = ns ? ns->disk : NULL; | |
4160982e | 865 | struct request *req; |
0b7f1f26 KB |
866 | struct bio *bio = NULL; |
867 | void *meta = NULL; | |
4160982e CH |
868 | int ret; |
869 | ||
eb71f435 | 870 | req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY); |
4160982e CH |
871 | if (IS_ERR(req)) |
872 | return PTR_ERR(req); | |
873 | ||
874 | req->timeout = timeout ? timeout : ADMIN_TIMEOUT; | |
bb06ec31 | 875 | nvme_req(req)->flags |= NVME_REQ_USERCMD; |
4160982e CH |
876 | |
877 | if (ubuffer && bufflen) { | |
21d34711 CH |
878 | ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, |
879 | GFP_KERNEL); | |
880 | if (ret) | |
881 | goto out; | |
882 | bio = req->bio; | |
74d46992 | 883 | bio->bi_disk = disk; |
1cad6562 CH |
884 | if (disk && meta_buffer && meta_len) { |
885 | meta = nvme_add_user_metadata(bio, meta_buffer, meta_len, | |
886 | meta_seed, write); | |
887 | if (IS_ERR(meta)) { | |
888 | ret = PTR_ERR(meta); | |
0b7f1f26 KB |
889 | goto out_unmap; |
890 | } | |
f31a2110 | 891 | req->cmd_flags |= REQ_INTEGRITY; |
0b7f1f26 KB |
892 | } |
893 | } | |
1cad6562 | 894 | |
0b7f1f26 | 895 | blk_execute_rq(req->q, disk, req, 0); |
27fa9bc5 CH |
896 | if (nvme_req(req)->flags & NVME_REQ_CANCELLED) |
897 | ret = -EINTR; | |
898 | else | |
899 | ret = nvme_req(req)->status; | |
21d34711 | 900 | if (result) |
d49187e9 | 901 | *result = le32_to_cpu(nvme_req(req)->result.u32); |
0b7f1f26 KB |
902 | if (meta && !ret && !write) { |
903 | if (copy_to_user(meta_buffer, meta, meta_len)) | |
904 | ret = -EFAULT; | |
905 | } | |
0b7f1f26 KB |
906 | kfree(meta); |
907 | out_unmap: | |
74d46992 | 908 | if (bio) |
0b7f1f26 | 909 | blk_rq_unmap_user(bio); |
21d34711 CH |
910 | out: |
911 | blk_mq_free_request(req); | |
912 | return ret; | |
913 | } | |
914 | ||
2a842aca | 915 | static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status) |
038bd4cb SG |
916 | { |
917 | struct nvme_ctrl *ctrl = rq->end_io_data; | |
86880d64 JS |
918 | unsigned long flags; |
919 | bool startka = false; | |
038bd4cb SG |
920 | |
921 | blk_mq_free_request(rq); | |
922 | ||
2a842aca | 923 | if (status) { |
038bd4cb | 924 | dev_err(ctrl->device, |
2a842aca CH |
925 | "failed nvme_keep_alive_end_io error=%d\n", |
926 | status); | |
038bd4cb SG |
927 | return; |
928 | } | |
929 | ||
6e3ca03e | 930 | ctrl->comp_seen = false; |
86880d64 JS |
931 | spin_lock_irqsave(&ctrl->lock, flags); |
932 | if (ctrl->state == NVME_CTRL_LIVE || | |
933 | ctrl->state == NVME_CTRL_CONNECTING) | |
934 | startka = true; | |
935 | spin_unlock_irqrestore(&ctrl->lock, flags); | |
936 | if (startka) | |
937 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); | |
038bd4cb SG |
938 | } |
939 | ||
940 | static int nvme_keep_alive(struct nvme_ctrl *ctrl) | |
941 | { | |
038bd4cb SG |
942 | struct request *rq; |
943 | ||
0a34e466 | 944 | rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED, |
038bd4cb SG |
945 | NVME_QID_ANY); |
946 | if (IS_ERR(rq)) | |
947 | return PTR_ERR(rq); | |
948 | ||
949 | rq->timeout = ctrl->kato * HZ; | |
950 | rq->end_io_data = ctrl; | |
951 | ||
952 | blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io); | |
953 | ||
954 | return 0; | |
955 | } | |
956 | ||
957 | static void nvme_keep_alive_work(struct work_struct *work) | |
958 | { | |
959 | struct nvme_ctrl *ctrl = container_of(to_delayed_work(work), | |
960 | struct nvme_ctrl, ka_work); | |
6e3ca03e SG |
961 | bool comp_seen = ctrl->comp_seen; |
962 | ||
963 | if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) { | |
964 | dev_dbg(ctrl->device, | |
965 | "reschedule traffic based keep-alive timer\n"); | |
966 | ctrl->comp_seen = false; | |
967 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); | |
968 | return; | |
969 | } | |
038bd4cb SG |
970 | |
971 | if (nvme_keep_alive(ctrl)) { | |
972 | /* allocation failure, reset the controller */ | |
973 | dev_err(ctrl->device, "keep-alive failed\n"); | |
39bdc590 | 974 | nvme_reset_ctrl(ctrl); |
038bd4cb SG |
975 | return; |
976 | } | |
977 | } | |
978 | ||
00b683db | 979 | static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) |
038bd4cb SG |
980 | { |
981 | if (unlikely(ctrl->kato == 0)) | |
982 | return; | |
983 | ||
038bd4cb SG |
984 | schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); |
985 | } | |
038bd4cb SG |
986 | |
987 | void nvme_stop_keep_alive(struct nvme_ctrl *ctrl) | |
988 | { | |
989 | if (unlikely(ctrl->kato == 0)) | |
990 | return; | |
991 | ||
992 | cancel_delayed_work_sync(&ctrl->ka_work); | |
993 | } | |
994 | EXPORT_SYMBOL_GPL(nvme_stop_keep_alive); | |
995 | ||
3f7f25a9 | 996 | static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) |
21d34711 CH |
997 | { |
998 | struct nvme_command c = { }; | |
999 | int error; | |
1000 | ||
1001 | /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ | |
1002 | c.identify.opcode = nvme_admin_identify; | |
986994a2 | 1003 | c.identify.cns = NVME_ID_CNS_CTRL; |
21d34711 CH |
1004 | |
1005 | *id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL); | |
1006 | if (!*id) | |
1007 | return -ENOMEM; | |
1008 | ||
1009 | error = nvme_submit_sync_cmd(dev->admin_q, &c, *id, | |
1010 | sizeof(struct nvme_id_ctrl)); | |
1011 | if (error) | |
1012 | kfree(*id); | |
1013 | return error; | |
1014 | } | |
1015 | ||
cdbff4f2 | 1016 | static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid, |
002fab04 | 1017 | struct nvme_ns_ids *ids) |
3b22ba26 JT |
1018 | { |
1019 | struct nvme_command c = { }; | |
1020 | int status; | |
1021 | void *data; | |
1022 | int pos; | |
1023 | int len; | |
1024 | ||
1025 | c.identify.opcode = nvme_admin_identify; | |
1026 | c.identify.nsid = cpu_to_le32(nsid); | |
1027 | c.identify.cns = NVME_ID_CNS_NS_DESC_LIST; | |
1028 | ||
1029 | data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); | |
1030 | if (!data) | |
1031 | return -ENOMEM; | |
1032 | ||
cdbff4f2 | 1033 | status = nvme_submit_sync_cmd(ctrl->admin_q, &c, data, |
3b22ba26 JT |
1034 | NVME_IDENTIFY_DATA_SIZE); |
1035 | if (status) | |
1036 | goto free_data; | |
1037 | ||
1038 | for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) { | |
1039 | struct nvme_ns_id_desc *cur = data + pos; | |
1040 | ||
1041 | if (cur->nidl == 0) | |
1042 | break; | |
1043 | ||
1044 | switch (cur->nidt) { | |
1045 | case NVME_NIDT_EUI64: | |
1046 | if (cur->nidl != NVME_NIDT_EUI64_LEN) { | |
cdbff4f2 | 1047 | dev_warn(ctrl->device, |
3b22ba26 JT |
1048 | "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n", |
1049 | cur->nidl); | |
1050 | goto free_data; | |
1051 | } | |
1052 | len = NVME_NIDT_EUI64_LEN; | |
002fab04 | 1053 | memcpy(ids->eui64, data + pos + sizeof(*cur), len); |
3b22ba26 JT |
1054 | break; |
1055 | case NVME_NIDT_NGUID: | |
1056 | if (cur->nidl != NVME_NIDT_NGUID_LEN) { | |
cdbff4f2 | 1057 | dev_warn(ctrl->device, |
3b22ba26 JT |
1058 | "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n", |
1059 | cur->nidl); | |
1060 | goto free_data; | |
1061 | } | |
1062 | len = NVME_NIDT_NGUID_LEN; | |
002fab04 | 1063 | memcpy(ids->nguid, data + pos + sizeof(*cur), len); |
3b22ba26 JT |
1064 | break; |
1065 | case NVME_NIDT_UUID: | |
1066 | if (cur->nidl != NVME_NIDT_UUID_LEN) { | |
cdbff4f2 | 1067 | dev_warn(ctrl->device, |
3b22ba26 JT |
1068 | "ctrl returned bogus length: %d for NVME_NIDT_UUID\n", |
1069 | cur->nidl); | |
1070 | goto free_data; | |
1071 | } | |
1072 | len = NVME_NIDT_UUID_LEN; | |
002fab04 | 1073 | uuid_copy(&ids->uuid, data + pos + sizeof(*cur)); |
3b22ba26 JT |
1074 | break; |
1075 | default: | |
53b3a661 | 1076 | /* Skip unknown types */ |
3b22ba26 JT |
1077 | len = cur->nidl; |
1078 | break; | |
1079 | } | |
1080 | ||
1081 | len += sizeof(*cur); | |
1082 | } | |
1083 | free_data: | |
1084 | kfree(data); | |
1085 | return status; | |
1086 | } | |
1087 | ||
540c801c KB |
1088 | static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *ns_list) |
1089 | { | |
1090 | struct nvme_command c = { }; | |
1091 | ||
1092 | c.identify.opcode = nvme_admin_identify; | |
986994a2 | 1093 | c.identify.cns = NVME_ID_CNS_NS_ACTIVE_LIST; |
540c801c | 1094 | c.identify.nsid = cpu_to_le32(nsid); |
42595eb7 MI |
1095 | return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, |
1096 | NVME_IDENTIFY_DATA_SIZE); | |
540c801c KB |
1097 | } |
1098 | ||
331813f6 SG |
1099 | static int nvme_identify_ns(struct nvme_ctrl *ctrl, |
1100 | unsigned nsid, struct nvme_id_ns **id) | |
21d34711 CH |
1101 | { |
1102 | struct nvme_command c = { }; | |
1103 | int error; | |
1104 | ||
1105 | /* gcc-4.4.4 (at least) has issues with initializers and anon unions */ | |
778f067c MG |
1106 | c.identify.opcode = nvme_admin_identify; |
1107 | c.identify.nsid = cpu_to_le32(nsid); | |
986994a2 | 1108 | c.identify.cns = NVME_ID_CNS_NS; |
21d34711 | 1109 | |
331813f6 SG |
1110 | *id = kmalloc(sizeof(**id), GFP_KERNEL); |
1111 | if (!*id) | |
1112 | return -ENOMEM; | |
21d34711 | 1113 | |
331813f6 | 1114 | error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); |
cdbff4f2 | 1115 | if (error) { |
d0de579c | 1116 | dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); |
331813f6 | 1117 | kfree(*id); |
cdbff4f2 CH |
1118 | } |
1119 | ||
331813f6 | 1120 | return error; |
21d34711 CH |
1121 | } |
1122 | ||
1a87ee65 KB |
1123 | static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, |
1124 | unsigned int dword11, void *buffer, size_t buflen, u32 *result) | |
21d34711 CH |
1125 | { |
1126 | struct nvme_command c; | |
d49187e9 | 1127 | union nvme_result res; |
1cb3cce5 | 1128 | int ret; |
21d34711 CH |
1129 | |
1130 | memset(&c, 0, sizeof(c)); | |
1a87ee65 | 1131 | c.features.opcode = op; |
21d34711 CH |
1132 | c.features.fid = cpu_to_le32(fid); |
1133 | c.features.dword11 = cpu_to_le32(dword11); | |
1134 | ||
d49187e9 | 1135 | ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res, |
6287b51c | 1136 | buffer, buflen, 0, NVME_QID_ANY, 0, 0, false); |
9b47f77a | 1137 | if (ret >= 0 && result) |
d49187e9 | 1138 | *result = le32_to_cpu(res.u32); |
1cb3cce5 | 1139 | return ret; |
21d34711 CH |
1140 | } |
1141 | ||
1a87ee65 KB |
1142 | int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, |
1143 | unsigned int dword11, void *buffer, size_t buflen, | |
1144 | u32 *result) | |
1145 | { | |
1146 | return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, | |
1147 | buflen, result); | |
1148 | } | |
1149 | EXPORT_SYMBOL_GPL(nvme_set_features); | |
1150 | ||
1151 | int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, | |
1152 | unsigned int dword11, void *buffer, size_t buflen, | |
1153 | u32 *result) | |
1154 | { | |
1155 | return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, | |
1156 | buflen, result); | |
1157 | } | |
1158 | EXPORT_SYMBOL_GPL(nvme_get_features); | |
1159 | ||
9a0be7ab CH |
1160 | int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) |
1161 | { | |
1162 | u32 q_count = (*count - 1) | ((*count - 1) << 16); | |
1163 | u32 result; | |
1164 | int status, nr_io_queues; | |
1165 | ||
1a6fe74d | 1166 | status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, |
9a0be7ab | 1167 | &result); |
f5fa90dc | 1168 | if (status < 0) |
9a0be7ab CH |
1169 | return status; |
1170 | ||
f5fa90dc CH |
1171 | /* |
1172 | * Degraded controllers might return an error when setting the queue | |
1173 | * count. We still want to be able to bring them online and offer | |
1174 | * access to the admin queue, as that might be only way to fix them up. | |
1175 | */ | |
1176 | if (status > 0) { | |
f0425db0 | 1177 | dev_err(ctrl->device, "Could not set queue count (%d)\n", status); |
f5fa90dc CH |
1178 | *count = 0; |
1179 | } else { | |
1180 | nr_io_queues = min(result & 0xffff, result >> 16) + 1; | |
1181 | *count = min(*count, nr_io_queues); | |
1182 | } | |
1183 | ||
9a0be7ab CH |
1184 | return 0; |
1185 | } | |
576d55d6 | 1186 | EXPORT_SYMBOL_GPL(nvme_set_queue_count); |
9a0be7ab | 1187 | |
c0561f82 | 1188 | #define NVME_AEN_SUPPORTED \ |
0d0b660f | 1189 | (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE) |
c0561f82 HR |
1190 | |
1191 | static void nvme_enable_aen(struct nvme_ctrl *ctrl) | |
1192 | { | |
fa441b71 | 1193 | u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED; |
c0561f82 HR |
1194 | int status; |
1195 | ||
fa441b71 WZ |
1196 | if (!supported_aens) |
1197 | return; | |
1198 | ||
1199 | status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens, | |
1200 | NULL, 0, &result); | |
c0561f82 HR |
1201 | if (status) |
1202 | dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n", | |
fa441b71 | 1203 | supported_aens); |
c0561f82 HR |
1204 | } |
1205 | ||
1673f1f0 CH |
1206 | static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) |
1207 | { | |
1208 | struct nvme_user_io io; | |
1209 | struct nvme_command c; | |
1210 | unsigned length, meta_len; | |
1211 | void __user *metadata; | |
1212 | ||
1213 | if (copy_from_user(&io, uio, sizeof(io))) | |
1214 | return -EFAULT; | |
63088ec7 KB |
1215 | if (io.flags) |
1216 | return -EINVAL; | |
1673f1f0 CH |
1217 | |
1218 | switch (io.opcode) { | |
1219 | case nvme_cmd_write: | |
1220 | case nvme_cmd_read: | |
1221 | case nvme_cmd_compare: | |
1222 | break; | |
1223 | default: | |
1224 | return -EINVAL; | |
1225 | } | |
1226 | ||
1227 | length = (io.nblocks + 1) << ns->lba_shift; | |
1228 | meta_len = (io.nblocks + 1) * ns->ms; | |
1229 | metadata = (void __user *)(uintptr_t)io.metadata; | |
1230 | ||
1231 | if (ns->ext) { | |
1232 | length += meta_len; | |
1233 | meta_len = 0; | |
1234 | } else if (meta_len) { | |
1235 | if ((io.metadata & 3) || !io.metadata) | |
1236 | return -EINVAL; | |
1237 | } | |
1238 | ||
1239 | memset(&c, 0, sizeof(c)); | |
1240 | c.rw.opcode = io.opcode; | |
1241 | c.rw.flags = io.flags; | |
ed754e5d | 1242 | c.rw.nsid = cpu_to_le32(ns->head->ns_id); |
1673f1f0 CH |
1243 | c.rw.slba = cpu_to_le64(io.slba); |
1244 | c.rw.length = cpu_to_le16(io.nblocks); | |
1245 | c.rw.control = cpu_to_le16(io.control); | |
1246 | c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); | |
1247 | c.rw.reftag = cpu_to_le32(io.reftag); | |
1248 | c.rw.apptag = cpu_to_le16(io.apptag); | |
1249 | c.rw.appmask = cpu_to_le16(io.appmask); | |
1250 | ||
63263d60 | 1251 | return nvme_submit_user_cmd(ns->queue, &c, |
1673f1f0 | 1252 | (void __user *)(uintptr_t)io.addr, length, |
202359c0 | 1253 | metadata, meta_len, lower_32_bits(io.slba), NULL, 0); |
1673f1f0 CH |
1254 | } |
1255 | ||
84fef62d KB |
1256 | static u32 nvme_known_admin_effects(u8 opcode) |
1257 | { | |
1258 | switch (opcode) { | |
1259 | case nvme_admin_format_nvm: | |
1260 | return NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC | | |
1261 | NVME_CMD_EFFECTS_CSE_MASK; | |
1262 | case nvme_admin_sanitize_nvm: | |
1263 | return NVME_CMD_EFFECTS_CSE_MASK; | |
1264 | default: | |
1265 | break; | |
1266 | } | |
1267 | return 0; | |
1268 | } | |
1269 | ||
1270 | static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | |
1271 | u8 opcode) | |
1272 | { | |
1273 | u32 effects = 0; | |
1274 | ||
1275 | if (ns) { | |
1276 | if (ctrl->effects) | |
1277 | effects = le32_to_cpu(ctrl->effects->iocs[opcode]); | |
415df90b | 1278 | if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC)) |
84fef62d KB |
1279 | dev_warn(ctrl->device, |
1280 | "IO command:%02x has unhandled effects:%08x\n", | |
1281 | opcode, effects); | |
1282 | return 0; | |
1283 | } | |
1284 | ||
1285 | if (ctrl->effects) | |
62843c2e | 1286 | effects = le32_to_cpu(ctrl->effects->acs[opcode]); |
6fa0321a | 1287 | effects |= nvme_known_admin_effects(opcode); |
84fef62d KB |
1288 | |
1289 | /* | |
1290 | * For simplicity, IO to all namespaces is quiesced even if the command | |
1291 | * effects say only one namespace is affected. | |
1292 | */ | |
1293 | if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { | |
e7ad43c3 | 1294 | mutex_lock(&ctrl->scan_lock); |
84fef62d KB |
1295 | nvme_start_freeze(ctrl); |
1296 | nvme_wait_freeze(ctrl); | |
1297 | } | |
1298 | return effects; | |
1299 | } | |
1300 | ||
1301 | static void nvme_update_formats(struct nvme_ctrl *ctrl) | |
1302 | { | |
cf39a6bc | 1303 | struct nvme_ns *ns; |
84fef62d | 1304 | |
cf39a6bc SB |
1305 | down_read(&ctrl->namespaces_rwsem); |
1306 | list_for_each_entry(ns, &ctrl->namespaces, list) | |
1307 | if (ns->disk && nvme_revalidate_disk(ns->disk)) | |
1308 | nvme_set_queue_dying(ns); | |
1309 | up_read(&ctrl->namespaces_rwsem); | |
3fd176b7 | 1310 | |
cf39a6bc | 1311 | nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL); |
84fef62d KB |
1312 | } |
1313 | ||
1314 | static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) | |
1315 | { | |
1316 | /* | |
1317 | * Revalidate LBA changes prior to unfreezing. This is necessary to | |
1318 | * prevent memory corruption if a logical block size was changed by | |
1319 | * this command. | |
1320 | */ | |
1321 | if (effects & NVME_CMD_EFFECTS_LBCC) | |
1322 | nvme_update_formats(ctrl); | |
e7ad43c3 | 1323 | if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { |
84fef62d | 1324 | nvme_unfreeze(ctrl); |
e7ad43c3 KB |
1325 | mutex_unlock(&ctrl->scan_lock); |
1326 | } | |
84fef62d KB |
1327 | if (effects & NVME_CMD_EFFECTS_CCC) |
1328 | nvme_init_identify(ctrl); | |
1329 | if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) | |
1330 | nvme_queue_scan(ctrl); | |
1331 | } | |
1332 | ||
f3ca80fc | 1333 | static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, |
1673f1f0 CH |
1334 | struct nvme_passthru_cmd __user *ucmd) |
1335 | { | |
1336 | struct nvme_passthru_cmd cmd; | |
1337 | struct nvme_command c; | |
1338 | unsigned timeout = 0; | |
84fef62d | 1339 | u32 effects; |
1673f1f0 CH |
1340 | int status; |
1341 | ||
1342 | if (!capable(CAP_SYS_ADMIN)) | |
1343 | return -EACCES; | |
1344 | if (copy_from_user(&cmd, ucmd, sizeof(cmd))) | |
1345 | return -EFAULT; | |
63088ec7 KB |
1346 | if (cmd.flags) |
1347 | return -EINVAL; | |
1673f1f0 CH |
1348 | |
1349 | memset(&c, 0, sizeof(c)); | |
1350 | c.common.opcode = cmd.opcode; | |
1351 | c.common.flags = cmd.flags; | |
1352 | c.common.nsid = cpu_to_le32(cmd.nsid); | |
1353 | c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); | |
1354 | c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); | |
b7c8f366 CK |
1355 | c.common.cdw10 = cpu_to_le32(cmd.cdw10); |
1356 | c.common.cdw11 = cpu_to_le32(cmd.cdw11); | |
1357 | c.common.cdw12 = cpu_to_le32(cmd.cdw12); | |
1358 | c.common.cdw13 = cpu_to_le32(cmd.cdw13); | |
1359 | c.common.cdw14 = cpu_to_le32(cmd.cdw14); | |
1360 | c.common.cdw15 = cpu_to_le32(cmd.cdw15); | |
1673f1f0 CH |
1361 | |
1362 | if (cmd.timeout_ms) | |
1363 | timeout = msecs_to_jiffies(cmd.timeout_ms); | |
1364 | ||
84fef62d | 1365 | effects = nvme_passthru_start(ctrl, ns, cmd.opcode); |
1673f1f0 | 1366 | status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, |
d1ea7be5 | 1367 | (void __user *)(uintptr_t)cmd.addr, cmd.data_len, |
9b382768 | 1368 | (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len, |
63263d60 | 1369 | 0, &cmd.result, timeout); |
84fef62d KB |
1370 | nvme_passthru_end(ctrl, effects); |
1371 | ||
1673f1f0 CH |
1372 | if (status >= 0) { |
1373 | if (put_user(cmd.result, &ucmd->result)) | |
1374 | return -EFAULT; | |
1375 | } | |
1376 | ||
1377 | return status; | |
1378 | } | |
1379 | ||
32acab31 CH |
1380 | /* |
1381 | * Issue ioctl requests on the first available path. Note that unlike normal | |
1382 | * block layer requests we will not retry failed request on another controller. | |
1383 | */ | |
1384 | static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk, | |
1385 | struct nvme_ns_head **head, int *srcu_idx) | |
1673f1f0 | 1386 | { |
32acab31 CH |
1387 | #ifdef CONFIG_NVME_MULTIPATH |
1388 | if (disk->fops == &nvme_ns_head_ops) { | |
100c815c CH |
1389 | struct nvme_ns *ns; |
1390 | ||
32acab31 CH |
1391 | *head = disk->private_data; |
1392 | *srcu_idx = srcu_read_lock(&(*head)->srcu); | |
100c815c CH |
1393 | ns = nvme_find_path(*head); |
1394 | if (!ns) | |
1395 | srcu_read_unlock(&(*head)->srcu, *srcu_idx); | |
1396 | return ns; | |
32acab31 CH |
1397 | } |
1398 | #endif | |
1399 | *head = NULL; | |
1400 | *srcu_idx = -1; | |
1401 | return disk->private_data; | |
1402 | } | |
1673f1f0 | 1403 | |
32acab31 CH |
1404 | static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) |
1405 | { | |
1406 | if (head) | |
1407 | srcu_read_unlock(&head->srcu, idx); | |
1408 | } | |
1673f1f0 | 1409 | |
32acab31 CH |
1410 | static int nvme_ioctl(struct block_device *bdev, fmode_t mode, |
1411 | unsigned int cmd, unsigned long arg) | |
1673f1f0 | 1412 | { |
32acab31 | 1413 | struct nvme_ns_head *head = NULL; |
90ec611a | 1414 | void __user *argp = (void __user *)arg; |
32acab31 CH |
1415 | struct nvme_ns *ns; |
1416 | int srcu_idx, ret; | |
1417 | ||
1418 | ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); | |
1419 | if (unlikely(!ns)) | |
100c815c CH |
1420 | return -EWOULDBLOCK; |
1421 | ||
5fb4aac7 CH |
1422 | /* |
1423 | * Handle ioctls that apply to the controller instead of the namespace | |
1424 | * seperately and drop the ns SRCU reference early. This avoids a | |
1425 | * deadlock when deleting namespaces using the passthrough interface. | |
1426 | */ | |
1427 | if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { | |
1428 | struct nvme_ctrl *ctrl = ns->ctrl; | |
1429 | ||
1430 | nvme_get_ctrl(ns->ctrl); | |
1431 | nvme_put_ns_from_disk(head, srcu_idx); | |
1432 | ||
1433 | if (cmd == NVME_IOCTL_ADMIN_CMD) | |
1434 | ret = nvme_user_cmd(ctrl, NULL, argp); | |
1435 | else | |
1436 | ret = sed_ioctl(ctrl->opal_dev, cmd, argp); | |
1437 | ||
1438 | nvme_put_ctrl(ctrl); | |
1439 | return ret; | |
1440 | } | |
1441 | ||
90ec611a CH |
1442 | switch (cmd) { |
1443 | case NVME_IOCTL_ID: | |
1444 | force_successful_syscall_return(); | |
1445 | ret = ns->head->ns_id; | |
1446 | break; | |
90ec611a CH |
1447 | case NVME_IOCTL_IO_CMD: |
1448 | ret = nvme_user_cmd(ns->ctrl, ns, argp); | |
1449 | break; | |
1450 | case NVME_IOCTL_SUBMIT_IO: | |
1451 | ret = nvme_submit_io(ns, argp); | |
1452 | break; | |
1453 | default: | |
1454 | if (ns->ndev) | |
1455 | ret = nvme_nvm_ioctl(ns, cmd, arg); | |
90ec611a CH |
1456 | else |
1457 | ret = -ENOTTY; | |
1458 | } | |
1459 | ||
32acab31 CH |
1460 | nvme_put_ns_from_disk(head, srcu_idx); |
1461 | return ret; | |
1673f1f0 | 1462 | } |
1673f1f0 CH |
1463 | |
1464 | static int nvme_open(struct block_device *bdev, fmode_t mode) | |
1465 | { | |
c6424a90 CH |
1466 | struct nvme_ns *ns = bdev->bd_disk->private_data; |
1467 | ||
32acab31 CH |
1468 | #ifdef CONFIG_NVME_MULTIPATH |
1469 | /* should never be called due to GENHD_FL_HIDDEN */ | |
1470 | if (WARN_ON_ONCE(ns->head->disk)) | |
85088c4a | 1471 | goto fail; |
32acab31 | 1472 | #endif |
c6424a90 | 1473 | if (!kref_get_unless_zero(&ns->kref)) |
85088c4a NC |
1474 | goto fail; |
1475 | if (!try_module_get(ns->ctrl->ops->module)) | |
1476 | goto fail_put_ns; | |
1477 | ||
c6424a90 | 1478 | return 0; |
85088c4a NC |
1479 | |
1480 | fail_put_ns: | |
1481 | nvme_put_ns(ns); | |
1482 | fail: | |
1483 | return -ENXIO; | |
1673f1f0 CH |
1484 | } |
1485 | ||
1486 | static void nvme_release(struct gendisk *disk, fmode_t mode) | |
1487 | { | |
85088c4a NC |
1488 | struct nvme_ns *ns = disk->private_data; |
1489 | ||
1490 | module_put(ns->ctrl->ops->module); | |
1491 | nvme_put_ns(ns); | |
1673f1f0 CH |
1492 | } |
1493 | ||
1494 | static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
1495 | { | |
1496 | /* some standard values */ | |
1497 | geo->heads = 1 << 6; | |
1498 | geo->sectors = 1 << 5; | |
1499 | geo->cylinders = get_capacity(bdev->bd_disk) >> 11; | |
1500 | return 0; | |
1501 | } | |
1502 | ||
1503 | #ifdef CONFIG_BLK_DEV_INTEGRITY | |
39b7baa4 | 1504 | static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) |
1673f1f0 CH |
1505 | { |
1506 | struct blk_integrity integrity; | |
1507 | ||
fa9a89fc | 1508 | memset(&integrity, 0, sizeof(integrity)); |
39b7baa4 | 1509 | switch (pi_type) { |
1673f1f0 CH |
1510 | case NVME_NS_DPS_PI_TYPE3: |
1511 | integrity.profile = &t10_pi_type3_crc; | |
ba36c21b NB |
1512 | integrity.tag_size = sizeof(u16) + sizeof(u32); |
1513 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; | |
1673f1f0 CH |
1514 | break; |
1515 | case NVME_NS_DPS_PI_TYPE1: | |
1516 | case NVME_NS_DPS_PI_TYPE2: | |
1517 | integrity.profile = &t10_pi_type1_crc; | |
ba36c21b NB |
1518 | integrity.tag_size = sizeof(u16); |
1519 | integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; | |
1673f1f0 CH |
1520 | break; |
1521 | default: | |
1522 | integrity.profile = NULL; | |
1523 | break; | |
1524 | } | |
39b7baa4 CH |
1525 | integrity.tuple_size = ms; |
1526 | blk_integrity_register(disk, &integrity); | |
1527 | blk_queue_max_integrity_segments(disk->queue, 1); | |
1673f1f0 CH |
1528 | } |
1529 | #else | |
39b7baa4 | 1530 | static void nvme_init_integrity(struct gendisk *disk, u16 ms, u8 pi_type) |
1673f1f0 CH |
1531 | { |
1532 | } | |
1533 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | |
1534 | ||
6b8190d6 SB |
1535 | static void nvme_set_chunk_size(struct nvme_ns *ns) |
1536 | { | |
1537 | u32 chunk_size = (((u32)ns->noiob) << (ns->lba_shift - 9)); | |
1538 | blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); | |
1539 | } | |
1540 | ||
26318571 | 1541 | static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) |
1673f1f0 | 1542 | { |
3831761e | 1543 | struct nvme_ctrl *ctrl = ns->ctrl; |
26318571 | 1544 | struct request_queue *queue = disk->queue; |
30e5e929 CH |
1545 | u32 size = queue_logical_block_size(queue); |
1546 | ||
3831761e JA |
1547 | if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { |
1548 | blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue); | |
1549 | return; | |
1550 | } | |
1551 | ||
1552 | if (ctrl->nr_streams && ns->sws && ns->sgs) | |
1553 | size *= ns->sws * ns->sgs; | |
08095e70 | 1554 | |
b35ba01e CH |
1555 | BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) < |
1556 | NVME_DSM_MAX_RANGES); | |
1557 | ||
b224f613 | 1558 | queue->limits.discard_alignment = 0; |
30e5e929 | 1559 | queue->limits.discard_granularity = size; |
f5d11840 | 1560 | |
3831761e JA |
1561 | /* If discard is already enabled, don't reset queue limits */ |
1562 | if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue)) | |
1563 | return; | |
1564 | ||
30e5e929 CH |
1565 | blk_queue_max_discard_sectors(queue, UINT_MAX); |
1566 | blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); | |
e850fd16 CH |
1567 | |
1568 | if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) | |
30e5e929 | 1569 | blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); |
1673f1f0 CH |
1570 | } |
1571 | ||
9f0916ab | 1572 | static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns) |
6e02318e CK |
1573 | { |
1574 | u32 max_sectors; | |
1575 | unsigned short bs = 1 << ns->lba_shift; | |
1576 | ||
7b210e4e CH |
1577 | if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) || |
1578 | (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES)) | |
6e02318e CK |
1579 | return; |
1580 | /* | |
1581 | * Even though NVMe spec explicitly states that MDTS is not | |
1582 | * applicable to the write-zeroes:- "The restriction does not apply to | |
1583 | * commands that do not transfer data between the host and the | |
1584 | * controller (e.g., Write Uncorrectable ro Write Zeroes command).". | |
1585 | * In order to be more cautious use controller's max_hw_sectors value | |
1586 | * to configure the maximum sectors for the write-zeroes which is | |
1587 | * configured based on the controller's MDTS field in the | |
1588 | * nvme_init_identify() if available. | |
1589 | */ | |
1590 | if (ns->ctrl->max_hw_sectors == UINT_MAX) | |
1591 | max_sectors = ((u32)(USHRT_MAX + 1) * bs) >> 9; | |
1592 | else | |
1593 | max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; | |
1594 | ||
9f0916ab | 1595 | blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors); |
6e02318e CK |
1596 | } |
1597 | ||
538af88e | 1598 | static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, |
002fab04 | 1599 | struct nvme_id_ns *id, struct nvme_ns_ids *ids) |
1673f1f0 | 1600 | { |
538af88e SG |
1601 | int ret = 0; |
1602 | ||
002fab04 CH |
1603 | memset(ids, 0, sizeof(*ids)); |
1604 | ||
cdbff4f2 | 1605 | if (ctrl->vs >= NVME_VS(1, 1, 0)) |
002fab04 | 1606 | memcpy(ids->eui64, id->eui64, sizeof(id->eui64)); |
cdbff4f2 | 1607 | if (ctrl->vs >= NVME_VS(1, 2, 0)) |
002fab04 | 1608 | memcpy(ids->nguid, id->nguid, sizeof(id->nguid)); |
cdbff4f2 | 1609 | if (ctrl->vs >= NVME_VS(1, 3, 0)) { |
3b22ba26 JT |
1610 | /* Don't treat error as fatal we potentially |
1611 | * already have a NGUID or EUI-64 | |
1612 | */ | |
538af88e SG |
1613 | ret = nvme_identify_ns_descs(ctrl, nsid, ids); |
1614 | if (ret) | |
cdbff4f2 | 1615 | dev_warn(ctrl->device, |
538af88e | 1616 | "Identify Descriptors failed (%d)\n", ret); |
3b22ba26 | 1617 | } |
538af88e | 1618 | return ret; |
ac81bfa9 MB |
1619 | } |
1620 | ||
ed754e5d CH |
1621 | static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids) |
1622 | { | |
1623 | return !uuid_is_null(&ids->uuid) || | |
1624 | memchr_inv(ids->nguid, 0, sizeof(ids->nguid)) || | |
1625 | memchr_inv(ids->eui64, 0, sizeof(ids->eui64)); | |
1626 | } | |
1627 | ||
002fab04 CH |
1628 | static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) |
1629 | { | |
1630 | return uuid_equal(&a->uuid, &b->uuid) && | |
1631 | memcmp(&a->nguid, &b->nguid, sizeof(a->nguid)) == 0 && | |
1632 | memcmp(&a->eui64, &b->eui64, sizeof(a->eui64)) == 0; | |
1633 | } | |
1634 | ||
24b0b58c CH |
1635 | static void nvme_update_disk_info(struct gendisk *disk, |
1636 | struct nvme_ns *ns, struct nvme_id_ns *id) | |
1637 | { | |
43e2d08d | 1638 | sector_t capacity = le64_to_cpu(id->nsze) << (ns->lba_shift - 9); |
cee160fd | 1639 | unsigned short bs = 1 << ns->lba_shift; |
81adb863 | 1640 | u32 atomic_bs, phys_bs, io_opt; |
24b0b58c | 1641 | |
01fa0174 SG |
1642 | if (ns->lba_shift > PAGE_SHIFT) { |
1643 | /* unsupported block size, set capacity to 0 later */ | |
1644 | bs = (1 << 9); | |
1645 | } | |
24b0b58c CH |
1646 | blk_mq_freeze_queue(disk->queue); |
1647 | blk_integrity_unregister(disk); | |
1648 | ||
81adb863 BVA |
1649 | if (id->nabo == 0) { |
1650 | /* | |
1651 | * Bit 1 indicates whether NAWUPF is defined for this namespace | |
1652 | * and whether it should be used instead of AWUPF. If NAWUPF == | |
1653 | * 0 then AWUPF must be used instead. | |
1654 | */ | |
1655 | if (id->nsfeat & (1 << 1) && id->nawupf) | |
1656 | atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; | |
1657 | else | |
1658 | atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; | |
1659 | } else { | |
1660 | atomic_bs = bs; | |
1661 | } | |
1662 | phys_bs = bs; | |
1663 | io_opt = bs; | |
1664 | if (id->nsfeat & (1 << 4)) { | |
1665 | /* NPWG = Namespace Preferred Write Granularity */ | |
1666 | phys_bs *= 1 + le16_to_cpu(id->npwg); | |
1667 | /* NOWS = Namespace Optimal Write Size */ | |
1668 | io_opt *= 1 + le16_to_cpu(id->nows); | |
1669 | } | |
1670 | ||
cee160fd | 1671 | blk_queue_logical_block_size(disk->queue, bs); |
81adb863 BVA |
1672 | /* |
1673 | * Linux filesystems assume writing a single physical block is | |
1674 | * an atomic operation. Hence limit the physical block size to the | |
1675 | * value of the Atomic Write Unit Power Fail parameter. | |
1676 | */ | |
1677 | blk_queue_physical_block_size(disk->queue, min(phys_bs, atomic_bs)); | |
1678 | blk_queue_io_min(disk->queue, phys_bs); | |
1679 | blk_queue_io_opt(disk->queue, io_opt); | |
cee160fd | 1680 | |
24b0b58c CH |
1681 | if (ns->ms && !ns->ext && |
1682 | (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) | |
1683 | nvme_init_integrity(disk, ns->ms, ns->pi_type); | |
01fa0174 SG |
1684 | if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) || |
1685 | ns->lba_shift > PAGE_SHIFT) | |
24b0b58c | 1686 | capacity = 0; |
24b0b58c | 1687 | |
3831761e | 1688 | set_capacity(disk, capacity); |
b1aafb35 | 1689 | |
26318571 | 1690 | nvme_config_discard(disk, ns); |
9f0916ab | 1691 | nvme_config_write_zeroes(disk, ns); |
1293477f CK |
1692 | |
1693 | if (id->nsattr & (1 << 0)) | |
1694 | set_disk_ro(disk, true); | |
1695 | else | |
1696 | set_disk_ro(disk, false); | |
1697 | ||
24b0b58c CH |
1698 | blk_mq_unfreeze_queue(disk->queue); |
1699 | } | |
1700 | ||
ac81bfa9 MB |
1701 | static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) |
1702 | { | |
1703 | struct nvme_ns *ns = disk->private_data; | |
1673f1f0 CH |
1704 | |
1705 | /* | |
1706 | * If identify namespace failed, use default 512 byte block size so | |
1707 | * block layer can use before failing read/write for 0 capacity. | |
1708 | */ | |
c81bfba9 | 1709 | ns->lba_shift = id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ds; |
1673f1f0 CH |
1710 | if (ns->lba_shift == 0) |
1711 | ns->lba_shift = 9; | |
6b8190d6 | 1712 | ns->noiob = le16_to_cpu(id->noiob); |
b5be3b39 | 1713 | ns->ms = le16_to_cpu(id->lbaf[id->flbas & NVME_NS_FLBAS_LBA_MASK].ms); |
c97f414c | 1714 | ns->ext = ns->ms && (id->flbas & NVME_NS_FLBAS_META_EXT); |
b5be3b39 CH |
1715 | /* the PI implementation requires metadata equal t10 pi tuple size */ |
1716 | if (ns->ms == sizeof(struct t10_pi_tuple)) | |
1717 | ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK; | |
1718 | else | |
1719 | ns->pi_type = 0; | |
1673f1f0 | 1720 | |
6b8190d6 SB |
1721 | if (ns->noiob) |
1722 | nvme_set_chunk_size(ns); | |
24b0b58c | 1723 | nvme_update_disk_info(disk, ns, id); |
32acab31 | 1724 | #ifdef CONFIG_NVME_MULTIPATH |
8f676b85 | 1725 | if (ns->head->disk) { |
32acab31 | 1726 | nvme_update_disk_info(ns->head->disk, ns, id); |
8f676b85 SG |
1727 | blk_queue_stack_limits(ns->head->disk->queue, ns->queue); |
1728 | } | |
32acab31 | 1729 | #endif |
ac81bfa9 | 1730 | } |
1673f1f0 | 1731 | |
ac81bfa9 MB |
1732 | static int nvme_revalidate_disk(struct gendisk *disk) |
1733 | { | |
1734 | struct nvme_ns *ns = disk->private_data; | |
cdbff4f2 CH |
1735 | struct nvme_ctrl *ctrl = ns->ctrl; |
1736 | struct nvme_id_ns *id; | |
002fab04 | 1737 | struct nvme_ns_ids ids; |
cdbff4f2 | 1738 | int ret = 0; |
ac81bfa9 MB |
1739 | |
1740 | if (test_bit(NVME_NS_DEAD, &ns->flags)) { | |
1741 | set_capacity(disk, 0); | |
1742 | return -ENODEV; | |
1743 | } | |
1744 | ||
331813f6 SG |
1745 | ret = nvme_identify_ns(ctrl, ns->head->ns_id, &id); |
1746 | if (ret) | |
1747 | goto out; | |
ac81bfa9 | 1748 | |
cdbff4f2 CH |
1749 | if (id->ncap == 0) { |
1750 | ret = -ENODEV; | |
331813f6 | 1751 | goto free_id; |
cdbff4f2 | 1752 | } |
ac81bfa9 | 1753 | |
5e0fab57 | 1754 | __nvme_revalidate_disk(disk, id); |
538af88e SG |
1755 | ret = nvme_report_ns_ids(ctrl, ns->head->ns_id, id, &ids); |
1756 | if (ret) | |
1757 | goto free_id; | |
1758 | ||
ed754e5d | 1759 | if (!nvme_ns_ids_equal(&ns->head->ids, &ids)) { |
1d5df6af | 1760 | dev_err(ctrl->device, |
ed754e5d | 1761 | "identifiers changed for nsid %d\n", ns->head->ns_id); |
1d5df6af CH |
1762 | ret = -ENODEV; |
1763 | } | |
1764 | ||
331813f6 | 1765 | free_id: |
cdbff4f2 | 1766 | kfree(id); |
331813f6 | 1767 | out: |
205da243 SG |
1768 | /* |
1769 | * Only fail the function if we got a fatal error back from the | |
1770 | * device, otherwise ignore the error and just move on. | |
1771 | */ | |
1772 | if (ret == -ENOMEM || (ret > 0 && !(ret & NVME_SC_DNR))) | |
1773 | ret = 0; | |
1774 | else if (ret > 0) | |
331813f6 | 1775 | ret = blk_status_to_errno(nvme_error_status(ret)); |
cdbff4f2 | 1776 | return ret; |
1673f1f0 CH |
1777 | } |
1778 | ||
1779 | static char nvme_pr_type(enum pr_type type) | |
1780 | { | |
1781 | switch (type) { | |
1782 | case PR_WRITE_EXCLUSIVE: | |
1783 | return 1; | |
1784 | case PR_EXCLUSIVE_ACCESS: | |
1785 | return 2; | |
1786 | case PR_WRITE_EXCLUSIVE_REG_ONLY: | |
1787 | return 3; | |
1788 | case PR_EXCLUSIVE_ACCESS_REG_ONLY: | |
1789 | return 4; | |
1790 | case PR_WRITE_EXCLUSIVE_ALL_REGS: | |
1791 | return 5; | |
1792 | case PR_EXCLUSIVE_ACCESS_ALL_REGS: | |
1793 | return 6; | |
1794 | default: | |
1795 | return 0; | |
1796 | } | |
1797 | }; | |
1798 | ||
1799 | static int nvme_pr_command(struct block_device *bdev, u32 cdw10, | |
1800 | u64 key, u64 sa_key, u8 op) | |
1801 | { | |
32acab31 CH |
1802 | struct nvme_ns_head *head = NULL; |
1803 | struct nvme_ns *ns; | |
1673f1f0 | 1804 | struct nvme_command c; |
32acab31 | 1805 | int srcu_idx, ret; |
1673f1f0 CH |
1806 | u8 data[16] = { 0, }; |
1807 | ||
b0d61d58 KB |
1808 | ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx); |
1809 | if (unlikely(!ns)) | |
1810 | return -EWOULDBLOCK; | |
1811 | ||
1673f1f0 CH |
1812 | put_unaligned_le64(key, &data[0]); |
1813 | put_unaligned_le64(sa_key, &data[8]); | |
1814 | ||
1815 | memset(&c, 0, sizeof(c)); | |
1816 | c.common.opcode = op; | |
b0d61d58 | 1817 | c.common.nsid = cpu_to_le32(ns->head->ns_id); |
b7c8f366 | 1818 | c.common.cdw10 = cpu_to_le32(cdw10); |
1673f1f0 | 1819 | |
b0d61d58 | 1820 | ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16); |
32acab31 CH |
1821 | nvme_put_ns_from_disk(head, srcu_idx); |
1822 | return ret; | |
1673f1f0 CH |
1823 | } |
1824 | ||
1825 | static int nvme_pr_register(struct block_device *bdev, u64 old, | |
1826 | u64 new, unsigned flags) | |
1827 | { | |
1828 | u32 cdw10; | |
1829 | ||
1830 | if (flags & ~PR_FL_IGNORE_KEY) | |
1831 | return -EOPNOTSUPP; | |
1832 | ||
1833 | cdw10 = old ? 2 : 0; | |
1834 | cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; | |
1835 | cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ | |
1836 | return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); | |
1837 | } | |
1838 | ||
1839 | static int nvme_pr_reserve(struct block_device *bdev, u64 key, | |
1840 | enum pr_type type, unsigned flags) | |
1841 | { | |
1842 | u32 cdw10; | |
1843 | ||
1844 | if (flags & ~PR_FL_IGNORE_KEY) | |
1845 | return -EOPNOTSUPP; | |
1846 | ||
1847 | cdw10 = nvme_pr_type(type) << 8; | |
1848 | cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); | |
1849 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); | |
1850 | } | |
1851 | ||
1852 | static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, | |
1853 | enum pr_type type, bool abort) | |
1854 | { | |
e9a9853c | 1855 | u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); |
1673f1f0 CH |
1856 | return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); |
1857 | } | |
1858 | ||
1859 | static int nvme_pr_clear(struct block_device *bdev, u64 key) | |
1860 | { | |
8c0b3915 | 1861 | u32 cdw10 = 1 | (key ? 1 << 3 : 0); |
1673f1f0 CH |
1862 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_register); |
1863 | } | |
1864 | ||
1865 | static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) | |
1866 | { | |
e9a9853c | 1867 | u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 1 << 3 : 0); |
1673f1f0 CH |
1868 | return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); |
1869 | } | |
1870 | ||
1871 | static const struct pr_ops nvme_pr_ops = { | |
1872 | .pr_register = nvme_pr_register, | |
1873 | .pr_reserve = nvme_pr_reserve, | |
1874 | .pr_release = nvme_pr_release, | |
1875 | .pr_preempt = nvme_pr_preempt, | |
1876 | .pr_clear = nvme_pr_clear, | |
1877 | }; | |
1878 | ||
a98e58e5 | 1879 | #ifdef CONFIG_BLK_SED_OPAL |
4f1244c8 CH |
1880 | int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, |
1881 | bool send) | |
a98e58e5 | 1882 | { |
4f1244c8 | 1883 | struct nvme_ctrl *ctrl = data; |
a98e58e5 | 1884 | struct nvme_command cmd; |
a98e58e5 SB |
1885 | |
1886 | memset(&cmd, 0, sizeof(cmd)); | |
1887 | if (send) | |
1888 | cmd.common.opcode = nvme_admin_security_send; | |
1889 | else | |
1890 | cmd.common.opcode = nvme_admin_security_recv; | |
a98e58e5 | 1891 | cmd.common.nsid = 0; |
b7c8f366 CK |
1892 | cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); |
1893 | cmd.common.cdw11 = cpu_to_le32(len); | |
a98e58e5 SB |
1894 | |
1895 | return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, | |
6287b51c | 1896 | ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false); |
a98e58e5 SB |
1897 | } |
1898 | EXPORT_SYMBOL_GPL(nvme_sec_submit); | |
1899 | #endif /* CONFIG_BLK_SED_OPAL */ | |
1900 | ||
5bae7f73 | 1901 | static const struct block_device_operations nvme_fops = { |
1673f1f0 CH |
1902 | .owner = THIS_MODULE, |
1903 | .ioctl = nvme_ioctl, | |
761f2e1e | 1904 | .compat_ioctl = nvme_ioctl, |
1673f1f0 CH |
1905 | .open = nvme_open, |
1906 | .release = nvme_release, | |
1907 | .getgeo = nvme_getgeo, | |
1908 | .revalidate_disk= nvme_revalidate_disk, | |
1909 | .pr_ops = &nvme_pr_ops, | |
1910 | }; | |
1911 | ||
32acab31 CH |
1912 | #ifdef CONFIG_NVME_MULTIPATH |
1913 | static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode) | |
1914 | { | |
1915 | struct nvme_ns_head *head = bdev->bd_disk->private_data; | |
1916 | ||
1917 | if (!kref_get_unless_zero(&head->ref)) | |
1918 | return -ENXIO; | |
1919 | return 0; | |
1920 | } | |
1921 | ||
1922 | static void nvme_ns_head_release(struct gendisk *disk, fmode_t mode) | |
1923 | { | |
1924 | nvme_put_ns_head(disk->private_data); | |
1925 | } | |
1926 | ||
1927 | const struct block_device_operations nvme_ns_head_ops = { | |
1928 | .owner = THIS_MODULE, | |
1929 | .open = nvme_ns_head_open, | |
1930 | .release = nvme_ns_head_release, | |
1931 | .ioctl = nvme_ioctl, | |
1932 | .compat_ioctl = nvme_ioctl, | |
1933 | .getgeo = nvme_getgeo, | |
1934 | .pr_ops = &nvme_pr_ops, | |
1935 | }; | |
1936 | #endif /* CONFIG_NVME_MULTIPATH */ | |
1937 | ||
5fd4ce1b CH |
1938 | static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled) |
1939 | { | |
1940 | unsigned long timeout = | |
1941 | ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; | |
1942 | u32 csts, bit = enabled ? NVME_CSTS_RDY : 0; | |
1943 | int ret; | |
1944 | ||
1945 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { | |
0df1e4f5 KB |
1946 | if (csts == ~0) |
1947 | return -ENODEV; | |
5fd4ce1b CH |
1948 | if ((csts & NVME_CSTS_RDY) == bit) |
1949 | break; | |
1950 | ||
1951 | msleep(100); | |
1952 | if (fatal_signal_pending(current)) | |
1953 | return -EINTR; | |
1954 | if (time_after(jiffies, timeout)) { | |
1b3c47c1 | 1955 | dev_err(ctrl->device, |
5fd4ce1b CH |
1956 | "Device not ready; aborting %s\n", enabled ? |
1957 | "initialisation" : "reset"); | |
1958 | return -ENODEV; | |
1959 | } | |
1960 | } | |
1961 | ||
1962 | return ret; | |
1963 | } | |
1964 | ||
1965 | /* | |
1966 | * If the device has been passed off to us in an enabled state, just clear | |
1967 | * the enabled bit. The spec says we should set the 'shutdown notification | |
1968 | * bits', but doing so may cause the device to complete commands to the | |
1969 | * admin queue ... and we don't know what memory that might be pointing at! | |
1970 | */ | |
b5b05048 | 1971 | int nvme_disable_ctrl(struct nvme_ctrl *ctrl) |
5fd4ce1b CH |
1972 | { |
1973 | int ret; | |
1974 | ||
1975 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; | |
1976 | ctrl->ctrl_config &= ~NVME_CC_ENABLE; | |
1977 | ||
1978 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); | |
1979 | if (ret) | |
1980 | return ret; | |
54adc010 | 1981 | |
b5a10c5f | 1982 | if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) |
54adc010 GP |
1983 | msleep(NVME_QUIRK_DELAY_AMOUNT); |
1984 | ||
b5b05048 | 1985 | return nvme_wait_ready(ctrl, ctrl->cap, false); |
5fd4ce1b | 1986 | } |
576d55d6 | 1987 | EXPORT_SYMBOL_GPL(nvme_disable_ctrl); |
5fd4ce1b | 1988 | |
c0f2f45b | 1989 | int nvme_enable_ctrl(struct nvme_ctrl *ctrl) |
5fd4ce1b CH |
1990 | { |
1991 | /* | |
1992 | * Default to a 4K page size, with the intention to update this | |
1993 | * path in the future to accomodate architectures with differing | |
1994 | * kernel and IO page sizes. | |
1995 | */ | |
c0f2f45b | 1996 | unsigned dev_page_min, page_shift = 12; |
5fd4ce1b CH |
1997 | int ret; |
1998 | ||
c0f2f45b SG |
1999 | ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); |
2000 | if (ret) { | |
2001 | dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret); | |
2002 | return ret; | |
2003 | } | |
2004 | dev_page_min = NVME_CAP_MPSMIN(ctrl->cap) + 12; | |
2005 | ||
5fd4ce1b | 2006 | if (page_shift < dev_page_min) { |
1b3c47c1 | 2007 | dev_err(ctrl->device, |
5fd4ce1b CH |
2008 | "Minimum device page size %u too large for host (%u)\n", |
2009 | 1 << dev_page_min, 1 << page_shift); | |
2010 | return -ENODEV; | |
2011 | } | |
2012 | ||
2013 | ctrl->page_size = 1 << page_shift; | |
2014 | ||
2015 | ctrl->ctrl_config = NVME_CC_CSS_NVM; | |
2016 | ctrl->ctrl_config |= (page_shift - 12) << NVME_CC_MPS_SHIFT; | |
60b43f62 | 2017 | ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; |
5fd4ce1b CH |
2018 | ctrl->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; |
2019 | ctrl->ctrl_config |= NVME_CC_ENABLE; | |
2020 | ||
2021 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); | |
2022 | if (ret) | |
2023 | return ret; | |
c0f2f45b | 2024 | return nvme_wait_ready(ctrl, ctrl->cap, true); |
5fd4ce1b | 2025 | } |
576d55d6 | 2026 | EXPORT_SYMBOL_GPL(nvme_enable_ctrl); |
5fd4ce1b CH |
2027 | |
2028 | int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl) | |
2029 | { | |
07fbd32a | 2030 | unsigned long timeout = jiffies + (ctrl->shutdown_timeout * HZ); |
5fd4ce1b CH |
2031 | u32 csts; |
2032 | int ret; | |
2033 | ||
2034 | ctrl->ctrl_config &= ~NVME_CC_SHN_MASK; | |
2035 | ctrl->ctrl_config |= NVME_CC_SHN_NORMAL; | |
2036 | ||
2037 | ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); | |
2038 | if (ret) | |
2039 | return ret; | |
2040 | ||
2041 | while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) { | |
2042 | if ((csts & NVME_CSTS_SHST_MASK) == NVME_CSTS_SHST_CMPLT) | |
2043 | break; | |
2044 | ||
2045 | msleep(100); | |
2046 | if (fatal_signal_pending(current)) | |
2047 | return -EINTR; | |
2048 | if (time_after(jiffies, timeout)) { | |
1b3c47c1 | 2049 | dev_err(ctrl->device, |
5fd4ce1b CH |
2050 | "Device shutdown incomplete; abort shutdown\n"); |
2051 | return -ENODEV; | |
2052 | } | |
2053 | } | |
2054 | ||
2055 | return ret; | |
2056 | } | |
576d55d6 | 2057 | EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl); |
5fd4ce1b | 2058 | |
da35825d CH |
2059 | static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, |
2060 | struct request_queue *q) | |
2061 | { | |
7c88cb00 JA |
2062 | bool vwc = false; |
2063 | ||
da35825d | 2064 | if (ctrl->max_hw_sectors) { |
45686b61 CH |
2065 | u32 max_segments = |
2066 | (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1; | |
2067 | ||
943e942e | 2068 | max_segments = min_not_zero(max_segments, ctrl->max_segments); |
da35825d | 2069 | blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors); |
45686b61 | 2070 | blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX)); |
da35825d | 2071 | } |
249159c5 KB |
2072 | if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && |
2073 | is_power_of_2(ctrl->max_hw_sectors)) | |
e6282aef | 2074 | blk_queue_chunk_sectors(q, ctrl->max_hw_sectors); |
da35825d | 2075 | blk_queue_virt_boundary(q, ctrl->page_size - 1); |
7c88cb00 JA |
2076 | if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) |
2077 | vwc = true; | |
2078 | blk_queue_write_cache(q, vwc, vwc); | |
da35825d CH |
2079 | } |
2080 | ||
dbf86b39 JD |
2081 | static int nvme_configure_timestamp(struct nvme_ctrl *ctrl) |
2082 | { | |
2083 | __le64 ts; | |
2084 | int ret; | |
2085 | ||
2086 | if (!(ctrl->oncs & NVME_CTRL_ONCS_TIMESTAMP)) | |
2087 | return 0; | |
2088 | ||
2089 | ts = cpu_to_le64(ktime_to_ms(ktime_get_real())); | |
2090 | ret = nvme_set_features(ctrl, NVME_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), | |
2091 | NULL); | |
2092 | if (ret) | |
2093 | dev_warn_once(ctrl->device, | |
2094 | "could not set timestamp (%d)\n", ret); | |
2095 | return ret; | |
2096 | } | |
2097 | ||
49cd84b6 KB |
2098 | static int nvme_configure_acre(struct nvme_ctrl *ctrl) |
2099 | { | |
2100 | struct nvme_feat_host_behavior *host; | |
2101 | int ret; | |
2102 | ||
2103 | /* Don't bother enabling the feature if retry delay is not reported */ | |
2104 | if (!ctrl->crdt[0]) | |
2105 | return 0; | |
2106 | ||
2107 | host = kzalloc(sizeof(*host), GFP_KERNEL); | |
2108 | if (!host) | |
2109 | return 0; | |
2110 | ||
2111 | host->acre = NVME_ENABLE_ACRE; | |
2112 | ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0, | |
2113 | host, sizeof(*host), NULL); | |
2114 | kfree(host); | |
2115 | return ret; | |
2116 | } | |
2117 | ||
634b8325 | 2118 | static int nvme_configure_apst(struct nvme_ctrl *ctrl) |
c5552fde AL |
2119 | { |
2120 | /* | |
2121 | * APST (Autonomous Power State Transition) lets us program a | |
2122 | * table of power state transitions that the controller will | |
2123 | * perform automatically. We configure it with a simple | |
2124 | * heuristic: we are willing to spend at most 2% of the time | |
2125 | * transitioning between power states. Therefore, when running | |
2126 | * in any given state, we will enter the next lower-power | |
76e4ad09 | 2127 | * non-operational state after waiting 50 * (enlat + exlat) |
da87591b | 2128 | * microseconds, as long as that state's exit latency is under |
c5552fde AL |
2129 | * the requested maximum latency. |
2130 | * | |
2131 | * We will not autonomously enter any non-operational state for | |
2132 | * which the total latency exceeds ps_max_latency_us. Users | |
2133 | * can set ps_max_latency_us to zero to turn off APST. | |
2134 | */ | |
2135 | ||
2136 | unsigned apste; | |
2137 | struct nvme_feat_auto_pst *table; | |
fb0dc399 AL |
2138 | u64 max_lat_us = 0; |
2139 | int max_ps = -1; | |
c5552fde AL |
2140 | int ret; |
2141 | ||
2142 | /* | |
2143 | * If APST isn't supported or if we haven't been initialized yet, | |
2144 | * then don't do anything. | |
2145 | */ | |
2146 | if (!ctrl->apsta) | |
634b8325 | 2147 | return 0; |
c5552fde AL |
2148 | |
2149 | if (ctrl->npss > 31) { | |
2150 | dev_warn(ctrl->device, "NPSS is invalid; not using APST\n"); | |
634b8325 | 2151 | return 0; |
c5552fde AL |
2152 | } |
2153 | ||
2154 | table = kzalloc(sizeof(*table), GFP_KERNEL); | |
2155 | if (!table) | |
634b8325 | 2156 | return 0; |
c5552fde | 2157 | |
76a5af84 | 2158 | if (!ctrl->apst_enabled || ctrl->ps_max_latency_us == 0) { |
c5552fde AL |
2159 | /* Turn off APST. */ |
2160 | apste = 0; | |
fb0dc399 | 2161 | dev_dbg(ctrl->device, "APST disabled\n"); |
c5552fde AL |
2162 | } else { |
2163 | __le64 target = cpu_to_le64(0); | |
2164 | int state; | |
2165 | ||
2166 | /* | |
2167 | * Walk through all states from lowest- to highest-power. | |
2168 | * According to the spec, lower-numbered states use more | |
2169 | * power. NPSS, despite the name, is the index of the | |
2170 | * lowest-power state, not the number of states. | |
2171 | */ | |
2172 | for (state = (int)ctrl->npss; state >= 0; state--) { | |
da87591b | 2173 | u64 total_latency_us, exit_latency_us, transition_ms; |
c5552fde AL |
2174 | |
2175 | if (target) | |
2176 | table->entries[state] = target; | |
2177 | ||
ff5350a8 AL |
2178 | /* |
2179 | * Don't allow transitions to the deepest state | |
2180 | * if it's quirked off. | |
2181 | */ | |
2182 | if (state == ctrl->npss && | |
2183 | (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) | |
2184 | continue; | |
2185 | ||
c5552fde AL |
2186 | /* |
2187 | * Is this state a useful non-operational state for | |
2188 | * higher-power states to autonomously transition to? | |
2189 | */ | |
2190 | if (!(ctrl->psd[state].flags & | |
2191 | NVME_PS_FLAGS_NON_OP_STATE)) | |
2192 | continue; | |
2193 | ||
da87591b KHF |
2194 | exit_latency_us = |
2195 | (u64)le32_to_cpu(ctrl->psd[state].exit_lat); | |
2196 | if (exit_latency_us > ctrl->ps_max_latency_us) | |
c5552fde AL |
2197 | continue; |
2198 | ||
da87591b KHF |
2199 | total_latency_us = |
2200 | exit_latency_us + | |
2201 | le32_to_cpu(ctrl->psd[state].entry_lat); | |
2202 | ||
c5552fde AL |
2203 | /* |
2204 | * This state is good. Use it as the APST idle | |
2205 | * target for higher power states. | |
2206 | */ | |
2207 | transition_ms = total_latency_us + 19; | |
2208 | do_div(transition_ms, 20); | |
2209 | if (transition_ms > (1 << 24) - 1) | |
2210 | transition_ms = (1 << 24) - 1; | |
2211 | ||
2212 | target = cpu_to_le64((state << 3) | | |
2213 | (transition_ms << 8)); | |
fb0dc399 AL |
2214 | |
2215 | if (max_ps == -1) | |
2216 | max_ps = state; | |
2217 | ||
2218 | if (total_latency_us > max_lat_us) | |
2219 | max_lat_us = total_latency_us; | |
c5552fde AL |
2220 | } |
2221 | ||
2222 | apste = 1; | |
fb0dc399 AL |
2223 | |
2224 | if (max_ps == -1) { | |
2225 | dev_dbg(ctrl->device, "APST enabled but no non-operational states are available\n"); | |
2226 | } else { | |
2227 | dev_dbg(ctrl->device, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n", | |
2228 | max_ps, max_lat_us, (int)sizeof(*table), table); | |
2229 | } | |
c5552fde AL |
2230 | } |
2231 | ||
2232 | ret = nvme_set_features(ctrl, NVME_FEAT_AUTO_PST, apste, | |
2233 | table, sizeof(*table), NULL); | |
2234 | if (ret) | |
2235 | dev_err(ctrl->device, "failed to set APST feature (%d)\n", ret); | |
2236 | ||
2237 | kfree(table); | |
634b8325 | 2238 | return ret; |
c5552fde AL |
2239 | } |
2240 | ||
2241 | static void nvme_set_latency_tolerance(struct device *dev, s32 val) | |
2242 | { | |
2243 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
2244 | u64 latency; | |
2245 | ||
2246 | switch (val) { | |
2247 | case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT: | |
2248 | case PM_QOS_LATENCY_ANY: | |
2249 | latency = U64_MAX; | |
2250 | break; | |
2251 | ||
2252 | default: | |
2253 | latency = val; | |
2254 | } | |
2255 | ||
2256 | if (ctrl->ps_max_latency_us != latency) { | |
2257 | ctrl->ps_max_latency_us = latency; | |
2258 | nvme_configure_apst(ctrl); | |
2259 | } | |
2260 | } | |
2261 | ||
bd4da3ab AL |
2262 | struct nvme_core_quirk_entry { |
2263 | /* | |
2264 | * NVMe model and firmware strings are padded with spaces. For | |
2265 | * simplicity, strings in the quirk table are padded with NULLs | |
2266 | * instead. | |
2267 | */ | |
2268 | u16 vid; | |
2269 | const char *mn; | |
2270 | const char *fr; | |
2271 | unsigned long quirks; | |
2272 | }; | |
2273 | ||
2274 | static const struct nvme_core_quirk_entry core_quirks[] = { | |
c5552fde | 2275 | { |
be56945c AL |
2276 | /* |
2277 | * This Toshiba device seems to die using any APST states. See: | |
2278 | * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11 | |
2279 | */ | |
2280 | .vid = 0x1179, | |
2281 | .mn = "THNSF5256GPUK TOSHIBA", | |
c5552fde | 2282 | .quirks = NVME_QUIRK_NO_APST, |
be56945c | 2283 | } |
bd4da3ab AL |
2284 | }; |
2285 | ||
2286 | /* match is null-terminated but idstr is space-padded. */ | |
2287 | static bool string_matches(const char *idstr, const char *match, size_t len) | |
2288 | { | |
2289 | size_t matchlen; | |
2290 | ||
2291 | if (!match) | |
2292 | return true; | |
2293 | ||
2294 | matchlen = strlen(match); | |
2295 | WARN_ON_ONCE(matchlen > len); | |
2296 | ||
2297 | if (memcmp(idstr, match, matchlen)) | |
2298 | return false; | |
2299 | ||
2300 | for (; matchlen < len; matchlen++) | |
2301 | if (idstr[matchlen] != ' ') | |
2302 | return false; | |
2303 | ||
2304 | return true; | |
2305 | } | |
2306 | ||
2307 | static bool quirk_matches(const struct nvme_id_ctrl *id, | |
2308 | const struct nvme_core_quirk_entry *q) | |
2309 | { | |
2310 | return q->vid == le16_to_cpu(id->vid) && | |
2311 | string_matches(id->mn, q->mn, sizeof(id->mn)) && | |
2312 | string_matches(id->fr, q->fr, sizeof(id->fr)); | |
2313 | } | |
2314 | ||
ab9e00cc CH |
2315 | static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, |
2316 | struct nvme_id_ctrl *id) | |
180de007 CH |
2317 | { |
2318 | size_t nqnlen; | |
2319 | int off; | |
2320 | ||
6299358d JD |
2321 | if(!(ctrl->quirks & NVME_QUIRK_IGNORE_DEV_SUBNQN)) { |
2322 | nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE); | |
2323 | if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) { | |
2324 | strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE); | |
2325 | return; | |
2326 | } | |
180de007 | 2327 | |
6299358d JD |
2328 | if (ctrl->vs >= NVME_VS(1, 2, 1)) |
2329 | dev_warn(ctrl->device, "missing or invalid SUBNQN field.\n"); | |
2330 | } | |
180de007 CH |
2331 | |
2332 | /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */ | |
ab9e00cc | 2333 | off = snprintf(subsys->subnqn, NVMF_NQN_SIZE, |
3da584f5 | 2334 | "nqn.2014.08.org.nvmexpress:%04x%04x", |
180de007 | 2335 | le16_to_cpu(id->vid), le16_to_cpu(id->ssvid)); |
ab9e00cc | 2336 | memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn)); |
180de007 | 2337 | off += sizeof(id->sn); |
ab9e00cc | 2338 | memcpy(subsys->subnqn + off, id->mn, sizeof(id->mn)); |
180de007 | 2339 | off += sizeof(id->mn); |
ab9e00cc CH |
2340 | memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off); |
2341 | } | |
2342 | ||
e654dfd3 | 2343 | static void nvme_release_subsystem(struct device *dev) |
ab9e00cc | 2344 | { |
e654dfd3 LG |
2345 | struct nvme_subsystem *subsys = |
2346 | container_of(dev, struct nvme_subsystem, dev); | |
2347 | ||
ab9e00cc CH |
2348 | ida_simple_remove(&nvme_subsystems_ida, subsys->instance); |
2349 | kfree(subsys); | |
2350 | } | |
2351 | ||
ab9e00cc CH |
2352 | static void nvme_destroy_subsystem(struct kref *ref) |
2353 | { | |
2354 | struct nvme_subsystem *subsys = | |
2355 | container_of(ref, struct nvme_subsystem, ref); | |
2356 | ||
2357 | mutex_lock(&nvme_subsystems_lock); | |
2358 | list_del(&subsys->entry); | |
2359 | mutex_unlock(&nvme_subsystems_lock); | |
2360 | ||
ed754e5d | 2361 | ida_destroy(&subsys->ns_ida); |
ab9e00cc CH |
2362 | device_del(&subsys->dev); |
2363 | put_device(&subsys->dev); | |
2364 | } | |
2365 | ||
2366 | static void nvme_put_subsystem(struct nvme_subsystem *subsys) | |
2367 | { | |
2368 | kref_put(&subsys->ref, nvme_destroy_subsystem); | |
2369 | } | |
2370 | ||
2371 | static struct nvme_subsystem *__nvme_find_get_subsystem(const char *subsysnqn) | |
2372 | { | |
2373 | struct nvme_subsystem *subsys; | |
2374 | ||
2375 | lockdep_assert_held(&nvme_subsystems_lock); | |
2376 | ||
2377 | list_for_each_entry(subsys, &nvme_subsystems, entry) { | |
2378 | if (strcmp(subsys->subnqn, subsysnqn)) | |
2379 | continue; | |
2380 | if (!kref_get_unless_zero(&subsys->ref)) | |
2381 | continue; | |
2382 | return subsys; | |
2383 | } | |
2384 | ||
2385 | return NULL; | |
2386 | } | |
2387 | ||
1e496938 HR |
2388 | #define SUBSYS_ATTR_RO(_name, _mode, _show) \ |
2389 | struct device_attribute subsys_attr_##_name = \ | |
2390 | __ATTR(_name, _mode, _show, NULL) | |
2391 | ||
2392 | static ssize_t nvme_subsys_show_nqn(struct device *dev, | |
2393 | struct device_attribute *attr, | |
2394 | char *buf) | |
2395 | { | |
2396 | struct nvme_subsystem *subsys = | |
2397 | container_of(dev, struct nvme_subsystem, dev); | |
2398 | ||
2399 | return snprintf(buf, PAGE_SIZE, "%s\n", subsys->subnqn); | |
2400 | } | |
2401 | static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn); | |
2402 | ||
2403 | #define nvme_subsys_show_str_function(field) \ | |
2404 | static ssize_t subsys_##field##_show(struct device *dev, \ | |
2405 | struct device_attribute *attr, char *buf) \ | |
2406 | { \ | |
2407 | struct nvme_subsystem *subsys = \ | |
2408 | container_of(dev, struct nvme_subsystem, dev); \ | |
2409 | return sprintf(buf, "%.*s\n", \ | |
2410 | (int)sizeof(subsys->field), subsys->field); \ | |
2411 | } \ | |
2412 | static SUBSYS_ATTR_RO(field, S_IRUGO, subsys_##field##_show); | |
2413 | ||
2414 | nvme_subsys_show_str_function(model); | |
2415 | nvme_subsys_show_str_function(serial); | |
2416 | nvme_subsys_show_str_function(firmware_rev); | |
2417 | ||
2418 | static struct attribute *nvme_subsys_attrs[] = { | |
2419 | &subsys_attr_model.attr, | |
2420 | &subsys_attr_serial.attr, | |
2421 | &subsys_attr_firmware_rev.attr, | |
2422 | &subsys_attr_subsysnqn.attr, | |
75c10e73 HR |
2423 | #ifdef CONFIG_NVME_MULTIPATH |
2424 | &subsys_attr_iopolicy.attr, | |
2425 | #endif | |
1e496938 HR |
2426 | NULL, |
2427 | }; | |
2428 | ||
2429 | static struct attribute_group nvme_subsys_attrs_group = { | |
2430 | .attrs = nvme_subsys_attrs, | |
2431 | }; | |
2432 | ||
2433 | static const struct attribute_group *nvme_subsys_attrs_groups[] = { | |
2434 | &nvme_subsys_attrs_group, | |
2435 | NULL, | |
2436 | }; | |
2437 | ||
1b1031ca CH |
2438 | static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, |
2439 | struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) | |
b837b283 | 2440 | { |
1b1031ca | 2441 | struct nvme_ctrl *tmp; |
b837b283 | 2442 | |
32fd90c4 CH |
2443 | lockdep_assert_held(&nvme_subsystems_lock); |
2444 | ||
1b1031ca | 2445 | list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) { |
420dc733 SG |
2446 | if (tmp->state == NVME_CTRL_DELETING || |
2447 | tmp->state == NVME_CTRL_DEAD) | |
1b1031ca CH |
2448 | continue; |
2449 | ||
2450 | if (tmp->cntlid == ctrl->cntlid) { | |
2451 | dev_err(ctrl->device, | |
2452 | "Duplicate cntlid %u with %s, rejecting\n", | |
2453 | ctrl->cntlid, dev_name(tmp->device)); | |
2454 | return false; | |
2455 | } | |
b837b283 | 2456 | |
1b1031ca CH |
2457 | if ((id->cmic & (1 << 1)) || |
2458 | (ctrl->opts && ctrl->opts->discovery_nqn)) | |
2459 | continue; | |
2460 | ||
2461 | dev_err(ctrl->device, | |
2462 | "Subsystem does not support multiple controllers\n"); | |
2463 | return false; | |
b837b283 | 2464 | } |
b837b283 | 2465 | |
1b1031ca | 2466 | return true; |
b837b283 IR |
2467 | } |
2468 | ||
ab9e00cc CH |
2469 | static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) |
2470 | { | |
2471 | struct nvme_subsystem *subsys, *found; | |
2472 | int ret; | |
2473 | ||
2474 | subsys = kzalloc(sizeof(*subsys), GFP_KERNEL); | |
2475 | if (!subsys) | |
2476 | return -ENOMEM; | |
2477 | ret = ida_simple_get(&nvme_subsystems_ida, 0, 0, GFP_KERNEL); | |
2478 | if (ret < 0) { | |
2479 | kfree(subsys); | |
2480 | return ret; | |
2481 | } | |
2482 | subsys->instance = ret; | |
2483 | mutex_init(&subsys->lock); | |
2484 | kref_init(&subsys->ref); | |
2485 | INIT_LIST_HEAD(&subsys->ctrls); | |
ed754e5d | 2486 | INIT_LIST_HEAD(&subsys->nsheads); |
ab9e00cc CH |
2487 | nvme_init_subnqn(subsys, ctrl, id); |
2488 | memcpy(subsys->serial, id->sn, sizeof(subsys->serial)); | |
2489 | memcpy(subsys->model, id->mn, sizeof(subsys->model)); | |
2490 | memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev)); | |
2491 | subsys->vendor_id = le16_to_cpu(id->vid); | |
2492 | subsys->cmic = id->cmic; | |
81adb863 | 2493 | subsys->awupf = le16_to_cpu(id->awupf); |
75c10e73 HR |
2494 | #ifdef CONFIG_NVME_MULTIPATH |
2495 | subsys->iopolicy = NVME_IOPOLICY_NUMA; | |
2496 | #endif | |
ab9e00cc CH |
2497 | |
2498 | subsys->dev.class = nvme_subsys_class; | |
2499 | subsys->dev.release = nvme_release_subsystem; | |
1e496938 | 2500 | subsys->dev.groups = nvme_subsys_attrs_groups; |
ab9e00cc CH |
2501 | dev_set_name(&subsys->dev, "nvme-subsys%d", subsys->instance); |
2502 | device_initialize(&subsys->dev); | |
2503 | ||
2504 | mutex_lock(&nvme_subsystems_lock); | |
2505 | found = __nvme_find_get_subsystem(subsys->subnqn); | |
2506 | if (found) { | |
e654dfd3 | 2507 | put_device(&subsys->dev); |
ab9e00cc | 2508 | subsys = found; |
32fd90c4 | 2509 | |
1b1031ca | 2510 | if (!nvme_validate_cntlid(subsys, ctrl, id)) { |
ab9e00cc | 2511 | ret = -EINVAL; |
32fd90c4 | 2512 | goto out_put_subsystem; |
ab9e00cc | 2513 | } |
ab9e00cc CH |
2514 | } else { |
2515 | ret = device_add(&subsys->dev); | |
2516 | if (ret) { | |
2517 | dev_err(ctrl->device, | |
2518 | "failed to register subsystem device.\n"); | |
2519 | goto out_unlock; | |
2520 | } | |
ed754e5d | 2521 | ida_init(&subsys->ns_ida); |
ab9e00cc CH |
2522 | list_add_tail(&subsys->entry, &nvme_subsystems); |
2523 | } | |
2524 | ||
ab9e00cc CH |
2525 | if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, |
2526 | dev_name(ctrl->device))) { | |
2527 | dev_err(ctrl->device, | |
2528 | "failed to create sysfs link from subsystem.\n"); | |
32fd90c4 | 2529 | goto out_put_subsystem; |
ab9e00cc CH |
2530 | } |
2531 | ||
32fd90c4 | 2532 | ctrl->subsys = subsys; |
ab9e00cc | 2533 | list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); |
32fd90c4 | 2534 | mutex_unlock(&nvme_subsystems_lock); |
ab9e00cc CH |
2535 | return 0; |
2536 | ||
32fd90c4 CH |
2537 | out_put_subsystem: |
2538 | nvme_put_subsystem(subsys); | |
ab9e00cc CH |
2539 | out_unlock: |
2540 | mutex_unlock(&nvme_subsystems_lock); | |
2541 | put_device(&subsys->dev); | |
2542 | return ret; | |
180de007 CH |
2543 | } |
2544 | ||
0e98719b CH |
2545 | int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, |
2546 | void *log, size_t size, u64 offset) | |
c627c487 KB |
2547 | { |
2548 | struct nvme_command c = { }; | |
70da6094 MB |
2549 | unsigned long dwlen = size / 4 - 1; |
2550 | ||
2551 | c.get_log_page.opcode = nvme_admin_get_log_page; | |
0e98719b | 2552 | c.get_log_page.nsid = cpu_to_le32(nsid); |
70da6094 | 2553 | c.get_log_page.lid = log_page; |
0e98719b | 2554 | c.get_log_page.lsp = lsp; |
70da6094 MB |
2555 | c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); |
2556 | c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); | |
7ec6074f MB |
2557 | c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); |
2558 | c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); | |
c627c487 KB |
2559 | |
2560 | return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); | |
2561 | } | |
2562 | ||
84fef62d KB |
2563 | static int nvme_get_effects_log(struct nvme_ctrl *ctrl) |
2564 | { | |
2565 | int ret; | |
2566 | ||
2567 | if (!ctrl->effects) | |
2568 | ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); | |
2569 | ||
2570 | if (!ctrl->effects) | |
2571 | return 0; | |
2572 | ||
0e98719b CH |
2573 | ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, |
2574 | ctrl->effects, sizeof(*ctrl->effects), 0); | |
84fef62d KB |
2575 | if (ret) { |
2576 | kfree(ctrl->effects); | |
2577 | ctrl->effects = NULL; | |
2578 | } | |
2579 | return ret; | |
180de007 CH |
2580 | } |
2581 | ||
7fd8930f CH |
2582 | /* |
2583 | * Initialize the cached copies of the Identify data and various controller | |
2584 | * register in our nvme_ctrl structure. This should be called as soon as | |
2585 | * the admin queue is fully up and running. | |
2586 | */ | |
2587 | int nvme_init_identify(struct nvme_ctrl *ctrl) | |
2588 | { | |
2589 | struct nvme_id_ctrl *id; | |
7fd8930f | 2590 | int ret, page_shift; |
a229dbf6 | 2591 | u32 max_hw_sectors; |
76a5af84 | 2592 | bool prev_apst_enabled; |
7fd8930f | 2593 | |
f3ca80fc CH |
2594 | ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs); |
2595 | if (ret) { | |
1b3c47c1 | 2596 | dev_err(ctrl->device, "Reading VS failed (%d)\n", ret); |
f3ca80fc CH |
2597 | return ret; |
2598 | } | |
4fba4458 | 2599 | page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12; |
c0f2f45b | 2600 | ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize); |
7fd8930f | 2601 | |
8ef2074d | 2602 | if (ctrl->vs >= NVME_VS(1, 1, 0)) |
4fba4458 | 2603 | ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap); |
f3ca80fc | 2604 | |
7fd8930f CH |
2605 | ret = nvme_identify_ctrl(ctrl, &id); |
2606 | if (ret) { | |
1b3c47c1 | 2607 | dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret); |
7fd8930f CH |
2608 | return -EIO; |
2609 | } | |
2610 | ||
84fef62d KB |
2611 | if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) { |
2612 | ret = nvme_get_effects_log(ctrl); | |
2613 | if (ret < 0) | |
75c8b19a | 2614 | goto out_free; |
84fef62d | 2615 | } |
180de007 | 2616 | |
bd4da3ab | 2617 | if (!ctrl->identified) { |
ab9e00cc CH |
2618 | int i; |
2619 | ||
2620 | ret = nvme_init_subsystem(ctrl, id); | |
2621 | if (ret) | |
2622 | goto out_free; | |
2623 | ||
bd4da3ab AL |
2624 | /* |
2625 | * Check for quirks. Quirk can depend on firmware version, | |
2626 | * so, in principle, the set of quirks present can change | |
2627 | * across a reset. As a possible future enhancement, we | |
2628 | * could re-scan for quirks every time we reinitialize | |
2629 | * the device, but we'd have to make sure that the driver | |
2630 | * behaves intelligently if the quirks change. | |
2631 | */ | |
bd4da3ab AL |
2632 | for (i = 0; i < ARRAY_SIZE(core_quirks); i++) { |
2633 | if (quirk_matches(id, &core_quirks[i])) | |
2634 | ctrl->quirks |= core_quirks[i].quirks; | |
2635 | } | |
2636 | } | |
2637 | ||
c35e30b4 | 2638 | if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) { |
f0425db0 | 2639 | dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n"); |
c35e30b4 AL |
2640 | ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS; |
2641 | } | |
2642 | ||
49cd84b6 KB |
2643 | ctrl->crdt[0] = le16_to_cpu(id->crdt1); |
2644 | ctrl->crdt[1] = le16_to_cpu(id->crdt2); | |
2645 | ctrl->crdt[2] = le16_to_cpu(id->crdt3); | |
2646 | ||
8a9ae523 | 2647 | ctrl->oacs = le16_to_cpu(id->oacs); |
43e2d08d | 2648 | ctrl->oncs = le16_to_cpu(id->oncs); |
2d466c7a | 2649 | ctrl->mtfa = le16_to_cpu(id->mtfa); |
c0561f82 | 2650 | ctrl->oaes = le32_to_cpu(id->oaes); |
6bf25d16 | 2651 | atomic_set(&ctrl->abort_limit, id->acl + 1); |
7fd8930f | 2652 | ctrl->vwc = id->vwc; |
7fd8930f | 2653 | if (id->mdts) |
a229dbf6 | 2654 | max_hw_sectors = 1 << (id->mdts + page_shift - 9); |
7fd8930f | 2655 | else |
a229dbf6 CH |
2656 | max_hw_sectors = UINT_MAX; |
2657 | ctrl->max_hw_sectors = | |
2658 | min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); | |
7fd8930f | 2659 | |
da35825d | 2660 | nvme_set_queue_limits(ctrl, ctrl->admin_q); |
07bfcd09 | 2661 | ctrl->sgls = le32_to_cpu(id->sgls); |
038bd4cb | 2662 | ctrl->kas = le16_to_cpu(id->kas); |
0d0b660f | 2663 | ctrl->max_namespaces = le32_to_cpu(id->mnan); |
3e53ba38 | 2664 | ctrl->ctratt = le32_to_cpu(id->ctratt); |
07bfcd09 | 2665 | |
07fbd32a MP |
2666 | if (id->rtd3e) { |
2667 | /* us -> s */ | |
2668 | u32 transition_time = le32_to_cpu(id->rtd3e) / 1000000; | |
2669 | ||
2670 | ctrl->shutdown_timeout = clamp_t(unsigned int, transition_time, | |
2671 | shutdown_timeout, 60); | |
2672 | ||
2673 | if (ctrl->shutdown_timeout != shutdown_timeout) | |
1a3838d7 | 2674 | dev_info(ctrl->device, |
07fbd32a MP |
2675 | "Shutdown timeout set to %u seconds\n", |
2676 | ctrl->shutdown_timeout); | |
2677 | } else | |
2678 | ctrl->shutdown_timeout = shutdown_timeout; | |
2679 | ||
c5552fde | 2680 | ctrl->npss = id->npss; |
76a5af84 KHF |
2681 | ctrl->apsta = id->apsta; |
2682 | prev_apst_enabled = ctrl->apst_enabled; | |
c35e30b4 AL |
2683 | if (ctrl->quirks & NVME_QUIRK_NO_APST) { |
2684 | if (force_apst && id->apsta) { | |
f0425db0 | 2685 | dev_warn(ctrl->device, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n"); |
76a5af84 | 2686 | ctrl->apst_enabled = true; |
c35e30b4 | 2687 | } else { |
76a5af84 | 2688 | ctrl->apst_enabled = false; |
c35e30b4 AL |
2689 | } |
2690 | } else { | |
76a5af84 | 2691 | ctrl->apst_enabled = id->apsta; |
c35e30b4 | 2692 | } |
c5552fde AL |
2693 | memcpy(ctrl->psd, id->psd, sizeof(ctrl->psd)); |
2694 | ||
d3d5b87d | 2695 | if (ctrl->ops->flags & NVME_F_FABRICS) { |
07bfcd09 CH |
2696 | ctrl->icdoff = le16_to_cpu(id->icdoff); |
2697 | ctrl->ioccsz = le32_to_cpu(id->ioccsz); | |
2698 | ctrl->iorcsz = le32_to_cpu(id->iorcsz); | |
2699 | ctrl->maxcmd = le16_to_cpu(id->maxcmd); | |
2700 | ||
2701 | /* | |
2702 | * In fabrics we need to verify the cntlid matches the | |
2703 | * admin connect | |
2704 | */ | |
634b8325 | 2705 | if (ctrl->cntlid != le16_to_cpu(id->cntlid)) { |
07bfcd09 | 2706 | ret = -EINVAL; |
634b8325 KB |
2707 | goto out_free; |
2708 | } | |
038bd4cb SG |
2709 | |
2710 | if (!ctrl->opts->discovery_nqn && !ctrl->kas) { | |
f0425db0 | 2711 | dev_err(ctrl->device, |
038bd4cb SG |
2712 | "keep-alive support is mandatory for fabrics\n"); |
2713 | ret = -EINVAL; | |
634b8325 | 2714 | goto out_free; |
038bd4cb | 2715 | } |
07bfcd09 CH |
2716 | } else { |
2717 | ctrl->cntlid = le16_to_cpu(id->cntlid); | |
fe6d53c9 CH |
2718 | ctrl->hmpre = le32_to_cpu(id->hmpre); |
2719 | ctrl->hmmin = le32_to_cpu(id->hmmin); | |
044a9df1 CH |
2720 | ctrl->hmminds = le32_to_cpu(id->hmminds); |
2721 | ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); | |
07bfcd09 | 2722 | } |
da35825d | 2723 | |
0d0b660f | 2724 | ret = nvme_mpath_init(ctrl, id); |
7fd8930f | 2725 | kfree(id); |
bd4da3ab | 2726 | |
0d0b660f CH |
2727 | if (ret < 0) |
2728 | return ret; | |
2729 | ||
76a5af84 | 2730 | if (ctrl->apst_enabled && !prev_apst_enabled) |
c5552fde | 2731 | dev_pm_qos_expose_latency_tolerance(ctrl->device); |
76a5af84 | 2732 | else if (!ctrl->apst_enabled && prev_apst_enabled) |
c5552fde AL |
2733 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
2734 | ||
634b8325 KB |
2735 | ret = nvme_configure_apst(ctrl); |
2736 | if (ret < 0) | |
2737 | return ret; | |
dbf86b39 JD |
2738 | |
2739 | ret = nvme_configure_timestamp(ctrl); | |
2740 | if (ret < 0) | |
2741 | return ret; | |
634b8325 KB |
2742 | |
2743 | ret = nvme_configure_directives(ctrl); | |
2744 | if (ret < 0) | |
2745 | return ret; | |
c5552fde | 2746 | |
49cd84b6 KB |
2747 | ret = nvme_configure_acre(ctrl); |
2748 | if (ret < 0) | |
2749 | return ret; | |
2750 | ||
bd4da3ab | 2751 | ctrl->identified = true; |
c5552fde | 2752 | |
634b8325 KB |
2753 | return 0; |
2754 | ||
2755 | out_free: | |
2756 | kfree(id); | |
07bfcd09 | 2757 | return ret; |
7fd8930f | 2758 | } |
576d55d6 | 2759 | EXPORT_SYMBOL_GPL(nvme_init_identify); |
7fd8930f | 2760 | |
f3ca80fc | 2761 | static int nvme_dev_open(struct inode *inode, struct file *file) |
1673f1f0 | 2762 | { |
a6a5149b CH |
2763 | struct nvme_ctrl *ctrl = |
2764 | container_of(inode->i_cdev, struct nvme_ctrl, cdev); | |
1673f1f0 | 2765 | |
2b1b7e78 JW |
2766 | switch (ctrl->state) { |
2767 | case NVME_CTRL_LIVE: | |
2768 | case NVME_CTRL_ADMIN_ONLY: | |
2769 | break; | |
2770 | default: | |
a6a5149b | 2771 | return -EWOULDBLOCK; |
2b1b7e78 JW |
2772 | } |
2773 | ||
a6a5149b | 2774 | file->private_data = ctrl; |
f3ca80fc CH |
2775 | return 0; |
2776 | } | |
2777 | ||
bfd89471 CH |
2778 | static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) |
2779 | { | |
2780 | struct nvme_ns *ns; | |
2781 | int ret; | |
2782 | ||
765cc031 | 2783 | down_read(&ctrl->namespaces_rwsem); |
bfd89471 CH |
2784 | if (list_empty(&ctrl->namespaces)) { |
2785 | ret = -ENOTTY; | |
2786 | goto out_unlock; | |
2787 | } | |
2788 | ||
2789 | ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list); | |
2790 | if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) { | |
1b3c47c1 | 2791 | dev_warn(ctrl->device, |
bfd89471 CH |
2792 | "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n"); |
2793 | ret = -EINVAL; | |
2794 | goto out_unlock; | |
2795 | } | |
2796 | ||
1b3c47c1 | 2797 | dev_warn(ctrl->device, |
bfd89471 CH |
2798 | "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n"); |
2799 | kref_get(&ns->kref); | |
765cc031 | 2800 | up_read(&ctrl->namespaces_rwsem); |
bfd89471 CH |
2801 | |
2802 | ret = nvme_user_cmd(ctrl, ns, argp); | |
2803 | nvme_put_ns(ns); | |
2804 | return ret; | |
2805 | ||
2806 | out_unlock: | |
765cc031 | 2807 | up_read(&ctrl->namespaces_rwsem); |
bfd89471 CH |
2808 | return ret; |
2809 | } | |
2810 | ||
f3ca80fc CH |
2811 | static long nvme_dev_ioctl(struct file *file, unsigned int cmd, |
2812 | unsigned long arg) | |
2813 | { | |
2814 | struct nvme_ctrl *ctrl = file->private_data; | |
2815 | void __user *argp = (void __user *)arg; | |
f3ca80fc CH |
2816 | |
2817 | switch (cmd) { | |
2818 | case NVME_IOCTL_ADMIN_CMD: | |
2819 | return nvme_user_cmd(ctrl, NULL, argp); | |
2820 | case NVME_IOCTL_IO_CMD: | |
bfd89471 | 2821 | return nvme_dev_user_cmd(ctrl, argp); |
f3ca80fc | 2822 | case NVME_IOCTL_RESET: |
1b3c47c1 | 2823 | dev_warn(ctrl->device, "resetting controller\n"); |
d86c4d8e | 2824 | return nvme_reset_ctrl_sync(ctrl); |
f3ca80fc CH |
2825 | case NVME_IOCTL_SUBSYS_RESET: |
2826 | return nvme_reset_subsystem(ctrl); | |
9ec3bb2f KB |
2827 | case NVME_IOCTL_RESCAN: |
2828 | nvme_queue_scan(ctrl); | |
2829 | return 0; | |
f3ca80fc CH |
2830 | default: |
2831 | return -ENOTTY; | |
2832 | } | |
2833 | } | |
2834 | ||
2835 | static const struct file_operations nvme_dev_fops = { | |
2836 | .owner = THIS_MODULE, | |
2837 | .open = nvme_dev_open, | |
f3ca80fc CH |
2838 | .unlocked_ioctl = nvme_dev_ioctl, |
2839 | .compat_ioctl = nvme_dev_ioctl, | |
2840 | }; | |
2841 | ||
2842 | static ssize_t nvme_sysfs_reset(struct device *dev, | |
2843 | struct device_attribute *attr, const char *buf, | |
2844 | size_t count) | |
2845 | { | |
2846 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
2847 | int ret; | |
2848 | ||
d86c4d8e | 2849 | ret = nvme_reset_ctrl_sync(ctrl); |
f3ca80fc CH |
2850 | if (ret < 0) |
2851 | return ret; | |
2852 | return count; | |
1673f1f0 | 2853 | } |
f3ca80fc | 2854 | static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset); |
1673f1f0 | 2855 | |
9ec3bb2f KB |
2856 | static ssize_t nvme_sysfs_rescan(struct device *dev, |
2857 | struct device_attribute *attr, const char *buf, | |
2858 | size_t count) | |
2859 | { | |
2860 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
2861 | ||
2862 | nvme_queue_scan(ctrl); | |
2863 | return count; | |
2864 | } | |
2865 | static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); | |
2866 | ||
5b85b826 CH |
2867 | static inline struct nvme_ns_head *dev_to_ns_head(struct device *dev) |
2868 | { | |
2869 | struct gendisk *disk = dev_to_disk(dev); | |
2870 | ||
2871 | if (disk->fops == &nvme_fops) | |
2872 | return nvme_get_ns_from_dev(dev)->head; | |
2873 | else | |
2874 | return disk->private_data; | |
2875 | } | |
2876 | ||
118472ab | 2877 | static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, |
5b85b826 | 2878 | char *buf) |
118472ab | 2879 | { |
5b85b826 CH |
2880 | struct nvme_ns_head *head = dev_to_ns_head(dev); |
2881 | struct nvme_ns_ids *ids = &head->ids; | |
2882 | struct nvme_subsystem *subsys = head->subsys; | |
ab9e00cc CH |
2883 | int serial_len = sizeof(subsys->serial); |
2884 | int model_len = sizeof(subsys->model); | |
118472ab | 2885 | |
002fab04 CH |
2886 | if (!uuid_is_null(&ids->uuid)) |
2887 | return sprintf(buf, "uuid.%pU\n", &ids->uuid); | |
6484f5d1 | 2888 | |
002fab04 CH |
2889 | if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) |
2890 | return sprintf(buf, "eui.%16phN\n", ids->nguid); | |
118472ab | 2891 | |
002fab04 CH |
2892 | if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) |
2893 | return sprintf(buf, "eui.%8phN\n", ids->eui64); | |
118472ab | 2894 | |
ab9e00cc CH |
2895 | while (serial_len > 0 && (subsys->serial[serial_len - 1] == ' ' || |
2896 | subsys->serial[serial_len - 1] == '\0')) | |
118472ab | 2897 | serial_len--; |
ab9e00cc CH |
2898 | while (model_len > 0 && (subsys->model[model_len - 1] == ' ' || |
2899 | subsys->model[model_len - 1] == '\0')) | |
118472ab KB |
2900 | model_len--; |
2901 | ||
ab9e00cc CH |
2902 | return sprintf(buf, "nvme.%04x-%*phN-%*phN-%08x\n", subsys->vendor_id, |
2903 | serial_len, subsys->serial, model_len, subsys->model, | |
5b85b826 | 2904 | head->ns_id); |
118472ab | 2905 | } |
c828a892 | 2906 | static DEVICE_ATTR_RO(wwid); |
118472ab | 2907 | |
d934f984 | 2908 | static ssize_t nguid_show(struct device *dev, struct device_attribute *attr, |
5b85b826 | 2909 | char *buf) |
d934f984 | 2910 | { |
5b85b826 | 2911 | return sprintf(buf, "%pU\n", dev_to_ns_head(dev)->ids.nguid); |
d934f984 | 2912 | } |
c828a892 | 2913 | static DEVICE_ATTR_RO(nguid); |
d934f984 | 2914 | |
2b9b6e86 | 2915 | static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, |
5b85b826 | 2916 | char *buf) |
2b9b6e86 | 2917 | { |
5b85b826 | 2918 | struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; |
d934f984 JT |
2919 | |
2920 | /* For backward compatibility expose the NGUID to userspace if | |
2921 | * we have no UUID set | |
2922 | */ | |
002fab04 | 2923 | if (uuid_is_null(&ids->uuid)) { |
d934f984 JT |
2924 | printk_ratelimited(KERN_WARNING |
2925 | "No UUID available providing old NGUID\n"); | |
002fab04 | 2926 | return sprintf(buf, "%pU\n", ids->nguid); |
d934f984 | 2927 | } |
002fab04 | 2928 | return sprintf(buf, "%pU\n", &ids->uuid); |
2b9b6e86 | 2929 | } |
c828a892 | 2930 | static DEVICE_ATTR_RO(uuid); |
2b9b6e86 KB |
2931 | |
2932 | static ssize_t eui_show(struct device *dev, struct device_attribute *attr, | |
5b85b826 | 2933 | char *buf) |
2b9b6e86 | 2934 | { |
5b85b826 | 2935 | return sprintf(buf, "%8ph\n", dev_to_ns_head(dev)->ids.eui64); |
2b9b6e86 | 2936 | } |
c828a892 | 2937 | static DEVICE_ATTR_RO(eui); |
2b9b6e86 KB |
2938 | |
2939 | static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, | |
5b85b826 | 2940 | char *buf) |
2b9b6e86 | 2941 | { |
5b85b826 | 2942 | return sprintf(buf, "%d\n", dev_to_ns_head(dev)->ns_id); |
2b9b6e86 | 2943 | } |
c828a892 | 2944 | static DEVICE_ATTR_RO(nsid); |
2b9b6e86 | 2945 | |
5b85b826 | 2946 | static struct attribute *nvme_ns_id_attrs[] = { |
118472ab | 2947 | &dev_attr_wwid.attr, |
2b9b6e86 | 2948 | &dev_attr_uuid.attr, |
d934f984 | 2949 | &dev_attr_nguid.attr, |
2b9b6e86 KB |
2950 | &dev_attr_eui.attr, |
2951 | &dev_attr_nsid.attr, | |
0d0b660f CH |
2952 | #ifdef CONFIG_NVME_MULTIPATH |
2953 | &dev_attr_ana_grpid.attr, | |
2954 | &dev_attr_ana_state.attr, | |
2955 | #endif | |
2b9b6e86 KB |
2956 | NULL, |
2957 | }; | |
2958 | ||
5b85b826 | 2959 | static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, |
2b9b6e86 KB |
2960 | struct attribute *a, int n) |
2961 | { | |
2962 | struct device *dev = container_of(kobj, struct device, kobj); | |
5b85b826 | 2963 | struct nvme_ns_ids *ids = &dev_to_ns_head(dev)->ids; |
2b9b6e86 KB |
2964 | |
2965 | if (a == &dev_attr_uuid.attr) { | |
a04b5de5 | 2966 | if (uuid_is_null(&ids->uuid) && |
002fab04 | 2967 | !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) |
d934f984 JT |
2968 | return 0; |
2969 | } | |
2970 | if (a == &dev_attr_nguid.attr) { | |
002fab04 | 2971 | if (!memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) |
2b9b6e86 KB |
2972 | return 0; |
2973 | } | |
2974 | if (a == &dev_attr_eui.attr) { | |
002fab04 | 2975 | if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) |
2b9b6e86 KB |
2976 | return 0; |
2977 | } | |
0d0b660f CH |
2978 | #ifdef CONFIG_NVME_MULTIPATH |
2979 | if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { | |
2980 | if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ | |
2981 | return 0; | |
2982 | if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) | |
2983 | return 0; | |
2984 | } | |
2985 | #endif | |
2b9b6e86 KB |
2986 | return a->mode; |
2987 | } | |
2988 | ||
eb090c4c | 2989 | static const struct attribute_group nvme_ns_id_attr_group = { |
5b85b826 CH |
2990 | .attrs = nvme_ns_id_attrs, |
2991 | .is_visible = nvme_ns_id_attrs_are_visible, | |
2b9b6e86 KB |
2992 | }; |
2993 | ||
33b14f67 HR |
2994 | const struct attribute_group *nvme_ns_id_attr_groups[] = { |
2995 | &nvme_ns_id_attr_group, | |
2996 | #ifdef CONFIG_NVM | |
2997 | &nvme_nvm_attr_group, | |
2998 | #endif | |
2999 | NULL, | |
3000 | }; | |
3001 | ||
931e1c22 | 3002 | #define nvme_show_str_function(field) \ |
779ff756 KB |
3003 | static ssize_t field##_show(struct device *dev, \ |
3004 | struct device_attribute *attr, char *buf) \ | |
3005 | { \ | |
3006 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ | |
ab9e00cc CH |
3007 | return sprintf(buf, "%.*s\n", \ |
3008 | (int)sizeof(ctrl->subsys->field), ctrl->subsys->field); \ | |
779ff756 KB |
3009 | } \ |
3010 | static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); | |
3011 | ||
ab9e00cc CH |
3012 | nvme_show_str_function(model); |
3013 | nvme_show_str_function(serial); | |
3014 | nvme_show_str_function(firmware_rev); | |
3015 | ||
931e1c22 ML |
3016 | #define nvme_show_int_function(field) \ |
3017 | static ssize_t field##_show(struct device *dev, \ | |
3018 | struct device_attribute *attr, char *buf) \ | |
3019 | { \ | |
3020 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \ | |
3021 | return sprintf(buf, "%d\n", ctrl->field); \ | |
3022 | } \ | |
3023 | static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); | |
3024 | ||
931e1c22 | 3025 | nvme_show_int_function(cntlid); |
103e515e | 3026 | nvme_show_int_function(numa_node); |
779ff756 | 3027 | |
1a353d85 ML |
3028 | static ssize_t nvme_sysfs_delete(struct device *dev, |
3029 | struct device_attribute *attr, const char *buf, | |
3030 | size_t count) | |
3031 | { | |
3032 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
3033 | ||
3034 | if (device_remove_file_self(dev, attr)) | |
c5017e85 | 3035 | nvme_delete_ctrl_sync(ctrl); |
1a353d85 ML |
3036 | return count; |
3037 | } | |
3038 | static DEVICE_ATTR(delete_controller, S_IWUSR, NULL, nvme_sysfs_delete); | |
3039 | ||
3040 | static ssize_t nvme_sysfs_show_transport(struct device *dev, | |
3041 | struct device_attribute *attr, | |
3042 | char *buf) | |
3043 | { | |
3044 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
3045 | ||
3046 | return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->ops->name); | |
3047 | } | |
3048 | static DEVICE_ATTR(transport, S_IRUGO, nvme_sysfs_show_transport, NULL); | |
3049 | ||
8432bdb2 SG |
3050 | static ssize_t nvme_sysfs_show_state(struct device *dev, |
3051 | struct device_attribute *attr, | |
3052 | char *buf) | |
3053 | { | |
3054 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
3055 | static const char *const state_name[] = { | |
3056 | [NVME_CTRL_NEW] = "new", | |
3057 | [NVME_CTRL_LIVE] = "live", | |
2b1b7e78 | 3058 | [NVME_CTRL_ADMIN_ONLY] = "only-admin", |
8432bdb2 | 3059 | [NVME_CTRL_RESETTING] = "resetting", |
ad6a0a52 | 3060 | [NVME_CTRL_CONNECTING] = "connecting", |
8432bdb2 SG |
3061 | [NVME_CTRL_DELETING] = "deleting", |
3062 | [NVME_CTRL_DEAD] = "dead", | |
3063 | }; | |
3064 | ||
3065 | if ((unsigned)ctrl->state < ARRAY_SIZE(state_name) && | |
3066 | state_name[ctrl->state]) | |
3067 | return sprintf(buf, "%s\n", state_name[ctrl->state]); | |
3068 | ||
3069 | return sprintf(buf, "unknown state\n"); | |
3070 | } | |
3071 | ||
3072 | static DEVICE_ATTR(state, S_IRUGO, nvme_sysfs_show_state, NULL); | |
3073 | ||
1a353d85 ML |
3074 | static ssize_t nvme_sysfs_show_subsysnqn(struct device *dev, |
3075 | struct device_attribute *attr, | |
3076 | char *buf) | |
3077 | { | |
3078 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
3079 | ||
ab9e00cc | 3080 | return snprintf(buf, PAGE_SIZE, "%s\n", ctrl->subsys->subnqn); |
1a353d85 ML |
3081 | } |
3082 | static DEVICE_ATTR(subsysnqn, S_IRUGO, nvme_sysfs_show_subsysnqn, NULL); | |
3083 | ||
3084 | static ssize_t nvme_sysfs_show_address(struct device *dev, | |
3085 | struct device_attribute *attr, | |
3086 | char *buf) | |
3087 | { | |
3088 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
3089 | ||
3090 | return ctrl->ops->get_address(ctrl, buf, PAGE_SIZE); | |
3091 | } | |
3092 | static DEVICE_ATTR(address, S_IRUGO, nvme_sysfs_show_address, NULL); | |
3093 | ||
779ff756 KB |
3094 | static struct attribute *nvme_dev_attrs[] = { |
3095 | &dev_attr_reset_controller.attr, | |
9ec3bb2f | 3096 | &dev_attr_rescan_controller.attr, |
779ff756 KB |
3097 | &dev_attr_model.attr, |
3098 | &dev_attr_serial.attr, | |
3099 | &dev_attr_firmware_rev.attr, | |
931e1c22 | 3100 | &dev_attr_cntlid.attr, |
1a353d85 ML |
3101 | &dev_attr_delete_controller.attr, |
3102 | &dev_attr_transport.attr, | |
3103 | &dev_attr_subsysnqn.attr, | |
3104 | &dev_attr_address.attr, | |
8432bdb2 | 3105 | &dev_attr_state.attr, |
103e515e | 3106 | &dev_attr_numa_node.attr, |
779ff756 KB |
3107 | NULL |
3108 | }; | |
3109 | ||
1a353d85 ML |
3110 | static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, |
3111 | struct attribute *a, int n) | |
3112 | { | |
3113 | struct device *dev = container_of(kobj, struct device, kobj); | |
3114 | struct nvme_ctrl *ctrl = dev_get_drvdata(dev); | |
3115 | ||
49d3d50b CH |
3116 | if (a == &dev_attr_delete_controller.attr && !ctrl->ops->delete_ctrl) |
3117 | return 0; | |
3118 | if (a == &dev_attr_address.attr && !ctrl->ops->get_address) | |
3119 | return 0; | |
1a353d85 ML |
3120 | |
3121 | return a->mode; | |
3122 | } | |
3123 | ||
779ff756 | 3124 | static struct attribute_group nvme_dev_attrs_group = { |
1a353d85 ML |
3125 | .attrs = nvme_dev_attrs, |
3126 | .is_visible = nvme_dev_attrs_are_visible, | |
779ff756 KB |
3127 | }; |
3128 | ||
3129 | static const struct attribute_group *nvme_dev_attr_groups[] = { | |
3130 | &nvme_dev_attrs_group, | |
3131 | NULL, | |
3132 | }; | |
3133 | ||
ed754e5d CH |
3134 | static struct nvme_ns_head *__nvme_find_ns_head(struct nvme_subsystem *subsys, |
3135 | unsigned nsid) | |
3136 | { | |
3137 | struct nvme_ns_head *h; | |
3138 | ||
3139 | lockdep_assert_held(&subsys->lock); | |
3140 | ||
3141 | list_for_each_entry(h, &subsys->nsheads, entry) { | |
3142 | if (h->ns_id == nsid && kref_get_unless_zero(&h->ref)) | |
3143 | return h; | |
3144 | } | |
3145 | ||
3146 | return NULL; | |
3147 | } | |
3148 | ||
3149 | static int __nvme_check_ids(struct nvme_subsystem *subsys, | |
3150 | struct nvme_ns_head *new) | |
3151 | { | |
3152 | struct nvme_ns_head *h; | |
3153 | ||
3154 | lockdep_assert_held(&subsys->lock); | |
3155 | ||
3156 | list_for_each_entry(h, &subsys->nsheads, entry) { | |
3157 | if (nvme_ns_ids_valid(&new->ids) && | |
2079699c | 3158 | !list_empty(&h->list) && |
ed754e5d CH |
3159 | nvme_ns_ids_equal(&new->ids, &h->ids)) |
3160 | return -EINVAL; | |
3161 | } | |
3162 | ||
3163 | return 0; | |
3164 | } | |
3165 | ||
3166 | static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, | |
3167 | unsigned nsid, struct nvme_id_ns *id) | |
3168 | { | |
3169 | struct nvme_ns_head *head; | |
f3334447 | 3170 | size_t size = sizeof(*head); |
ed754e5d CH |
3171 | int ret = -ENOMEM; |
3172 | ||
f3334447 CH |
3173 | #ifdef CONFIG_NVME_MULTIPATH |
3174 | size += num_possible_nodes() * sizeof(struct nvme_ns *); | |
3175 | #endif | |
3176 | ||
3177 | head = kzalloc(size, GFP_KERNEL); | |
ed754e5d CH |
3178 | if (!head) |
3179 | goto out; | |
3180 | ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL); | |
3181 | if (ret < 0) | |
3182 | goto out_free_head; | |
3183 | head->instance = ret; | |
3184 | INIT_LIST_HEAD(&head->list); | |
fd92c77f MG |
3185 | ret = init_srcu_struct(&head->srcu); |
3186 | if (ret) | |
3187 | goto out_ida_remove; | |
ed754e5d CH |
3188 | head->subsys = ctrl->subsys; |
3189 | head->ns_id = nsid; | |
3190 | kref_init(&head->ref); | |
3191 | ||
538af88e SG |
3192 | ret = nvme_report_ns_ids(ctrl, nsid, id, &head->ids); |
3193 | if (ret) | |
3194 | goto out_cleanup_srcu; | |
ed754e5d CH |
3195 | |
3196 | ret = __nvme_check_ids(ctrl->subsys, head); | |
3197 | if (ret) { | |
3198 | dev_err(ctrl->device, | |
3199 | "duplicate IDs for nsid %d\n", nsid); | |
3200 | goto out_cleanup_srcu; | |
3201 | } | |
3202 | ||
32acab31 CH |
3203 | ret = nvme_mpath_alloc_disk(ctrl, head); |
3204 | if (ret) | |
3205 | goto out_cleanup_srcu; | |
3206 | ||
ed754e5d | 3207 | list_add_tail(&head->entry, &ctrl->subsys->nsheads); |
12d9f070 JW |
3208 | |
3209 | kref_get(&ctrl->subsys->ref); | |
3210 | ||
ed754e5d CH |
3211 | return head; |
3212 | out_cleanup_srcu: | |
3213 | cleanup_srcu_struct(&head->srcu); | |
fd92c77f | 3214 | out_ida_remove: |
ed754e5d CH |
3215 | ida_simple_remove(&ctrl->subsys->ns_ida, head->instance); |
3216 | out_free_head: | |
3217 | kfree(head); | |
3218 | out: | |
538af88e SG |
3219 | if (ret > 0) |
3220 | ret = blk_status_to_errno(nvme_error_status(ret)); | |
ed754e5d CH |
3221 | return ERR_PTR(ret); |
3222 | } | |
3223 | ||
3224 | static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, | |
9bd82b1a | 3225 | struct nvme_id_ns *id) |
ed754e5d CH |
3226 | { |
3227 | struct nvme_ctrl *ctrl = ns->ctrl; | |
3228 | bool is_shared = id->nmic & (1 << 0); | |
3229 | struct nvme_ns_head *head = NULL; | |
3230 | int ret = 0; | |
3231 | ||
3232 | mutex_lock(&ctrl->subsys->lock); | |
3233 | if (is_shared) | |
3234 | head = __nvme_find_ns_head(ctrl->subsys, nsid); | |
3235 | if (!head) { | |
3236 | head = nvme_alloc_ns_head(ctrl, nsid, id); | |
3237 | if (IS_ERR(head)) { | |
3238 | ret = PTR_ERR(head); | |
3239 | goto out_unlock; | |
3240 | } | |
ed754e5d CH |
3241 | } else { |
3242 | struct nvme_ns_ids ids; | |
3243 | ||
538af88e SG |
3244 | ret = nvme_report_ns_ids(ctrl, nsid, id, &ids); |
3245 | if (ret) | |
3246 | goto out_unlock; | |
3247 | ||
ed754e5d CH |
3248 | if (!nvme_ns_ids_equal(&head->ids, &ids)) { |
3249 | dev_err(ctrl->device, | |
3250 | "IDs don't match for shared namespace %d\n", | |
3251 | nsid); | |
3252 | ret = -EINVAL; | |
3253 | goto out_unlock; | |
3254 | } | |
ed754e5d CH |
3255 | } |
3256 | ||
3257 | list_add_tail(&ns->siblings, &head->list); | |
3258 | ns->head = head; | |
3259 | ||
3260 | out_unlock: | |
3261 | mutex_unlock(&ctrl->subsys->lock); | |
538af88e SG |
3262 | if (ret > 0) |
3263 | ret = blk_status_to_errno(nvme_error_status(ret)); | |
ed754e5d CH |
3264 | return ret; |
3265 | } | |
3266 | ||
5bae7f73 CH |
3267 | static int ns_cmp(void *priv, struct list_head *a, struct list_head *b) |
3268 | { | |
3269 | struct nvme_ns *nsa = container_of(a, struct nvme_ns, list); | |
3270 | struct nvme_ns *nsb = container_of(b, struct nvme_ns, list); | |
3271 | ||
ed754e5d | 3272 | return nsa->head->ns_id - nsb->head->ns_id; |
5bae7f73 CH |
3273 | } |
3274 | ||
32f0c4af | 3275 | static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
5bae7f73 | 3276 | { |
32f0c4af | 3277 | struct nvme_ns *ns, *ret = NULL; |
69d3b8ac | 3278 | |
765cc031 | 3279 | down_read(&ctrl->namespaces_rwsem); |
5bae7f73 | 3280 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
ed754e5d | 3281 | if (ns->head->ns_id == nsid) { |
2dd41228 CH |
3282 | if (!kref_get_unless_zero(&ns->kref)) |
3283 | continue; | |
32f0c4af KB |
3284 | ret = ns; |
3285 | break; | |
3286 | } | |
ed754e5d | 3287 | if (ns->head->ns_id > nsid) |
5bae7f73 CH |
3288 | break; |
3289 | } | |
765cc031 | 3290 | up_read(&ctrl->namespaces_rwsem); |
32f0c4af | 3291 | return ret; |
5bae7f73 CH |
3292 | } |
3293 | ||
f5d11840 JA |
3294 | static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns) |
3295 | { | |
3296 | struct streams_directive_params s; | |
3297 | int ret; | |
3298 | ||
3299 | if (!ctrl->nr_streams) | |
3300 | return 0; | |
3301 | ||
ed754e5d | 3302 | ret = nvme_get_stream_params(ctrl, &s, ns->head->ns_id); |
f5d11840 JA |
3303 | if (ret) |
3304 | return ret; | |
3305 | ||
3306 | ns->sws = le32_to_cpu(s.sws); | |
3307 | ns->sgs = le16_to_cpu(s.sgs); | |
3308 | ||
3309 | if (ns->sws) { | |
3310 | unsigned int bs = 1 << ns->lba_shift; | |
3311 | ||
3312 | blk_queue_io_min(ns->queue, bs * ns->sws); | |
3313 | if (ns->sgs) | |
3314 | blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs); | |
3315 | } | |
3316 | ||
3317 | return 0; | |
3318 | } | |
3319 | ||
ab4ab09c | 3320 | static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
5bae7f73 CH |
3321 | { |
3322 | struct nvme_ns *ns; | |
3323 | struct gendisk *disk; | |
ac81bfa9 MB |
3324 | struct nvme_id_ns *id; |
3325 | char disk_name[DISK_NAME_LEN]; | |
ab4ab09c | 3326 | int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT, ret; |
5bae7f73 CH |
3327 | |
3328 | ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); | |
3329 | if (!ns) | |
ab4ab09c | 3330 | return -ENOMEM; |
5bae7f73 CH |
3331 | |
3332 | ns->queue = blk_mq_init_queue(ctrl->tagset); | |
ab4ab09c HR |
3333 | if (IS_ERR(ns->queue)) { |
3334 | ret = PTR_ERR(ns->queue); | |
ed754e5d | 3335 | goto out_free_ns; |
ab4ab09c | 3336 | } |
e0596ab2 | 3337 | |
7d30c81b | 3338 | if (ctrl->opts && ctrl->opts->data_digest) |
958f2a0f MS |
3339 | ns->queue->backing_dev_info->capabilities |
3340 | |= BDI_CAP_STABLE_WRITES; | |
3341 | ||
8b904b5b | 3342 | blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); |
e0596ab2 LG |
3343 | if (ctrl->ops->flags & NVME_F_PCI_P2PDMA) |
3344 | blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); | |
3345 | ||
5bae7f73 CH |
3346 | ns->queue->queuedata = ns; |
3347 | ns->ctrl = ctrl; | |
3348 | ||
5bae7f73 | 3349 | kref_init(&ns->kref); |
5bae7f73 | 3350 | ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ |
5bae7f73 CH |
3351 | |
3352 | blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); | |
da35825d | 3353 | nvme_set_queue_limits(ctrl, ns->queue); |
5bae7f73 | 3354 | |
331813f6 SG |
3355 | ret = nvme_identify_ns(ctrl, nsid, &id); |
3356 | if (ret) | |
ac81bfa9 MB |
3357 | goto out_free_queue; |
3358 | ||
ab4ab09c HR |
3359 | if (id->ncap == 0) { |
3360 | ret = -EINVAL; | |
cdbff4f2 | 3361 | goto out_free_id; |
ab4ab09c | 3362 | } |
cdbff4f2 | 3363 | |
ab4ab09c HR |
3364 | ret = nvme_init_ns_head(ns, nsid, id); |
3365 | if (ret) | |
ed754e5d | 3366 | goto out_free_id; |
654b4a4a | 3367 | nvme_setup_streams_ns(ctrl, ns); |
a785dbcc | 3368 | nvme_set_disk_name(disk_name, ns, ctrl, &flags); |
cdbff4f2 | 3369 | |
3dc87dd0 | 3370 | disk = alloc_disk_node(0, node); |
ab4ab09c HR |
3371 | if (!disk) { |
3372 | ret = -ENOMEM; | |
ed754e5d | 3373 | goto out_unlink_ns; |
ab4ab09c | 3374 | } |
ac81bfa9 | 3375 | |
3dc87dd0 MB |
3376 | disk->fops = &nvme_fops; |
3377 | disk->private_data = ns; | |
3378 | disk->queue = ns->queue; | |
32acab31 | 3379 | disk->flags = flags; |
3dc87dd0 MB |
3380 | memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); |
3381 | ns->disk = disk; | |
3382 | ||
3383 | __nvme_revalidate_disk(disk, id); | |
5bae7f73 | 3384 | |
85136c01 | 3385 | if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) { |
ab4ab09c HR |
3386 | ret = nvme_nvm_register(ns, disk_name, node); |
3387 | if (ret) { | |
85136c01 MB |
3388 | dev_warn(ctrl->device, "LightNVM init failure\n"); |
3389 | goto out_put_disk; | |
3390 | } | |
3391 | } | |
3392 | ||
765cc031 | 3393 | down_write(&ctrl->namespaces_rwsem); |
32f0c4af | 3394 | list_add_tail(&ns->list, &ctrl->namespaces); |
765cc031 | 3395 | up_write(&ctrl->namespaces_rwsem); |
32f0c4af | 3396 | |
d22524a4 | 3397 | nvme_get_ctrl(ctrl); |
ac81bfa9 | 3398 | |
33b14f67 | 3399 | device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups); |
32acab31 | 3400 | |
0d0b660f | 3401 | nvme_mpath_add_disk(ns, id); |
a3646451 | 3402 | nvme_fault_inject_init(&ns->fault_inject, ns->disk->disk_name); |
0d0b660f CH |
3403 | kfree(id); |
3404 | ||
ab4ab09c | 3405 | return 0; |
85136c01 MB |
3406 | out_put_disk: |
3407 | put_disk(ns->disk); | |
ed754e5d CH |
3408 | out_unlink_ns: |
3409 | mutex_lock(&ctrl->subsys->lock); | |
3410 | list_del_rcu(&ns->siblings); | |
3411 | mutex_unlock(&ctrl->subsys->lock); | |
a63b8370 | 3412 | nvme_put_ns_head(ns->head); |
ac81bfa9 MB |
3413 | out_free_id: |
3414 | kfree(id); | |
5bae7f73 CH |
3415 | out_free_queue: |
3416 | blk_cleanup_queue(ns->queue); | |
3417 | out_free_ns: | |
3418 | kfree(ns); | |
331813f6 SG |
3419 | if (ret > 0) |
3420 | ret = blk_status_to_errno(nvme_error_status(ret)); | |
ab4ab09c | 3421 | return ret; |
5bae7f73 CH |
3422 | } |
3423 | ||
3424 | static void nvme_ns_remove(struct nvme_ns *ns) | |
3425 | { | |
646017a6 KB |
3426 | if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) |
3427 | return; | |
69d3b8ac | 3428 | |
a3646451 | 3429 | nvme_fault_inject_fini(&ns->fault_inject); |
2181e455 AE |
3430 | |
3431 | mutex_lock(&ns->ctrl->subsys->lock); | |
3432 | list_del_rcu(&ns->siblings); | |
3433 | mutex_unlock(&ns->ctrl->subsys->lock); | |
3434 | synchronize_rcu(); /* guarantee not available in head->list */ | |
3435 | nvme_mpath_clear_current_path(ns); | |
3436 | synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */ | |
3437 | ||
b0b4e09c | 3438 | if (ns->disk && ns->disk->flags & GENHD_FL_UP) { |
5bae7f73 | 3439 | del_gendisk(ns->disk); |
5bae7f73 | 3440 | blk_cleanup_queue(ns->queue); |
bd9f5d65 ML |
3441 | if (blk_get_integrity(ns->disk)) |
3442 | blk_integrity_unregister(ns->disk); | |
5bae7f73 | 3443 | } |
32f0c4af | 3444 | |
765cc031 | 3445 | down_write(&ns->ctrl->namespaces_rwsem); |
5bae7f73 | 3446 | list_del_init(&ns->list); |
765cc031 | 3447 | up_write(&ns->ctrl->namespaces_rwsem); |
32f0c4af | 3448 | |
479a322f | 3449 | nvme_mpath_check_last_path(ns); |
5bae7f73 CH |
3450 | nvme_put_ns(ns); |
3451 | } | |
3452 | ||
540c801c KB |
3453 | static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) |
3454 | { | |
3455 | struct nvme_ns *ns; | |
3456 | ||
32f0c4af | 3457 | ns = nvme_find_get_ns(ctrl, nsid); |
540c801c | 3458 | if (ns) { |
b0b4e09c | 3459 | if (ns->disk && revalidate_disk(ns->disk)) |
540c801c | 3460 | nvme_ns_remove(ns); |
32f0c4af | 3461 | nvme_put_ns(ns); |
540c801c KB |
3462 | } else |
3463 | nvme_alloc_ns(ctrl, nsid); | |
3464 | } | |
3465 | ||
47b0e50a SB |
3466 | static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, |
3467 | unsigned nsid) | |
3468 | { | |
3469 | struct nvme_ns *ns, *next; | |
6f8e0d78 | 3470 | LIST_HEAD(rm_list); |
47b0e50a | 3471 | |
765cc031 | 3472 | down_write(&ctrl->namespaces_rwsem); |
47b0e50a | 3473 | list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) { |
cf39a6bc | 3474 | if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) |
6f8e0d78 | 3475 | list_move_tail(&ns->list, &rm_list); |
47b0e50a | 3476 | } |
765cc031 | 3477 | up_write(&ctrl->namespaces_rwsem); |
6f8e0d78 JW |
3478 | |
3479 | list_for_each_entry_safe(ns, next, &rm_list, list) | |
3480 | nvme_ns_remove(ns); | |
3481 | ||
47b0e50a SB |
3482 | } |
3483 | ||
540c801c KB |
3484 | static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn) |
3485 | { | |
3486 | struct nvme_ns *ns; | |
3487 | __le32 *ns_list; | |
c8e8c77b JL |
3488 | unsigned i, j, nsid, prev = 0; |
3489 | unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024); | |
540c801c KB |
3490 | int ret = 0; |
3491 | ||
42595eb7 | 3492 | ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); |
540c801c KB |
3493 | if (!ns_list) |
3494 | return -ENOMEM; | |
3495 | ||
3496 | for (i = 0; i < num_lists; i++) { | |
3497 | ret = nvme_identify_ns_list(ctrl, prev, ns_list); | |
3498 | if (ret) | |
47b0e50a | 3499 | goto free; |
540c801c KB |
3500 | |
3501 | for (j = 0; j < min(nn, 1024U); j++) { | |
3502 | nsid = le32_to_cpu(ns_list[j]); | |
3503 | if (!nsid) | |
3504 | goto out; | |
3505 | ||
3506 | nvme_validate_ns(ctrl, nsid); | |
3507 | ||
3508 | while (++prev < nsid) { | |
32f0c4af KB |
3509 | ns = nvme_find_get_ns(ctrl, prev); |
3510 | if (ns) { | |
540c801c | 3511 | nvme_ns_remove(ns); |
32f0c4af KB |
3512 | nvme_put_ns(ns); |
3513 | } | |
540c801c KB |
3514 | } |
3515 | } | |
3516 | nn -= j; | |
3517 | } | |
3518 | out: | |
47b0e50a SB |
3519 | nvme_remove_invalid_namespaces(ctrl, prev); |
3520 | free: | |
540c801c KB |
3521 | kfree(ns_list); |
3522 | return ret; | |
3523 | } | |
3524 | ||
5955be21 | 3525 | static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn) |
5bae7f73 | 3526 | { |
5bae7f73 CH |
3527 | unsigned i; |
3528 | ||
540c801c KB |
3529 | for (i = 1; i <= nn; i++) |
3530 | nvme_validate_ns(ctrl, i); | |
3531 | ||
47b0e50a | 3532 | nvme_remove_invalid_namespaces(ctrl, nn); |
5bae7f73 CH |
3533 | } |
3534 | ||
f493af37 | 3535 | static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) |
30d90964 CH |
3536 | { |
3537 | size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32); | |
3538 | __le32 *log; | |
f493af37 | 3539 | int error; |
30d90964 CH |
3540 | |
3541 | log = kzalloc(log_size, GFP_KERNEL); | |
3542 | if (!log) | |
f493af37 | 3543 | return; |
30d90964 | 3544 | |
f493af37 CH |
3545 | /* |
3546 | * We need to read the log to clear the AEN, but we don't want to rely | |
3547 | * on it for the changed namespace information as userspace could have | |
3548 | * raced with us in reading the log page, which could cause us to miss | |
3549 | * updates. | |
3550 | */ | |
0e98719b CH |
3551 | error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, |
3552 | log_size, 0); | |
f493af37 | 3553 | if (error) |
30d90964 CH |
3554 | dev_warn(ctrl->device, |
3555 | "reading changed ns log failed: %d\n", error); | |
30d90964 | 3556 | |
30d90964 | 3557 | kfree(log); |
30d90964 CH |
3558 | } |
3559 | ||
5955be21 | 3560 | static void nvme_scan_work(struct work_struct *work) |
5bae7f73 | 3561 | { |
5955be21 CH |
3562 | struct nvme_ctrl *ctrl = |
3563 | container_of(work, struct nvme_ctrl, scan_work); | |
5bae7f73 | 3564 | struct nvme_id_ctrl *id; |
540c801c | 3565 | unsigned nn; |
5bae7f73 | 3566 | |
5955be21 CH |
3567 | if (ctrl->state != NVME_CTRL_LIVE) |
3568 | return; | |
3569 | ||
2b1b7e78 JW |
3570 | WARN_ON_ONCE(!ctrl->tagset); |
3571 | ||
77016199 | 3572 | if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) { |
30d90964 | 3573 | dev_info(ctrl->device, "rescanning namespaces.\n"); |
f493af37 | 3574 | nvme_clear_changed_ns_log(ctrl); |
30d90964 CH |
3575 | } |
3576 | ||
5bae7f73 CH |
3577 | if (nvme_identify_ctrl(ctrl, &id)) |
3578 | return; | |
540c801c | 3579 | |
e7ad43c3 | 3580 | mutex_lock(&ctrl->scan_lock); |
540c801c | 3581 | nn = le32_to_cpu(id->nn); |
8ef2074d | 3582 | if (ctrl->vs >= NVME_VS(1, 1, 0) && |
540c801c KB |
3583 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { |
3584 | if (!nvme_scan_ns_list(ctrl, nn)) | |
30d90964 | 3585 | goto out_free_id; |
540c801c | 3586 | } |
5955be21 | 3587 | nvme_scan_ns_sequential(ctrl, nn); |
30d90964 | 3588 | out_free_id: |
e7ad43c3 | 3589 | mutex_unlock(&ctrl->scan_lock); |
30d90964 | 3590 | kfree(id); |
765cc031 | 3591 | down_write(&ctrl->namespaces_rwsem); |
540c801c | 3592 | list_sort(NULL, &ctrl->namespaces, ns_cmp); |
765cc031 | 3593 | up_write(&ctrl->namespaces_rwsem); |
5955be21 | 3594 | } |
5bae7f73 | 3595 | |
32f0c4af KB |
3596 | /* |
3597 | * This function iterates the namespace list unlocked to allow recovery from | |
3598 | * controller failure. It is up to the caller to ensure the namespace list is | |
3599 | * not modified by scan work while this function is executing. | |
3600 | */ | |
5bae7f73 CH |
3601 | void nvme_remove_namespaces(struct nvme_ctrl *ctrl) |
3602 | { | |
3603 | struct nvme_ns *ns, *next; | |
6f8e0d78 | 3604 | LIST_HEAD(ns_list); |
5bae7f73 | 3605 | |
f6c8e432 SG |
3606 | /* prevent racing with ns scanning */ |
3607 | flush_work(&ctrl->scan_work); | |
3608 | ||
0ff9d4e1 KB |
3609 | /* |
3610 | * The dead states indicates the controller was not gracefully | |
3611 | * disconnected. In that case, we won't be able to flush any data while | |
3612 | * removing the namespaces' disks; fail all the queues now to avoid | |
3613 | * potentially having to clean up the failed sync later. | |
3614 | */ | |
3615 | if (ctrl->state == NVME_CTRL_DEAD) | |
3616 | nvme_kill_queues(ctrl); | |
3617 | ||
765cc031 | 3618 | down_write(&ctrl->namespaces_rwsem); |
6f8e0d78 | 3619 | list_splice_init(&ctrl->namespaces, &ns_list); |
765cc031 | 3620 | up_write(&ctrl->namespaces_rwsem); |
6f8e0d78 JW |
3621 | |
3622 | list_for_each_entry_safe(ns, next, &ns_list, list) | |
5bae7f73 CH |
3623 | nvme_ns_remove(ns); |
3624 | } | |
576d55d6 | 3625 | EXPORT_SYMBOL_GPL(nvme_remove_namespaces); |
5bae7f73 | 3626 | |
e3d7874d KB |
3627 | static void nvme_aen_uevent(struct nvme_ctrl *ctrl) |
3628 | { | |
3629 | char *envp[2] = { NULL, NULL }; | |
3630 | u32 aen_result = ctrl->aen_result; | |
3631 | ||
3632 | ctrl->aen_result = 0; | |
3633 | if (!aen_result) | |
3634 | return; | |
3635 | ||
3636 | envp[0] = kasprintf(GFP_KERNEL, "NVME_AEN=%#08x", aen_result); | |
3637 | if (!envp[0]) | |
3638 | return; | |
3639 | kobject_uevent_env(&ctrl->device->kobj, KOBJ_CHANGE, envp); | |
3640 | kfree(envp[0]); | |
3641 | } | |
3642 | ||
f866fc42 CH |
3643 | static void nvme_async_event_work(struct work_struct *work) |
3644 | { | |
3645 | struct nvme_ctrl *ctrl = | |
3646 | container_of(work, struct nvme_ctrl, async_event_work); | |
3647 | ||
e3d7874d | 3648 | nvme_aen_uevent(ctrl); |
ad22c355 | 3649 | ctrl->ops->submit_async_event(ctrl); |
f866fc42 CH |
3650 | } |
3651 | ||
b6dccf7f AD |
3652 | static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) |
3653 | { | |
3654 | ||
3655 | u32 csts; | |
3656 | ||
3657 | if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) | |
3658 | return false; | |
3659 | ||
3660 | if (csts == ~0) | |
3661 | return false; | |
3662 | ||
3663 | return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP)); | |
3664 | } | |
3665 | ||
3666 | static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) | |
3667 | { | |
b6dccf7f AD |
3668 | struct nvme_fw_slot_info_log *log; |
3669 | ||
3670 | log = kmalloc(sizeof(*log), GFP_KERNEL); | |
3671 | if (!log) | |
3672 | return; | |
3673 | ||
0e98719b CH |
3674 | if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, |
3675 | sizeof(*log), 0)) | |
3676 | dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); | |
b6dccf7f AD |
3677 | kfree(log); |
3678 | } | |
3679 | ||
3680 | static void nvme_fw_act_work(struct work_struct *work) | |
3681 | { | |
3682 | struct nvme_ctrl *ctrl = container_of(work, | |
3683 | struct nvme_ctrl, fw_act_work); | |
3684 | unsigned long fw_act_timeout; | |
3685 | ||
3686 | if (ctrl->mtfa) | |
3687 | fw_act_timeout = jiffies + | |
3688 | msecs_to_jiffies(ctrl->mtfa * 100); | |
3689 | else | |
3690 | fw_act_timeout = jiffies + | |
3691 | msecs_to_jiffies(admin_timeout * 1000); | |
3692 | ||
3693 | nvme_stop_queues(ctrl); | |
3694 | while (nvme_ctrl_pp_status(ctrl)) { | |
3695 | if (time_after(jiffies, fw_act_timeout)) { | |
3696 | dev_warn(ctrl->device, | |
3697 | "Fw activation timeout, reset controller\n"); | |
3698 | nvme_reset_ctrl(ctrl); | |
3699 | break; | |
3700 | } | |
3701 | msleep(100); | |
3702 | } | |
3703 | ||
3704 | if (ctrl->state != NVME_CTRL_LIVE) | |
3705 | return; | |
3706 | ||
3707 | nvme_start_queues(ctrl); | |
a806c6c8 | 3708 | /* read FW slot information to clear the AER */ |
b6dccf7f AD |
3709 | nvme_get_fw_slot_info(ctrl); |
3710 | } | |
3711 | ||
868c2392 CH |
3712 | static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) |
3713 | { | |
09bd1ff4 CK |
3714 | u32 aer_notice_type = (result & 0xff00) >> 8; |
3715 | ||
521cfb8e CK |
3716 | trace_nvme_async_event(ctrl, aer_notice_type); |
3717 | ||
09bd1ff4 | 3718 | switch (aer_notice_type) { |
868c2392 | 3719 | case NVME_AER_NOTICE_NS_CHANGED: |
77016199 | 3720 | set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); |
868c2392 CH |
3721 | nvme_queue_scan(ctrl); |
3722 | break; | |
3723 | case NVME_AER_NOTICE_FW_ACT_STARTING: | |
3724 | queue_work(nvme_wq, &ctrl->fw_act_work); | |
3725 | break; | |
0d0b660f CH |
3726 | #ifdef CONFIG_NVME_MULTIPATH |
3727 | case NVME_AER_NOTICE_ANA: | |
3728 | if (!ctrl->ana_log_buf) | |
3729 | break; | |
3730 | queue_work(nvme_wq, &ctrl->ana_work); | |
3731 | break; | |
3732 | #endif | |
868c2392 CH |
3733 | default: |
3734 | dev_warn(ctrl->device, "async event result %08x\n", result); | |
3735 | } | |
3736 | } | |
3737 | ||
7bf58533 | 3738 | void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status, |
287a63eb | 3739 | volatile union nvme_result *res) |
f866fc42 | 3740 | { |
7bf58533 | 3741 | u32 result = le32_to_cpu(res->u32); |
09bd1ff4 | 3742 | u32 aer_type = result & 0x07; |
f866fc42 | 3743 | |
ad22c355 | 3744 | if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS) |
f866fc42 CH |
3745 | return; |
3746 | ||
09bd1ff4 | 3747 | switch (aer_type) { |
868c2392 CH |
3748 | case NVME_AER_NOTICE: |
3749 | nvme_handle_aen_notice(ctrl, result); | |
3750 | break; | |
e3d7874d KB |
3751 | case NVME_AER_ERROR: |
3752 | case NVME_AER_SMART: | |
3753 | case NVME_AER_CSS: | |
3754 | case NVME_AER_VS: | |
09bd1ff4 | 3755 | trace_nvme_async_event(ctrl, aer_type); |
e3d7874d | 3756 | ctrl->aen_result = result; |
7bf58533 CH |
3757 | break; |
3758 | default: | |
3759 | break; | |
f866fc42 | 3760 | } |
c669ccdc | 3761 | queue_work(nvme_wq, &ctrl->async_event_work); |
f866fc42 | 3762 | } |
f866fc42 | 3763 | EXPORT_SYMBOL_GPL(nvme_complete_async_event); |
f3ca80fc | 3764 | |
d09f2b45 | 3765 | void nvme_stop_ctrl(struct nvme_ctrl *ctrl) |
576d55d6 | 3766 | { |
0d0b660f | 3767 | nvme_mpath_stop(ctrl); |
d09f2b45 | 3768 | nvme_stop_keep_alive(ctrl); |
f866fc42 | 3769 | flush_work(&ctrl->async_event_work); |
b6dccf7f | 3770 | cancel_work_sync(&ctrl->fw_act_work); |
d09f2b45 SG |
3771 | } |
3772 | EXPORT_SYMBOL_GPL(nvme_stop_ctrl); | |
3773 | ||
3774 | void nvme_start_ctrl(struct nvme_ctrl *ctrl) | |
3775 | { | |
3776 | if (ctrl->kato) | |
3777 | nvme_start_keep_alive(ctrl); | |
3778 | ||
3779 | if (ctrl->queue_count > 1) { | |
3780 | nvme_queue_scan(ctrl); | |
c0561f82 | 3781 | nvme_enable_aen(ctrl); |
d99ca609 | 3782 | queue_work(nvme_wq, &ctrl->async_event_work); |
d09f2b45 SG |
3783 | nvme_start_queues(ctrl); |
3784 | } | |
3785 | } | |
3786 | EXPORT_SYMBOL_GPL(nvme_start_ctrl); | |
5955be21 | 3787 | |
d09f2b45 SG |
3788 | void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) |
3789 | { | |
f79d5fda | 3790 | nvme_fault_inject_fini(&ctrl->fault_inject); |
510a405d | 3791 | dev_pm_qos_hide_latency_tolerance(ctrl->device); |
a6a5149b | 3792 | cdev_device_del(&ctrl->cdev, ctrl->device); |
53029b04 | 3793 | } |
576d55d6 | 3794 | EXPORT_SYMBOL_GPL(nvme_uninit_ctrl); |
53029b04 | 3795 | |
d22524a4 | 3796 | static void nvme_free_ctrl(struct device *dev) |
53029b04 | 3797 | { |
d22524a4 CH |
3798 | struct nvme_ctrl *ctrl = |
3799 | container_of(dev, struct nvme_ctrl, ctrl_device); | |
ab9e00cc | 3800 | struct nvme_subsystem *subsys = ctrl->subsys; |
f3ca80fc | 3801 | |
9843f685 | 3802 | ida_simple_remove(&nvme_instance_ida, ctrl->instance); |
84fef62d | 3803 | kfree(ctrl->effects); |
0d0b660f | 3804 | nvme_mpath_uninit(ctrl); |
092ff052 | 3805 | __free_page(ctrl->discard_page); |
f3ca80fc | 3806 | |
ab9e00cc | 3807 | if (subsys) { |
32fd90c4 | 3808 | mutex_lock(&nvme_subsystems_lock); |
ab9e00cc | 3809 | list_del(&ctrl->subsys_entry); |
ab9e00cc | 3810 | sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); |
32fd90c4 | 3811 | mutex_unlock(&nvme_subsystems_lock); |
ab9e00cc | 3812 | } |
f3ca80fc CH |
3813 | |
3814 | ctrl->ops->free_ctrl(ctrl); | |
f3ca80fc | 3815 | |
ab9e00cc CH |
3816 | if (subsys) |
3817 | nvme_put_subsystem(subsys); | |
f3ca80fc CH |
3818 | } |
3819 | ||
3820 | /* | |
3821 | * Initialize a NVMe controller structures. This needs to be called during | |
3822 | * earliest initialization so that we have the initialized structured around | |
3823 | * during probing. | |
3824 | */ | |
3825 | int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, | |
3826 | const struct nvme_ctrl_ops *ops, unsigned long quirks) | |
3827 | { | |
3828 | int ret; | |
3829 | ||
bb8d261e CH |
3830 | ctrl->state = NVME_CTRL_NEW; |
3831 | spin_lock_init(&ctrl->lock); | |
e7ad43c3 | 3832 | mutex_init(&ctrl->scan_lock); |
f3ca80fc | 3833 | INIT_LIST_HEAD(&ctrl->namespaces); |
765cc031 | 3834 | init_rwsem(&ctrl->namespaces_rwsem); |
f3ca80fc CH |
3835 | ctrl->dev = dev; |
3836 | ctrl->ops = ops; | |
3837 | ctrl->quirks = quirks; | |
5955be21 | 3838 | INIT_WORK(&ctrl->scan_work, nvme_scan_work); |
f866fc42 | 3839 | INIT_WORK(&ctrl->async_event_work, nvme_async_event_work); |
b6dccf7f | 3840 | INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); |
c5017e85 | 3841 | INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); |
f3ca80fc | 3842 | |
230f1f9e JS |
3843 | INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); |
3844 | memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); | |
3845 | ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; | |
3846 | ||
cb5b7262 JA |
3847 | BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) > |
3848 | PAGE_SIZE); | |
3849 | ctrl->discard_page = alloc_page(GFP_KERNEL); | |
3850 | if (!ctrl->discard_page) { | |
3851 | ret = -ENOMEM; | |
3852 | goto out; | |
3853 | } | |
3854 | ||
9843f685 CH |
3855 | ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); |
3856 | if (ret < 0) | |
f3ca80fc | 3857 | goto out; |
9843f685 | 3858 | ctrl->instance = ret; |
f3ca80fc | 3859 | |
d22524a4 CH |
3860 | device_initialize(&ctrl->ctrl_device); |
3861 | ctrl->device = &ctrl->ctrl_device; | |
a6a5149b | 3862 | ctrl->device->devt = MKDEV(MAJOR(nvme_chr_devt), ctrl->instance); |
d22524a4 CH |
3863 | ctrl->device->class = nvme_class; |
3864 | ctrl->device->parent = ctrl->dev; | |
3865 | ctrl->device->groups = nvme_dev_attr_groups; | |
3866 | ctrl->device->release = nvme_free_ctrl; | |
3867 | dev_set_drvdata(ctrl->device, ctrl); | |
3868 | ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); | |
3869 | if (ret) | |
f3ca80fc | 3870 | goto out_release_instance; |
f3ca80fc | 3871 | |
a6a5149b CH |
3872 | cdev_init(&ctrl->cdev, &nvme_dev_fops); |
3873 | ctrl->cdev.owner = ops->module; | |
3874 | ret = cdev_device_add(&ctrl->cdev, ctrl->device); | |
d22524a4 CH |
3875 | if (ret) |
3876 | goto out_free_name; | |
f3ca80fc | 3877 | |
c5552fde AL |
3878 | /* |
3879 | * Initialize latency tolerance controls. The sysfs files won't | |
3880 | * be visible to userspace unless the device actually supports APST. | |
3881 | */ | |
3882 | ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance; | |
3883 | dev_pm_qos_update_user_latency_tolerance(ctrl->device, | |
3884 | min(default_ps_max_latency_us, (unsigned long)S32_MAX)); | |
3885 | ||
f79d5fda AM |
3886 | nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); |
3887 | ||
f3ca80fc | 3888 | return 0; |
d22524a4 | 3889 | out_free_name: |
d6a2b953 | 3890 | kfree_const(ctrl->device->kobj.name); |
f3ca80fc | 3891 | out_release_instance: |
9843f685 | 3892 | ida_simple_remove(&nvme_instance_ida, ctrl->instance); |
f3ca80fc | 3893 | out: |
cb5b7262 JA |
3894 | if (ctrl->discard_page) |
3895 | __free_page(ctrl->discard_page); | |
f3ca80fc CH |
3896 | return ret; |
3897 | } | |
576d55d6 | 3898 | EXPORT_SYMBOL_GPL(nvme_init_ctrl); |
f3ca80fc | 3899 | |
69d9a99c KB |
3900 | /** |
3901 | * nvme_kill_queues(): Ends all namespace queues | |
3902 | * @ctrl: the dead controller that needs to end | |
3903 | * | |
3904 | * Call this function when the driver determines it is unable to get the | |
3905 | * controller in a state capable of servicing IO. | |
3906 | */ | |
3907 | void nvme_kill_queues(struct nvme_ctrl *ctrl) | |
3908 | { | |
3909 | struct nvme_ns *ns; | |
3910 | ||
765cc031 | 3911 | down_read(&ctrl->namespaces_rwsem); |
82654b6b | 3912 | |
443bd90f | 3913 | /* Forcibly unquiesce queues to avoid blocking dispatch */ |
751a0cc0 | 3914 | if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q)) |
7dd1ab16 | 3915 | blk_mq_unquiesce_queue(ctrl->admin_q); |
443bd90f | 3916 | |
cf39a6bc SB |
3917 | list_for_each_entry(ns, &ctrl->namespaces, list) |
3918 | nvme_set_queue_dying(ns); | |
806f026f | 3919 | |
765cc031 | 3920 | up_read(&ctrl->namespaces_rwsem); |
69d9a99c | 3921 | } |
237045fc | 3922 | EXPORT_SYMBOL_GPL(nvme_kill_queues); |
69d9a99c | 3923 | |
302ad8cc KB |
3924 | void nvme_unfreeze(struct nvme_ctrl *ctrl) |
3925 | { | |
3926 | struct nvme_ns *ns; | |
3927 | ||
765cc031 | 3928 | down_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3929 | list_for_each_entry(ns, &ctrl->namespaces, list) |
3930 | blk_mq_unfreeze_queue(ns->queue); | |
765cc031 | 3931 | up_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3932 | } |
3933 | EXPORT_SYMBOL_GPL(nvme_unfreeze); | |
3934 | ||
3935 | void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) | |
3936 | { | |
3937 | struct nvme_ns *ns; | |
3938 | ||
765cc031 | 3939 | down_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3940 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
3941 | timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); | |
3942 | if (timeout <= 0) | |
3943 | break; | |
3944 | } | |
765cc031 | 3945 | up_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3946 | } |
3947 | EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout); | |
3948 | ||
3949 | void nvme_wait_freeze(struct nvme_ctrl *ctrl) | |
3950 | { | |
3951 | struct nvme_ns *ns; | |
3952 | ||
765cc031 | 3953 | down_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3954 | list_for_each_entry(ns, &ctrl->namespaces, list) |
3955 | blk_mq_freeze_queue_wait(ns->queue); | |
765cc031 | 3956 | up_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3957 | } |
3958 | EXPORT_SYMBOL_GPL(nvme_wait_freeze); | |
3959 | ||
3960 | void nvme_start_freeze(struct nvme_ctrl *ctrl) | |
3961 | { | |
3962 | struct nvme_ns *ns; | |
3963 | ||
765cc031 | 3964 | down_read(&ctrl->namespaces_rwsem); |
302ad8cc | 3965 | list_for_each_entry(ns, &ctrl->namespaces, list) |
1671d522 | 3966 | blk_freeze_queue_start(ns->queue); |
765cc031 | 3967 | up_read(&ctrl->namespaces_rwsem); |
302ad8cc KB |
3968 | } |
3969 | EXPORT_SYMBOL_GPL(nvme_start_freeze); | |
3970 | ||
25646264 | 3971 | void nvme_stop_queues(struct nvme_ctrl *ctrl) |
363c9aac SG |
3972 | { |
3973 | struct nvme_ns *ns; | |
3974 | ||
765cc031 | 3975 | down_read(&ctrl->namespaces_rwsem); |
a6eaa884 | 3976 | list_for_each_entry(ns, &ctrl->namespaces, list) |
3174dd33 | 3977 | blk_mq_quiesce_queue(ns->queue); |
765cc031 | 3978 | up_read(&ctrl->namespaces_rwsem); |
363c9aac | 3979 | } |
576d55d6 | 3980 | EXPORT_SYMBOL_GPL(nvme_stop_queues); |
363c9aac | 3981 | |
25646264 | 3982 | void nvme_start_queues(struct nvme_ctrl *ctrl) |
363c9aac SG |
3983 | { |
3984 | struct nvme_ns *ns; | |
3985 | ||
765cc031 | 3986 | down_read(&ctrl->namespaces_rwsem); |
8d7b8faf | 3987 | list_for_each_entry(ns, &ctrl->namespaces, list) |
f660174e | 3988 | blk_mq_unquiesce_queue(ns->queue); |
765cc031 | 3989 | up_read(&ctrl->namespaces_rwsem); |
363c9aac | 3990 | } |
576d55d6 | 3991 | EXPORT_SYMBOL_GPL(nvme_start_queues); |
363c9aac | 3992 | |
d6135c3a KB |
3993 | |
3994 | void nvme_sync_queues(struct nvme_ctrl *ctrl) | |
3995 | { | |
3996 | struct nvme_ns *ns; | |
3997 | ||
3998 | down_read(&ctrl->namespaces_rwsem); | |
3999 | list_for_each_entry(ns, &ctrl->namespaces, list) | |
4000 | blk_sync_queue(ns->queue); | |
4001 | up_read(&ctrl->namespaces_rwsem); | |
4002 | } | |
4003 | EXPORT_SYMBOL_GPL(nvme_sync_queues); | |
4004 | ||
81101540 CH |
4005 | /* |
4006 | * Check we didn't inadvertently grow the command structure sizes: | |
4007 | */ | |
4008 | static inline void _nvme_check_size(void) | |
4009 | { | |
4010 | BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); | |
4011 | BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); | |
4012 | BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); | |
4013 | BUILD_BUG_ON(sizeof(struct nvme_features) != 64); | |
4014 | BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); | |
4015 | BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); | |
4016 | BUILD_BUG_ON(sizeof(struct nvme_dsm_cmd) != 64); | |
4017 | BUILD_BUG_ON(sizeof(struct nvme_write_zeroes_cmd) != 64); | |
4018 | BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64); | |
4019 | BUILD_BUG_ON(sizeof(struct nvme_get_log_page_command) != 64); | |
4020 | BUILD_BUG_ON(sizeof(struct nvme_command) != 64); | |
4021 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != NVME_IDENTIFY_DATA_SIZE); | |
4022 | BUILD_BUG_ON(sizeof(struct nvme_id_ns) != NVME_IDENTIFY_DATA_SIZE); | |
4023 | BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); | |
4024 | BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); | |
4025 | BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); | |
4026 | BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); | |
4027 | } | |
4028 | ||
4029 | ||
893a74b7 | 4030 | static int __init nvme_core_init(void) |
5bae7f73 | 4031 | { |
b227c59b | 4032 | int result = -ENOMEM; |
5bae7f73 | 4033 | |
81101540 CH |
4034 | _nvme_check_size(); |
4035 | ||
9a6327d2 SG |
4036 | nvme_wq = alloc_workqueue("nvme-wq", |
4037 | WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); | |
4038 | if (!nvme_wq) | |
b227c59b RS |
4039 | goto out; |
4040 | ||
4041 | nvme_reset_wq = alloc_workqueue("nvme-reset-wq", | |
4042 | WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); | |
4043 | if (!nvme_reset_wq) | |
4044 | goto destroy_wq; | |
4045 | ||
4046 | nvme_delete_wq = alloc_workqueue("nvme-delete-wq", | |
4047 | WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); | |
4048 | if (!nvme_delete_wq) | |
4049 | goto destroy_reset_wq; | |
9a6327d2 | 4050 | |
a6a5149b | 4051 | result = alloc_chrdev_region(&nvme_chr_devt, 0, NVME_MINORS, "nvme"); |
f3ca80fc | 4052 | if (result < 0) |
b227c59b | 4053 | goto destroy_delete_wq; |
f3ca80fc CH |
4054 | |
4055 | nvme_class = class_create(THIS_MODULE, "nvme"); | |
4056 | if (IS_ERR(nvme_class)) { | |
4057 | result = PTR_ERR(nvme_class); | |
4058 | goto unregister_chrdev; | |
4059 | } | |
4060 | ||
ab9e00cc CH |
4061 | nvme_subsys_class = class_create(THIS_MODULE, "nvme-subsystem"); |
4062 | if (IS_ERR(nvme_subsys_class)) { | |
4063 | result = PTR_ERR(nvme_subsys_class); | |
4064 | goto destroy_class; | |
4065 | } | |
5bae7f73 | 4066 | return 0; |
f3ca80fc | 4067 | |
ab9e00cc CH |
4068 | destroy_class: |
4069 | class_destroy(nvme_class); | |
9a6327d2 | 4070 | unregister_chrdev: |
a6a5149b | 4071 | unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); |
b227c59b RS |
4072 | destroy_delete_wq: |
4073 | destroy_workqueue(nvme_delete_wq); | |
4074 | destroy_reset_wq: | |
4075 | destroy_workqueue(nvme_reset_wq); | |
9a6327d2 SG |
4076 | destroy_wq: |
4077 | destroy_workqueue(nvme_wq); | |
b227c59b | 4078 | out: |
f3ca80fc | 4079 | return result; |
5bae7f73 CH |
4080 | } |
4081 | ||
893a74b7 | 4082 | static void __exit nvme_core_exit(void) |
5bae7f73 | 4083 | { |
ab9e00cc CH |
4084 | ida_destroy(&nvme_subsystems_ida); |
4085 | class_destroy(nvme_subsys_class); | |
f3ca80fc | 4086 | class_destroy(nvme_class); |
a6a5149b | 4087 | unregister_chrdev_region(nvme_chr_devt, NVME_MINORS); |
b227c59b RS |
4088 | destroy_workqueue(nvme_delete_wq); |
4089 | destroy_workqueue(nvme_reset_wq); | |
9a6327d2 | 4090 | destroy_workqueue(nvme_wq); |
5bae7f73 | 4091 | } |
576d55d6 ML |
4092 | |
4093 | MODULE_LICENSE("GPL"); | |
4094 | MODULE_VERSION("1.0"); | |
4095 | module_init(nvme_core_init); | |
4096 | module_exit(nvme_core_exit); |