linux.git: drivers/nvme/host/multipath.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2017-2018 Christoph Hellwig.
4  */
5
6 #include <linux/backing-dev.h>
7 #include <linux/moduleparam.h>
8 #include <linux/vmalloc.h>
9 #include <trace/events/block.h>
10 #include "nvme.h"
11
12 bool multipath = true;
13 module_param(multipath, bool, 0444);
14 MODULE_PARM_DESC(multipath,
15         "turn on native support for multiple controllers per subsystem");
16
17 static const char *nvme_iopolicy_names[] = {
18         [NVME_IOPOLICY_NUMA]    = "numa",
19         [NVME_IOPOLICY_RR]      = "round-robin",
20         [NVME_IOPOLICY_QD]      = "queue-depth",
21 };
22
23 static int iopolicy = NVME_IOPOLICY_NUMA;
24
25 static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
26 {
27         if (!val)
28                 return -EINVAL;
29         if (!strncmp(val, "numa", 4))
30                 iopolicy = NVME_IOPOLICY_NUMA;
31         else if (!strncmp(val, "round-robin", 11))
32                 iopolicy = NVME_IOPOLICY_RR;
33         else if (!strncmp(val, "queue-depth", 11))
34                 iopolicy = NVME_IOPOLICY_QD;
35         else
36                 return -EINVAL;
37
38         return 0;
39 }
40
41 static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
42 {
43         return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
44 }
45
46 module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
47         &iopolicy, 0644);
48 MODULE_PARM_DESC(iopolicy,
49         "Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
50
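/*
 * The module-level "iopolicy" parameter only provides the default for
 * subsystems created after it is set; nvme_mpath_default_iopolicy() copies
 * it into each new subsystem.  Individual subsystems can be switched at
 * runtime through the "iopolicy" sysfs attribute defined near the end of
 * this file, typically exposed as
 * /sys/class/nvme-subsystem/nvme-subsysX/iopolicy, e.g.:
 *
 *	echo round-robin > /sys/class/nvme-subsystem/nvme-subsys0/iopolicy
 *
 * The default can also be set at boot, usually via nvme_core.iopolicy= on
 * the kernel command line when the driver is built as nvme-core.
 */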
51 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
52 {
53         subsys->iopolicy = iopolicy;
54 }
55
56 void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
57 {
58         struct nvme_ns_head *h;
59
60         lockdep_assert_held(&subsys->lock);
61         list_for_each_entry(h, &subsys->nsheads, entry)
62                 if (h->disk)
63                         blk_mq_unfreeze_queue(h->disk->queue);
64 }
65
66 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
67 {
68         struct nvme_ns_head *h;
69
70         lockdep_assert_held(&subsys->lock);
71         list_for_each_entry(h, &subsys->nsheads, entry)
72                 if (h->disk)
73                         blk_mq_freeze_queue_wait(h->disk->queue);
74 }
75
76 void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
77 {
78         struct nvme_ns_head *h;
79
80         lockdep_assert_held(&subsys->lock);
81         list_for_each_entry(h, &subsys->nsheads, entry)
82                 if (h->disk)
83                         blk_freeze_queue_start(h->disk->queue);
84 }
85
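/*
 * Failover handler, called when a request completes with a path-related
 * error: clear the cached path, kick off an ANA log re-read for ANA errors,
 * re-point the request's bios at the multipath node and park them on the
 * head's requeue list so they are retried on another path.
 */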
86 void nvme_failover_req(struct request *req)
87 {
88         struct nvme_ns *ns = req->q->queuedata;
89         u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK;
90         unsigned long flags;
91         struct bio *bio;
92
93         nvme_mpath_clear_current_path(ns);
94
95         /*
96          * If we got back an ANA error, we know the controller is alive but not
97          * ready to serve this namespace.  Kick off a re-read of the ANA
98          * information page, and just try any other available path for now.
99          */
100         if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
101                 set_bit(NVME_NS_ANA_PENDING, &ns->flags);
102                 queue_work(nvme_wq, &ns->ctrl->ana_work);
103         }
104
105         spin_lock_irqsave(&ns->head->requeue_lock, flags);
106         for (bio = req->bio; bio; bio = bio->bi_next) {
107                 bio_set_dev(bio, ns->head->disk->part0);
108                 if (bio->bi_opf & REQ_POLLED) {
109                         bio->bi_opf &= ~REQ_POLLED;
110                         bio->bi_cookie = BLK_QC_T_NONE;
111                 }
112                 /*
113                  * The alternate request queue that we may end up submitting
114                  * the bio to may be frozen temporarily; in that case REQ_NOWAIT
115                  * would fail the I/O immediately with EAGAIN back to the issuer,
116                  * which cannot block.  We are not in the issuer's context, so clear
117                  * the flag to avoid spurious EAGAIN I/O failures.
118                  */
119                 bio->bi_opf &= ~REQ_NOWAIT;
120         }
121         blk_steal_bios(&ns->head->requeue_list, req);
122         spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
123
124         nvme_req(req)->status = 0;
125         nvme_end_req(req);
126         kblockd_schedule_work(&ns->head->requeue_work);
127 }
128
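/*
 * Per-request start hook for multipath: account the request towards the
 * controller's queue depth when the queue-depth policy is active, and, if
 * I/O stats are enabled, start accounting against the multipath node's
 * disk for non-passthrough requests.
 */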
129 void nvme_mpath_start_request(struct request *rq)
130 {
131         struct nvme_ns *ns = rq->q->queuedata;
132         struct gendisk *disk = ns->head->disk;
133
134         if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
135                 atomic_inc(&ns->ctrl->nr_active);
136                 nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
137         }
138
139         if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
140                 return;
141
142         nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
143         nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
144                                                       jiffies);
145 }
146 EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
147
148 void nvme_mpath_end_request(struct request *rq)
149 {
150         struct nvme_ns *ns = rq->q->queuedata;
151
152         if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
153                 atomic_dec_if_positive(&ns->ctrl->nr_active);
154
155         if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
156                 return;
157         bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
158                          blk_rq_bytes(rq) >> SECTOR_SHIFT,
159                          nvme_req(rq)->start_time);
160 }
161
162 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
163 {
164         struct nvme_ns *ns;
165         int srcu_idx;
166
167         srcu_idx = srcu_read_lock(&ctrl->srcu);
168         list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
169                 if (!ns->head->disk)
170                         continue;
171                 kblockd_schedule_work(&ns->head->requeue_work);
172                 if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
173                         disk_uevent(ns->head->disk, KOBJ_CHANGE);
174         }
175         srcu_read_unlock(&ctrl->srcu, srcu_idx);
176 }
177
178 static const char *nvme_ana_state_names[] = {
179         [0]                             = "invalid state",
180         [NVME_ANA_OPTIMIZED]            = "optimized",
181         [NVME_ANA_NONOPTIMIZED]         = "non-optimized",
182         [NVME_ANA_INACCESSIBLE]         = "inaccessible",
183         [NVME_ANA_PERSISTENT_LOSS]      = "persistent-loss",
184         [NVME_ANA_CHANGE]               = "change",
185 };
186
187 bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
188 {
189         struct nvme_ns_head *head = ns->head;
190         bool changed = false;
191         int node;
192
193         if (!head)
194                 goto out;
195
196         for_each_node(node) {
197                 if (ns == rcu_access_pointer(head->current_path[node])) {
198                         rcu_assign_pointer(head->current_path[node], NULL);
199                         changed = true;
200                 }
201         }
202 out:
203         return changed;
204 }
205
206 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
207 {
208         struct nvme_ns *ns;
209         int srcu_idx;
210
211         srcu_idx = srcu_read_lock(&ctrl->srcu);
212         list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
213                 nvme_mpath_clear_current_path(ns);
214                 kblockd_schedule_work(&ns->head->requeue_work);
215         }
216         srcu_read_unlock(&ctrl->srcu, srcu_idx);
217 }
218
219 void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
220 {
221         struct nvme_ns_head *head = ns->head;
222         sector_t capacity = get_capacity(head->disk);
223         int node;
224         int srcu_idx;
225
226         srcu_idx = srcu_read_lock(&head->srcu);
227         list_for_each_entry_rcu(ns, &head->list, siblings) {
228                 if (capacity != get_capacity(ns->disk))
229                         clear_bit(NVME_NS_READY, &ns->flags);
230         }
231         srcu_read_unlock(&head->srcu, srcu_idx);
232
233         for_each_node(node)
234                 rcu_assign_pointer(head->current_path[node], NULL);
235         kblockd_schedule_work(&head->requeue_work);
236 }
237
238 static bool nvme_path_is_disabled(struct nvme_ns *ns)
239 {
240         enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
241
242         /*
243          * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
244          * still be able to complete assuming that the controller is connected.
245          * Otherwise the I/O will fail immediately and be put back on the requeue list.
246          */
247         if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
248                 return true;
249         if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
250             !test_bit(NVME_NS_READY, &ns->flags))
251                 return true;
252         return false;
253 }
254
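/*
 * NUMA policy path lookup: scan all sibling paths and pick the usable
 * ANA-optimized path with the smallest NUMA distance from @node (when the
 * NUMA policy is in effect), falling back to the closest non-optimized
 * path.  The result is cached in head->current_path[node] for subsequent
 * submissions.
 */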
255 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
256 {
257         int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
258         struct nvme_ns *found = NULL, *fallback = NULL, *ns;
259
260         list_for_each_entry_rcu(ns, &head->list, siblings) {
261                 if (nvme_path_is_disabled(ns))
262                         continue;
263
264                 if (ns->ctrl->numa_node != NUMA_NO_NODE &&
265                     READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
266                         distance = node_distance(node, ns->ctrl->numa_node);
267                 else
268                         distance = LOCAL_DISTANCE;
269
270                 switch (ns->ana_state) {
271                 case NVME_ANA_OPTIMIZED:
272                         if (distance < found_distance) {
273                                 found_distance = distance;
274                                 found = ns;
275                         }
276                         break;
277                 case NVME_ANA_NONOPTIMIZED:
278                         if (distance < fallback_distance) {
279                                 fallback_distance = distance;
280                                 fallback = ns;
281                         }
282                         break;
283                 default:
284                         break;
285                 }
286         }
287
288         if (!found)
289                 found = fallback;
290         if (found)
291                 rcu_assign_pointer(head->current_path[node], found);
292         return found;
293 }
294
295 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
296                 struct nvme_ns *ns)
297 {
298         ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
299                         siblings);
300         if (ns)
301                 return ns;
302         return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
303 }
304
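/*
 * Round-robin policy: start from the path used last (cached in
 * head->current_path[]) and walk the sibling list, preferring the first
 * usable ANA-optimized path and falling back to a non-optimized one.  The
 * previous path is only reused under the conditions documented at the end
 * of this function.
 */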
305 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head)
306 {
307         struct nvme_ns *ns, *found = NULL;
308         int node = numa_node_id();
309         struct nvme_ns *old = srcu_dereference(head->current_path[node],
310                                                &head->srcu);
311
312         if (unlikely(!old))
313                 return __nvme_find_path(head, node);
314
315         if (list_is_singular(&head->list)) {
316                 if (nvme_path_is_disabled(old))
317                         return NULL;
318                 return old;
319         }
320
321         for (ns = nvme_next_ns(head, old);
322              ns && ns != old;
323              ns = nvme_next_ns(head, ns)) {
324                 if (nvme_path_is_disabled(ns))
325                         continue;
326
327                 if (ns->ana_state == NVME_ANA_OPTIMIZED) {
328                         found = ns;
329                         goto out;
330                 }
331                 if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
332                         found = ns;
333         }
334
335         /*
336          * The loop above skips the current path for round-robin semantics.
337          * Fall back to the current path if either:
338          *  - no other optimized path found and current is optimized,
339          *  - no other usable path found and current is usable.
340          */
341         if (!nvme_path_is_disabled(old) &&
342             (old->ana_state == NVME_ANA_OPTIMIZED ||
343              (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
344                 return old;
345
346         if (!found)
347                 return NULL;
348 out:
349         rcu_assign_pointer(head->current_path[node], found);
350         return found;
351 }
352
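/*
 * Queue-depth policy: choose the usable path whose controller has the
 * fewest multipath requests in flight (ctrl->nr_active, maintained by
 * nvme_mpath_start_request()/nvme_mpath_end_request()), preferring
 * ANA-optimized paths.  Bail out early if an idle optimized path is found.
 */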
353 static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
354 {
355         struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
356         unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
357         unsigned int depth;
358
359         list_for_each_entry_rcu(ns, &head->list, siblings) {
360                 if (nvme_path_is_disabled(ns))
361                         continue;
362
363                 depth = atomic_read(&ns->ctrl->nr_active);
364
365                 switch (ns->ana_state) {
366                 case NVME_ANA_OPTIMIZED:
367                         if (depth < min_depth_opt) {
368                                 min_depth_opt = depth;
369                                 best_opt = ns;
370                         }
371                         break;
372                 case NVME_ANA_NONOPTIMIZED:
373                         if (depth < min_depth_nonopt) {
374                                 min_depth_nonopt = depth;
375                                 best_nonopt = ns;
376                         }
377                         break;
378                 default:
379                         break;
380                 }
381
382                 if (min_depth_opt == 0)
383                         return best_opt;
384         }
385
386         return best_opt ? best_opt : best_nonopt;
387 }
388
389 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
390 {
391         return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
392                 ns->ana_state == NVME_ANA_OPTIMIZED;
393 }
394
395 static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
396 {
397         int node = numa_node_id();
398         struct nvme_ns *ns;
399
400         ns = srcu_dereference(head->current_path[node], &head->srcu);
401         if (unlikely(!ns))
402                 return __nvme_find_path(head, node);
403         if (unlikely(!nvme_path_is_optimized(ns)))
404                 return __nvme_find_path(head, node);
405         return ns;
406 }
407
408 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
409 {
410         switch (READ_ONCE(head->subsys->iopolicy)) {
411         case NVME_IOPOLICY_QD:
412                 return nvme_queue_depth_path(head);
413         case NVME_IOPOLICY_RR:
414                 return nvme_round_robin_path(head);
415         default:
416                 return nvme_numa_path(head);
417         }
418 }
419
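/*
 * Returns true if at least one path could still become usable, i.e. its
 * controller is live, resetting or connecting and its fail-fast timeout
 * has not expired.  The submit path uses this to decide between requeueing
 * and failing I/O when no path is currently usable.
 */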
420 static bool nvme_available_path(struct nvme_ns_head *head)
421 {
422         struct nvme_ns *ns;
423
424         list_for_each_entry_rcu(ns, &head->list, siblings) {
425                 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
426                         continue;
427                 switch (nvme_ctrl_state(ns->ctrl)) {
428                 case NVME_CTRL_LIVE:
429                 case NVME_CTRL_RESETTING:
430                 case NVME_CTRL_CONNECTING:
431                         /* fallthru */
432                         return true;
433                 default:
434                         break;
435                 }
436         }
437         return false;
438 }
439
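/*
 * bio submission entry point for the multipath node: split the bio to the
 * queue limits, pick a path under SRCU and remap the bio to that path's
 * disk.  If no path is usable right now but one may come back, park the
 * bio on the requeue list; otherwise fail it.
 */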
440 static void nvme_ns_head_submit_bio(struct bio *bio)
441 {
442         struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
443         struct device *dev = disk_to_dev(head->disk);
444         struct nvme_ns *ns;
445         int srcu_idx;
446
447         /*
448          * The namespace might be going away and the bio might be moved to a
449          * different queue via blk_steal_bios(), so we need to use the bio_split
450          * pool from the original queue to allocate the bvecs from.
451          */
452         bio = bio_split_to_limits(bio);
453         if (!bio)
454                 return;
455
456         srcu_idx = srcu_read_lock(&head->srcu);
457         ns = nvme_find_path(head);
458         if (likely(ns)) {
459                 bio_set_dev(bio, ns->disk->part0);
460                 bio->bi_opf |= REQ_NVME_MPATH;
461                 trace_block_bio_remap(bio, disk_devt(ns->head->disk),
462                                       bio->bi_iter.bi_sector);
463                 submit_bio_noacct(bio);
464         } else if (nvme_available_path(head)) {
465                 dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
466
467                 spin_lock_irq(&head->requeue_lock);
468                 bio_list_add(&head->requeue_list, bio);
469                 spin_unlock_irq(&head->requeue_lock);
470         } else {
471                 dev_warn_ratelimited(dev, "no available path - failing I/O\n");
472
473                 bio_io_error(bio);
474         }
475
476         srcu_read_unlock(&head->srcu, srcu_idx);
477 }
478
479 static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode)
480 {
481         if (!nvme_tryget_ns_head(disk->private_data))
482                 return -ENXIO;
483         return 0;
484 }
485
486 static void nvme_ns_head_release(struct gendisk *disk)
487 {
488         nvme_put_ns_head(disk->private_data);
489 }
490
491 static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
492                 enum blk_unique_id type)
493 {
494         struct nvme_ns_head *head = disk->private_data;
495         struct nvme_ns *ns;
496         int srcu_idx, ret = -EWOULDBLOCK;
497
498         srcu_idx = srcu_read_lock(&head->srcu);
499         ns = nvme_find_path(head);
500         if (ns)
501                 ret = nvme_ns_get_unique_id(ns, id, type);
502         srcu_read_unlock(&head->srcu, srcu_idx);
503         return ret;
504 }
505
506 #ifdef CONFIG_BLK_DEV_ZONED
507 static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
508                 unsigned int nr_zones, report_zones_cb cb, void *data)
509 {
510         struct nvme_ns_head *head = disk->private_data;
511         struct nvme_ns *ns;
512         int srcu_idx, ret = -EWOULDBLOCK;
513
514         srcu_idx = srcu_read_lock(&head->srcu);
515         ns = nvme_find_path(head);
516         if (ns)
517                 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
518         srcu_read_unlock(&head->srcu, srcu_idx);
519         return ret;
520 }
521 #else
522 #define nvme_ns_head_report_zones       NULL
523 #endif /* CONFIG_BLK_DEV_ZONED */
524
525 const struct block_device_operations nvme_ns_head_ops = {
526         .owner          = THIS_MODULE,
527         .submit_bio     = nvme_ns_head_submit_bio,
528         .open           = nvme_ns_head_open,
529         .release        = nvme_ns_head_release,
530         .ioctl          = nvme_ns_head_ioctl,
531         .compat_ioctl   = blkdev_compat_ptr_ioctl,
532         .getgeo         = nvme_getgeo,
533         .get_unique_id  = nvme_ns_head_get_unique_id,
534         .report_zones   = nvme_ns_head_report_zones,
535         .pr_ops         = &nvme_pr_ops,
536 };
537
538 static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
539 {
540         return container_of(cdev, struct nvme_ns_head, cdev);
541 }
542
543 static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
544 {
545         if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
546                 return -ENXIO;
547         return 0;
548 }
549
550 static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
551 {
552         nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
553         return 0;
554 }
555
556 static const struct file_operations nvme_ns_head_chr_fops = {
557         .owner          = THIS_MODULE,
558         .open           = nvme_ns_head_chr_open,
559         .release        = nvme_ns_head_chr_release,
560         .unlocked_ioctl = nvme_ns_head_chr_ioctl,
561         .compat_ioctl   = compat_ptr_ioctl,
562         .uring_cmd      = nvme_ns_head_chr_uring_cmd,
563         .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
564 };
565
566 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
567 {
568         int ret;
569
570         head->cdev_device.parent = &head->subsys->dev;
571         ret = dev_set_name(&head->cdev_device, "ng%dn%d",
572                            head->subsys->instance, head->instance);
573         if (ret)
574                 return ret;
575         ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
576                             &nvme_ns_head_chr_fops, THIS_MODULE);
577         return ret;
578 }
579
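/*
 * Resubmit bios parked on the requeue list.  They were re-pointed at the
 * multipath node, so submitting them goes through nvme_ns_head_submit_bio()
 * again and selects a currently usable path.
 */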
580 static void nvme_requeue_work(struct work_struct *work)
581 {
582         struct nvme_ns_head *head =
583                 container_of(work, struct nvme_ns_head, requeue_work);
584         struct bio *bio, *next;
585
586         spin_lock_irq(&head->requeue_lock);
587         next = bio_list_get(&head->requeue_list);
588         spin_unlock_irq(&head->requeue_lock);
589
590         while ((bio = next) != NULL) {
591                 next = bio->bi_next;
592                 bio->bi_next = NULL;
593
594                 submit_bio_noacct(bio);
595         }
596 }
597
598 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
599 {
600         struct queue_limits lim;
601
602         mutex_init(&head->lock);
603         bio_list_init(&head->requeue_list);
604         spin_lock_init(&head->requeue_lock);
605         INIT_WORK(&head->requeue_work, nvme_requeue_work);
606
607         /*
608          * Add a multipath node if the subsystem supports multiple controllers.
609          * We also do this for private namespaces as the namespace sharing flag
610          * could change after a rescan.
611          */
612         if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
613             !nvme_is_unique_nsid(ctrl, head) || !multipath)
614                 return 0;
615
616         blk_set_stacking_limits(&lim);
617         lim.dma_alignment = 3;
618         lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
619         if (head->ids.csi != NVME_CSI_ZNS)
620                 lim.max_zone_append_sectors = 0;
621
622         head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
623         if (IS_ERR(head->disk))
624                 return PTR_ERR(head->disk);
625         head->disk->fops = &nvme_ns_head_ops;
626         head->disk->private_data = head;
627         sprintf(head->disk->disk_name, "nvme%dn%d",
628                         ctrl->subsys->instance, head->instance);
629         return 0;
630 }
631
632 static void nvme_mpath_set_live(struct nvme_ns *ns)
633 {
634         struct nvme_ns_head *head = ns->head;
635         int rc;
636
637         if (!head->disk)
638                 return;
639
640         /*
641          * test_and_set_bit() is used because it is protecting against two nvme
642          * paths simultaneously calling device_add_disk() on the same namespace
643          * head.
644          */
645         if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
646                 rc = device_add_disk(&head->subsys->dev, head->disk,
647                                      nvme_ns_attr_groups);
648                 if (rc) {
649                         clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
650                         return;
651                 }
652                 nvme_add_ns_head_cdev(head);
653         }
654
655         mutex_lock(&head->lock);
656         if (nvme_path_is_optimized(ns)) {
657                 int node, srcu_idx;
658
659                 srcu_idx = srcu_read_lock(&head->srcu);
660                 for_each_online_node(node)
661                         __nvme_find_path(head, node);
662                 srcu_read_unlock(&head->srcu, srcu_idx);
663         }
664         mutex_unlock(&head->lock);
665
666         synchronize_srcu(&head->srcu);
667         kblockd_schedule_work(&head->requeue_work);
668 }
669
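/*
 * Walk the ANA log page: validate each group descriptor against the log
 * size, ANAGRPMAX and the defined ANA states, then hand it (followed by
 * its NSID list) to @cb.  Stops on the first callback error.
 */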
670 static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
671                 int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
672                         void *))
673 {
674         void *base = ctrl->ana_log_buf;
675         size_t offset = sizeof(struct nvme_ana_rsp_hdr);
676         int error, i;
677
678         lockdep_assert_held(&ctrl->ana_lock);
679
680         for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
681                 struct nvme_ana_group_desc *desc = base + offset;
682                 u32 nr_nsids;
683                 size_t nsid_buf_size;
684
685                 if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
686                         return -EINVAL;
687
688                 nr_nsids = le32_to_cpu(desc->nnsids);
689                 nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
690
691                 if (WARN_ON_ONCE(desc->grpid == 0))
692                         return -EINVAL;
693                 if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
694                         return -EINVAL;
695                 if (WARN_ON_ONCE(desc->state == 0))
696                         return -EINVAL;
697                 if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
698                         return -EINVAL;
699
700                 offset += sizeof(*desc);
701                 if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
702                         return -EINVAL;
703
704                 error = cb(ctrl, desc, data);
705                 if (error)
706                         return error;
707
708                 offset += nsid_buf_size;
709         }
710
711         return 0;
712 }
713
714 static inline bool nvme_state_is_live(enum nvme_ana_state state)
715 {
716         return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
717 }
718
719 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
720                 struct nvme_ns *ns)
721 {
722         ns->ana_grpid = le32_to_cpu(desc->grpid);
723         ns->ana_state = desc->state;
724         clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
725         /*
726          * nvme_mpath_set_live() will trigger I/O to the multipath path device
727          * and in turn to this path device.  However we cannot accept this I/O
728          * if the controller is not live.  This can deadlock when called from
729          * nvme_mpath_init_identify(), as the controller will then never finish
730          * initialization and the I/O will never complete.  For this case we
731          * will reprocess the ANA log page in nvme_mpath_update() once the
732          * controller is ready.
733          */
734         if (nvme_state_is_live(ns->ana_state) &&
735             nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
736                 nvme_mpath_set_live(ns);
737 }
738
739 static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
740                 struct nvme_ana_group_desc *desc, void *data)
741 {
742         u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
743         unsigned *nr_change_groups = data;
744         struct nvme_ns *ns;
745         int srcu_idx;
746
747         dev_dbg(ctrl->device, "ANA group %d: %s.\n",
748                         le32_to_cpu(desc->grpid),
749                         nvme_ana_state_names[desc->state]);
750
751         if (desc->state == NVME_ANA_CHANGE)
752                 (*nr_change_groups)++;
753
754         if (!nr_nsids)
755                 return 0;
756
757         srcu_idx = srcu_read_lock(&ctrl->srcu);
758         list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
759                 unsigned nsid;
760 again:
761                 nsid = le32_to_cpu(desc->nsids[n]);
762                 if (ns->head->ns_id < nsid)
763                         continue;
764                 if (ns->head->ns_id == nsid)
765                         nvme_update_ns_ana_state(desc, ns);
766                 if (++n == nr_nsids)
767                         break;
768                 if (ns->head->ns_id > nsid)
769                         goto again;
770         }
771         srcu_read_unlock(&ctrl->srcu, srcu_idx);
772         return 0;
773 }
774
775 static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
776 {
777         u32 nr_change_groups = 0;
778         int error;
779
780         mutex_lock(&ctrl->ana_lock);
781         error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
782                         ctrl->ana_log_buf, ctrl->ana_log_size, 0);
783         if (error) {
784                 dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
785                 goto out_unlock;
786         }
787
788         error = nvme_parse_ana_log(ctrl, &nr_change_groups,
789                         nvme_update_ana_state);
790         if (error)
791                 goto out_unlock;
792
793         /*
794          * In theory we should have an ANATT timer per group as they might enter
795          * the change state at different times.  But that is a lot of overhead
796          * just to protect against a target that keeps entering new change
797          * states while never finishing previous ones.  We'll still
798          * eventually time out once all groups are in change state, so this
799          * isn't a big deal.
800          *
801          * We also double the ANATT value to provide some slack for transports
802          * or AEN processing overhead.
803          */
804         if (nr_change_groups)
805                 mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
806         else
807                 del_timer_sync(&ctrl->anatt_timer);
808 out_unlock:
809         mutex_unlock(&ctrl->ana_lock);
810         return error;
811 }
812
813 static void nvme_ana_work(struct work_struct *work)
814 {
815         struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
816
817         if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
818                 return;
819
820         nvme_read_ana_log(ctrl);
821 }
822
823 void nvme_mpath_update(struct nvme_ctrl *ctrl)
824 {
825         u32 nr_change_groups = 0;
826
827         if (!ctrl->ana_log_buf)
828                 return;
829
830         mutex_lock(&ctrl->ana_lock);
831         nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
832         mutex_unlock(&ctrl->ana_lock);
833 }
834
835 static void nvme_anatt_timeout(struct timer_list *t)
836 {
837         struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
838
839         dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
840         nvme_reset_ctrl(ctrl);
841 }
842
843 void nvme_mpath_stop(struct nvme_ctrl *ctrl)
844 {
845         if (!nvme_ctrl_use_ana(ctrl))
846                 return;
847         del_timer_sync(&ctrl->anatt_timer);
848         cancel_work_sync(&ctrl->ana_work);
849 }
850
851 #define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
852         struct device_attribute subsys_attr_##_name =   \
853                 __ATTR(_name, _mode, _show, _store)
854
855 static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
856                 struct device_attribute *attr, char *buf)
857 {
858         struct nvme_subsystem *subsys =
859                 container_of(dev, struct nvme_subsystem, dev);
860
861         return sysfs_emit(buf, "%s\n",
862                           nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
863 }
864
865 static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys,
866                 int iopolicy)
867 {
868         struct nvme_ctrl *ctrl;
869         int old_iopolicy = READ_ONCE(subsys->iopolicy);
870
871         if (old_iopolicy == iopolicy)
872                 return;
873
874         WRITE_ONCE(subsys->iopolicy, iopolicy);
875
876         /* iopolicy changes clear the cached current paths by design */
877         mutex_lock(&nvme_subsystems_lock);
878         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
879                 nvme_mpath_clear_ctrl_paths(ctrl);
880         mutex_unlock(&nvme_subsystems_lock);
881
882         pr_notice("subsysnqn %s iopolicy changed from %s to %s\n",
883                         subsys->subnqn,
884                         nvme_iopolicy_names[old_iopolicy],
885                         nvme_iopolicy_names[iopolicy]);
886 }
887
888 static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
889                 struct device_attribute *attr, const char *buf, size_t count)
890 {
891         struct nvme_subsystem *subsys =
892                 container_of(dev, struct nvme_subsystem, dev);
893         int i;
894
895         for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
896                 if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
897                         nvme_subsys_iopolicy_update(subsys, i);
898                         return count;
899                 }
900         }
901
902         return -EINVAL;
903 }
904 SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
905                       nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
906
907 static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
908                 char *buf)
909 {
910         return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
911 }
912 DEVICE_ATTR_RO(ana_grpid);
913
914 static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
915                 char *buf)
916 {
917         struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
918
919         return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
920 }
921 DEVICE_ATTR_RO(ana_state);
922
923 static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
924                 struct nvme_ana_group_desc *desc, void *data)
925 {
926         struct nvme_ana_group_desc *dst = data;
927
928         if (desc->grpid != dst->grpid)
929                 return 0;
930
931         *dst = *desc;
932         return -ENXIO; /* just break out of the loop */
933 }
934
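/*
 * Called when a path namespace is added: with ANA, look up the namespace's
 * group descriptor in the cached log to set the initial ANA state, or
 * schedule a log re-read if it is not there yet; without ANA, treat the
 * path as optimized and make the multipath disk live right away.  For
 * zoned namespaces, mirror nr_zones to the multipath disk.
 */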
935 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
936 {
937         if (nvme_ctrl_use_ana(ns->ctrl)) {
938                 struct nvme_ana_group_desc desc = {
939                         .grpid = anagrpid,
940                         .state = 0,
941                 };
942
943                 mutex_lock(&ns->ctrl->ana_lock);
944                 ns->ana_grpid = le32_to_cpu(anagrpid);
945                 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
946                 mutex_unlock(&ns->ctrl->ana_lock);
947                 if (desc.state) {
948                         /* found the group desc: update */
949                         nvme_update_ns_ana_state(&desc, ns);
950                 } else {
951                         /* group desc not found: trigger a re-read */
952                         set_bit(NVME_NS_ANA_PENDING, &ns->flags);
953                         queue_work(nvme_wq, &ns->ctrl->ana_work);
954                 }
955         } else {
956                 ns->ana_state = NVME_ANA_OPTIMIZED;
957                 nvme_mpath_set_live(ns);
958         }
959
960 #ifdef CONFIG_BLK_DEV_ZONED
961         if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
962                 ns->head->disk->nr_zones = ns->disk->nr_zones;
963 #endif
964 }
965
966 void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
967 {
968         if (!head->disk)
969                 return;
970         kblockd_schedule_work(&head->requeue_work);
971         if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
972                 nvme_cdev_del(&head->cdev, &head->cdev_device);
973                 del_gendisk(head->disk);
974         }
975 }
976
977 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
978 {
979         if (!head->disk)
980                 return;
981         /* make sure all pending bios are cleaned up */
982         kblockd_schedule_work(&head->requeue_work);
983         flush_work(&head->requeue_work);
984         put_disk(head->disk);
985 }
986
987 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
988 {
989         mutex_init(&ctrl->ana_lock);
990         timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
991         INIT_WORK(&ctrl->ana_work, nvme_ana_work);
992 }
993
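/*
 * Set up ANA state from the Identify Controller data: validate MNAN, size
 * (and if needed reallocate) the ANA log buffer, and do an initial log
 * read.  ANA support is disabled if the log page would not fit within MDTS.
 */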
994 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
995 {
996         size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
997         size_t ana_log_size;
998         int error = 0;
999
1000         /* check if multipath is enabled and we have the capability */
1001         if (!multipath || !ctrl->subsys ||
1002             !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
1003                 return 0;
1004
1005         /* initialize this in the identify path to cover controller resets */
1006         atomic_set(&ctrl->nr_active, 0);
1007
1008         if (!ctrl->max_namespaces ||
1009             ctrl->max_namespaces > le32_to_cpu(id->nn)) {
1010                 dev_err(ctrl->device,
1011                         "Invalid MNAN value %u\n", ctrl->max_namespaces);
1012                 return -EINVAL;
1013         }
1014
1015         ctrl->anacap = id->anacap;
1016         ctrl->anatt = id->anatt;
1017         ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
1018         ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
1019
1020         ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
1021                 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
1022                 ctrl->max_namespaces * sizeof(__le32);
1023         if (ana_log_size > max_transfer_size) {
1024                 dev_err(ctrl->device,
1025                         "ANA log page size (%zd) larger than MDTS (%zd).\n",
1026                         ana_log_size, max_transfer_size);
1027                 dev_err(ctrl->device, "disabling ANA support.\n");
1028                 goto out_uninit;
1029         }
1030         if (ana_log_size > ctrl->ana_log_size) {
1031                 nvme_mpath_stop(ctrl);
1032                 nvme_mpath_uninit(ctrl);
1033                 ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
1034                 if (!ctrl->ana_log_buf)
1035                         return -ENOMEM;
1036         }
1037         ctrl->ana_log_size = ana_log_size;
1038         error = nvme_read_ana_log(ctrl);
1039         if (error)
1040                 goto out_uninit;
1041         return 0;
1042
1043 out_uninit:
1044         nvme_mpath_uninit(ctrl);
1045         return error;
1046 }
1047
1048 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
1049 {
1050         kvfree(ctrl->ana_log_buf);
1051         ctrl->ana_log_buf = NULL;
1052         ctrl->ana_log_size = 0;
1053 }