drivers/nvme/host/multipath.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2017-2018 Christoph Hellwig.
4  */
5
6 #include <linux/backing-dev.h>
7 #include <linux/moduleparam.h>
8 #include <linux/vmalloc.h>
9 #include <trace/events/block.h>
10 #include "nvme.h"
11
12 bool multipath = true;
13 module_param(multipath, bool, 0444);
14 MODULE_PARM_DESC(multipath,
15         "turn on native support for multiple controllers per subsystem");
16
17 static const char *nvme_iopolicy_names[] = {
18         [NVME_IOPOLICY_NUMA]    = "numa",
19         [NVME_IOPOLICY_RR]      = "round-robin",
20         [NVME_IOPOLICY_QD]      = "queue-depth",
21 };
22
23 static int iopolicy = NVME_IOPOLICY_NUMA;
24
25 static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
26 {
27         if (!val)
28                 return -EINVAL;
29         if (!strncmp(val, "numa", 4))
30                 iopolicy = NVME_IOPOLICY_NUMA;
31         else if (!strncmp(val, "round-robin", 11))
32                 iopolicy = NVME_IOPOLICY_RR;
33         else if (!strncmp(val, "queue-depth", 11))
34                 iopolicy = NVME_IOPOLICY_QD;
35         else
36                 return -EINVAL;
37
38         return 0;
39 }
40
41 static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
42 {
43         return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
44 }
45
46 module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
47         &iopolicy, 0644);
48 MODULE_PARM_DESC(iopolicy,
49         "Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
50
51 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
52 {
53         subsys->iopolicy = iopolicy;
54 }
55
56 void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
57 {
58         struct nvme_ns_head *h;
59
60         lockdep_assert_held(&subsys->lock);
61         list_for_each_entry(h, &subsys->nsheads, entry)
62                 if (h->disk)
63                         blk_mq_unfreeze_queue(h->disk->queue);
64 }
65
66 void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
67 {
68         struct nvme_ns_head *h;
69
70         lockdep_assert_held(&subsys->lock);
71         list_for_each_entry(h, &subsys->nsheads, entry)
72                 if (h->disk)
73                         blk_mq_freeze_queue_wait(h->disk->queue);
74 }
75
76 void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
77 {
78         struct nvme_ns_head *h;
79
80         lockdep_assert_held(&subsys->lock);
81         list_for_each_entry(h, &subsys->nsheads, entry)
82                 if (h->disk)
83                         blk_freeze_queue_start(h->disk->queue);
84 }
85
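/*
 * A request failed on this path: steal its bios onto the ns_head requeue
 * list and kick the requeue work so they are retried on another path.
 */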
86 void nvme_failover_req(struct request *req)
87 {
88         struct nvme_ns *ns = req->q->queuedata;
89         u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK;
90         unsigned long flags;
91         struct bio *bio;
92
93         nvme_mpath_clear_current_path(ns);
94
95         /*
96          * If we got back an ANA error, we know the controller is alive but not
97          * ready to serve this namespace.  Kick off a re-read of the ANA
98          * information page, and just try any other available path for now.
99          */
100         if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
101                 set_bit(NVME_NS_ANA_PENDING, &ns->flags);
102                 queue_work(nvme_wq, &ns->ctrl->ana_work);
103         }
104
105         spin_lock_irqsave(&ns->head->requeue_lock, flags);
106         for (bio = req->bio; bio; bio = bio->bi_next) {
107                 bio_set_dev(bio, ns->head->disk->part0);
108                 if (bio->bi_opf & REQ_POLLED) {
109                         bio->bi_opf &= ~REQ_POLLED;
110                         bio->bi_cookie = BLK_QC_T_NONE;
111                 }
112                 /*
113                  * The alternate request queue that we may end up submitting
114                  * the bio to may be frozen temporarily, in this case REQ_NOWAIT
115                  * will fail the I/O immediately with EAGAIN to the issuer.
116                  * Unlike the issuer we are not in a context that cannot
117                  * block, so clear the flag to avoid spurious EAGAIN failures.
118                  */
119                 bio->bi_opf &= ~REQ_NOWAIT;
120         }
121         blk_steal_bios(&ns->head->requeue_list, req);
122         spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
123
124         nvme_req(req)->status = 0;
125         nvme_end_req(req);
126         kblockd_schedule_work(&ns->head->requeue_work);
127 }
128
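/*
 * Account the request against the queue-depth iopolicy and, if I/O stats are
 * enabled, start block layer accounting on the multipath node.
 */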
129 void nvme_mpath_start_request(struct request *rq)
130 {
131         struct nvme_ns *ns = rq->q->queuedata;
132         struct gendisk *disk = ns->head->disk;
133
134         if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
135                 atomic_inc(&ns->ctrl->nr_active);
136                 nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
137         }
138
139         if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
140                 return;
141
142         nvme_req(rq)->flags |= NVME_MPATH_IO_STATS;
143         nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq),
144                                                       jiffies);
145 }
146 EXPORT_SYMBOL_GPL(nvme_mpath_start_request);
147
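/*
 * Undo the accounting done in nvme_mpath_start_request().
 */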
148 void nvme_mpath_end_request(struct request *rq)
149 {
150         struct nvme_ns *ns = rq->q->queuedata;
151
152         if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
153                 atomic_dec_if_positive(&ns->ctrl->nr_active);
154
155         if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
156                 return;
157         bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
158                          blk_rq_bytes(rq) >> SECTOR_SHIFT,
159                          nvme_req(rq)->start_time);
160 }
161
162 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
163 {
164         struct nvme_ns *ns;
165         int srcu_idx;
166
167         srcu_idx = srcu_read_lock(&ctrl->srcu);
168         list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
169                 if (!ns->head->disk)
170                         continue;
171                 kblockd_schedule_work(&ns->head->requeue_work);
172                 if (nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
173                         disk_uevent(ns->head->disk, KOBJ_CHANGE);
174         }
175         srcu_read_unlock(&ctrl->srcu, srcu_idx);
176 }
177
178 static const char *nvme_ana_state_names[] = {
179         [0]                             = "invalid state",
180         [NVME_ANA_OPTIMIZED]            = "optimized",
181         [NVME_ANA_NONOPTIMIZED]         = "non-optimized",
182         [NVME_ANA_INACCESSIBLE]         = "inaccessible",
183         [NVME_ANA_PERSISTENT_LOSS]      = "persistent-loss",
184         [NVME_ANA_CHANGE]               = "change",
185 };
186
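/*
 * Drop @ns from the cached current_path of its namespace head.  Returns true
 * if it was the current path for any node.
 */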
187 bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
188 {
189         struct nvme_ns_head *head = ns->head;
190         bool changed = false;
191         int node;
192
193         if (!head)
194                 goto out;
195
196         for_each_node(node) {
197                 if (ns == rcu_access_pointer(head->current_path[node])) {
198                         rcu_assign_pointer(head->current_path[node], NULL);
199                         changed = true;
200                 }
201         }
202 out:
203         return changed;
204 }
205
206 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
207 {
208         struct nvme_ns *ns;
209         int srcu_idx;
210
211         srcu_idx = srcu_read_lock(&ctrl->srcu);
212         list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
213                 nvme_mpath_clear_current_path(ns);
214                 kblockd_schedule_work(&ns->head->requeue_work);
215         }
216         srcu_read_unlock(&ctrl->srcu, srcu_idx);
217 }
218
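/*
 * After a resize, mark sibling namespaces whose capacity no longer matches
 * the multipath node as not ready and force a fresh path selection.
 */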
219 void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
220 {
221         struct nvme_ns_head *head = ns->head;
222         sector_t capacity = get_capacity(head->disk);
223         int node;
224         int srcu_idx;
225
226         srcu_idx = srcu_read_lock(&head->srcu);
227         list_for_each_entry_rcu(ns, &head->list, siblings) {
228                 if (capacity != get_capacity(ns->disk))
229                         clear_bit(NVME_NS_READY, &ns->flags);
230         }
231         srcu_read_unlock(&head->srcu, srcu_idx);
232
233         for_each_node(node)
234                 rcu_assign_pointer(head->current_path[node], NULL);
235         kblockd_schedule_work(&head->requeue_work);
236 }
237
238 static bool nvme_path_is_disabled(struct nvme_ns *ns)
239 {
240         enum nvme_ctrl_state state = nvme_ctrl_state(ns->ctrl);
241
242         /*
243          * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
244          * still be able to complete assuming that the controller is connected.
245          * Otherwise it will fail immediately and return to the requeue list.
246          */
247         if (state != NVME_CTRL_LIVE && state != NVME_CTRL_DELETING)
248                 return true;
249         if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
250             !test_bit(NVME_NS_READY, &ns->flags))
251                 return true;
252         return false;
253 }
254
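/*
 * Pick an enabled path for @node, preferring ANA optimized over non-optimized
 * paths and, for the NUMA iopolicy, the smallest NUMA distance.  The result
 * is cached in head->current_path[node].
 */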
255 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
256 {
257         int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
258         struct nvme_ns *found = NULL, *fallback = NULL, *ns;
259
260         list_for_each_entry_rcu(ns, &head->list, siblings) {
261                 if (nvme_path_is_disabled(ns))
262                         continue;
263
264                 if (ns->ctrl->numa_node != NUMA_NO_NODE &&
265                     READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
266                         distance = node_distance(node, ns->ctrl->numa_node);
267                 else
268                         distance = LOCAL_DISTANCE;
269
270                 switch (ns->ana_state) {
271                 case NVME_ANA_OPTIMIZED:
272                         if (distance < found_distance) {
273                                 found_distance = distance;
274                                 found = ns;
275                         }
276                         break;
277                 case NVME_ANA_NONOPTIMIZED:
278                         if (distance < fallback_distance) {
279                                 fallback_distance = distance;
280                                 fallback = ns;
281                         }
282                         break;
283                 default:
284                         break;
285                 }
286         }
287
288         if (!found)
289                 found = fallback;
290         if (found)
291                 rcu_assign_pointer(head->current_path[node], found);
292         return found;
293 }
294
295 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
296                 struct nvme_ns *ns)
297 {
298         ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
299                         siblings);
300         if (ns)
301                 return ns;
302         return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
303 }
304
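/*
 * Round-robin iopolicy: start after the most recently used path and pick the
 * next enabled one, preferring ANA optimized over non-optimized paths.
 */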
305 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head)
306 {
307         struct nvme_ns *ns, *found = NULL;
308         int node = numa_node_id();
309         struct nvme_ns *old = srcu_dereference(head->current_path[node],
310                                                &head->srcu);
311
312         if (unlikely(!old))
313                 return __nvme_find_path(head, node);
314
315         if (list_is_singular(&head->list)) {
316                 if (nvme_path_is_disabled(old))
317                         return NULL;
318                 return old;
319         }
320
321         for (ns = nvme_next_ns(head, old);
322              ns && ns != old;
323              ns = nvme_next_ns(head, ns)) {
324                 if (nvme_path_is_disabled(ns))
325                         continue;
326
327                 if (ns->ana_state == NVME_ANA_OPTIMIZED) {
328                         found = ns;
329                         goto out;
330                 }
331                 if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
332                         found = ns;
333         }
334
335         /*
336          * The loop above skips the current path for round-robin semantics.
337          * Fall back to the current path if either:
338          *  - no other optimized path found and current is optimized,
339          *  - no other usable path found and current is usable.
340          */
341         if (!nvme_path_is_disabled(old) &&
342             (old->ana_state == NVME_ANA_OPTIMIZED ||
343              (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
344                 return old;
345
346         if (!found)
347                 return NULL;
348 out:
349         rcu_assign_pointer(head->current_path[node], found);
350         return found;
351 }
352
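/*
 * Queue-depth iopolicy: pick the enabled path whose controller has the fewest
 * outstanding multipath requests, preferring ANA optimized paths.
 */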
353 static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
354 {
355         struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
356         unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
357         unsigned int depth;
358
359         list_for_each_entry_rcu(ns, &head->list, siblings) {
360                 if (nvme_path_is_disabled(ns))
361                         continue;
362
363                 depth = atomic_read(&ns->ctrl->nr_active);
364
365                 switch (ns->ana_state) {
366                 case NVME_ANA_OPTIMIZED:
367                         if (depth < min_depth_opt) {
368                                 min_depth_opt = depth;
369                                 best_opt = ns;
370                         }
371                         break;
372                 case NVME_ANA_NONOPTIMIZED:
373                         if (depth < min_depth_nonopt) {
374                                 min_depth_nonopt = depth;
375                                 best_nonopt = ns;
376                         }
377                         break;
378                 default:
379                         break;
380                 }
381
382                 if (min_depth_opt == 0)
383                         return best_opt;
384         }
385
386         return best_opt ? best_opt : best_nonopt;
387 }
388
389 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
390 {
391         return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
392                 ns->ana_state == NVME_ANA_OPTIMIZED;
393 }
394
395 static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
396 {
397         int node = numa_node_id();
398         struct nvme_ns *ns;
399
400         ns = srcu_dereference(head->current_path[node], &head->srcu);
401         if (unlikely(!ns))
402                 return __nvme_find_path(head, node);
403         if (unlikely(!nvme_path_is_optimized(ns)))
404                 return __nvme_find_path(head, node);
405         return ns;
406 }
407
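/*
 * Select a path according to the iopolicy configured for the subsystem.
 */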
408 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
409 {
410         switch (READ_ONCE(head->subsys->iopolicy)) {
411         case NVME_IOPOLICY_QD:
412                 return nvme_queue_depth_path(head);
413         case NVME_IOPOLICY_RR:
414                 return nvme_round_robin_path(head);
415         default:
416                 return nvme_numa_path(head);
417         }
418 }
419
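/*
 * Check whether any path could still serve I/O, i.e. whether bios should be
 * requeued rather than failed when no path is currently usable.
 */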
420 static bool nvme_available_path(struct nvme_ns_head *head)
421 {
422         struct nvme_ns *ns;
423
424         if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
425                 return false;
426
427         list_for_each_entry_rcu(ns, &head->list, siblings) {
428                 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
429                         continue;
430                 switch (nvme_ctrl_state(ns->ctrl)) {
431                 case NVME_CTRL_LIVE:
432                 case NVME_CTRL_RESETTING:
433                 case NVME_CTRL_CONNECTING:
434                         /* this path is usable or may become usable again */
435                         return true;
436                 default:
437                         break;
438                 }
439         }
440         return false;
441 }
442
443 static void nvme_ns_head_submit_bio(struct bio *bio)
444 {
445         struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
446         struct device *dev = disk_to_dev(head->disk);
447         struct nvme_ns *ns;
448         int srcu_idx;
449
450         /*
451          * The namespace might be going away and the bio might be moved to a
452          * different queue via blk_steal_bios(), so we need to use the bio_split
453          * pool from the original queue to allocate the bvecs from.
454          */
455         bio = bio_split_to_limits(bio);
456         if (!bio)
457                 return;
458
459         srcu_idx = srcu_read_lock(&head->srcu);
460         ns = nvme_find_path(head);
461         if (likely(ns)) {
462                 bio_set_dev(bio, ns->disk->part0);
463                 bio->bi_opf |= REQ_NVME_MPATH;
464                 trace_block_bio_remap(bio, disk_devt(ns->head->disk),
465                                       bio->bi_iter.bi_sector);
466                 submit_bio_noacct(bio);
467         } else if (nvme_available_path(head)) {
468                 dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
469
470                 spin_lock_irq(&head->requeue_lock);
471                 bio_list_add(&head->requeue_list, bio);
472                 spin_unlock_irq(&head->requeue_lock);
473         } else {
474                 dev_warn_ratelimited(dev, "no available path - failing I/O\n");
475
476                 bio_io_error(bio);
477         }
478
479         srcu_read_unlock(&head->srcu, srcu_idx);
480 }
481
482 static int nvme_ns_head_open(struct gendisk *disk, blk_mode_t mode)
483 {
484         if (!nvme_tryget_ns_head(disk->private_data))
485                 return -ENXIO;
486         return 0;
487 }
488
489 static void nvme_ns_head_release(struct gendisk *disk)
490 {
491         nvme_put_ns_head(disk->private_data);
492 }
493
494 static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16],
495                 enum blk_unique_id type)
496 {
497         struct nvme_ns_head *head = disk->private_data;
498         struct nvme_ns *ns;
499         int srcu_idx, ret = -EWOULDBLOCK;
500
501         srcu_idx = srcu_read_lock(&head->srcu);
502         ns = nvme_find_path(head);
503         if (ns)
504                 ret = nvme_ns_get_unique_id(ns, id, type);
505         srcu_read_unlock(&head->srcu, srcu_idx);
506         return ret;
507 }
508
509 #ifdef CONFIG_BLK_DEV_ZONED
510 static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector,
511                 unsigned int nr_zones, report_zones_cb cb, void *data)
512 {
513         struct nvme_ns_head *head = disk->private_data;
514         struct nvme_ns *ns;
515         int srcu_idx, ret = -EWOULDBLOCK;
516
517         srcu_idx = srcu_read_lock(&head->srcu);
518         ns = nvme_find_path(head);
519         if (ns)
520                 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
521         srcu_read_unlock(&head->srcu, srcu_idx);
522         return ret;
523 }
524 #else
525 #define nvme_ns_head_report_zones       NULL
526 #endif /* CONFIG_BLK_DEV_ZONED */
527
528 const struct block_device_operations nvme_ns_head_ops = {
529         .owner          = THIS_MODULE,
530         .submit_bio     = nvme_ns_head_submit_bio,
531         .open           = nvme_ns_head_open,
532         .release        = nvme_ns_head_release,
533         .ioctl          = nvme_ns_head_ioctl,
534         .compat_ioctl   = blkdev_compat_ptr_ioctl,
535         .getgeo         = nvme_getgeo,
536         .get_unique_id  = nvme_ns_head_get_unique_id,
537         .report_zones   = nvme_ns_head_report_zones,
538         .pr_ops         = &nvme_pr_ops,
539 };
540
541 static inline struct nvme_ns_head *cdev_to_ns_head(struct cdev *cdev)
542 {
543         return container_of(cdev, struct nvme_ns_head, cdev);
544 }
545
546 static int nvme_ns_head_chr_open(struct inode *inode, struct file *file)
547 {
548         if (!nvme_tryget_ns_head(cdev_to_ns_head(inode->i_cdev)))
549                 return -ENXIO;
550         return 0;
551 }
552
553 static int nvme_ns_head_chr_release(struct inode *inode, struct file *file)
554 {
555         nvme_put_ns_head(cdev_to_ns_head(inode->i_cdev));
556         return 0;
557 }
558
559 static const struct file_operations nvme_ns_head_chr_fops = {
560         .owner          = THIS_MODULE,
561         .open           = nvme_ns_head_chr_open,
562         .release        = nvme_ns_head_chr_release,
563         .unlocked_ioctl = nvme_ns_head_chr_ioctl,
564         .compat_ioctl   = compat_ptr_ioctl,
565         .uring_cmd      = nvme_ns_head_chr_uring_cmd,
566         .uring_cmd_iopoll = nvme_ns_chr_uring_cmd_iopoll,
567 };
568
569 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
570 {
571         int ret;
572
573         head->cdev_device.parent = &head->subsys->dev;
574         ret = dev_set_name(&head->cdev_device, "ng%dn%d",
575                            head->subsys->instance, head->instance);
576         if (ret)
577                 return ret;
578         ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
579                             &nvme_ns_head_chr_fops, THIS_MODULE);
580         return ret;
581 }
582
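/*
 * Resubmit all bios parked on the requeue list, e.g. after a failover or once
 * a path has become available again.
 */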
583 static void nvme_requeue_work(struct work_struct *work)
584 {
585         struct nvme_ns_head *head =
586                 container_of(work, struct nvme_ns_head, requeue_work);
587         struct bio *bio, *next;
588
589         spin_lock_irq(&head->requeue_lock);
590         next = bio_list_get(&head->requeue_list);
591         spin_unlock_irq(&head->requeue_lock);
592
593         while ((bio = next) != NULL) {
594                 next = bio->bi_next;
595                 bio->bi_next = NULL;
596
597                 submit_bio_noacct(bio);
598         }
599 }
600
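/*
 * Allocate the multipath gendisk (nvmeXnY) for a namespace head if the
 * subsystem supports multiple controllers and native multipath is enabled.
 */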
601 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
602 {
603         struct queue_limits lim;
604
605         mutex_init(&head->lock);
606         bio_list_init(&head->requeue_list);
607         spin_lock_init(&head->requeue_lock);
608         INIT_WORK(&head->requeue_work, nvme_requeue_work);
609
610         /*
611          * Add a multipath node if the subsystem supports multiple controllers.
612          * We also do this for private namespaces as the namespace sharing flag
613          * could change after a rescan.
614          */
615         if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
616             !nvme_is_unique_nsid(ctrl, head) || !multipath)
617                 return 0;
618
619         blk_set_stacking_limits(&lim);
620         lim.dma_alignment = 3;
621         lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | BLK_FEAT_POLL;
622         if (head->ids.csi == NVME_CSI_ZNS)
623                 lim.features |= BLK_FEAT_ZONED;
624         else
625                 lim.max_zone_append_sectors = 0;
626
627         head->disk = blk_alloc_disk(&lim, ctrl->numa_node);
628         if (IS_ERR(head->disk))
629                 return PTR_ERR(head->disk);
630         head->disk->fops = &nvme_ns_head_ops;
631         head->disk->private_data = head;
632         sprintf(head->disk->disk_name, "nvme%dn%d",
633                         ctrl->subsys->instance, head->instance);
634         return 0;
635 }
636
637 static void nvme_mpath_set_live(struct nvme_ns *ns)
638 {
639         struct nvme_ns_head *head = ns->head;
640         int rc;
641
642         if (!head->disk)
643                 return;
644
645         /*
646          * test_and_set_bit() is used to protect against two nvme paths
647          * simultaneously calling device_add_disk() on the same namespace
648          * head.
649          */
650         if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
651                 rc = device_add_disk(&head->subsys->dev, head->disk,
652                                      nvme_ns_attr_groups);
653                 if (rc) {
654                         clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
655                         return;
656                 }
657                 nvme_add_ns_head_cdev(head);
658         }
659
660         mutex_lock(&head->lock);
661         if (nvme_path_is_optimized(ns)) {
662                 int node, srcu_idx;
663
664                 srcu_idx = srcu_read_lock(&head->srcu);
665                 for_each_online_node(node)
666                         __nvme_find_path(head, node);
667                 srcu_read_unlock(&head->srcu, srcu_idx);
668         }
669         mutex_unlock(&head->lock);
670
671         synchronize_srcu(&head->srcu);
672         kblockd_schedule_work(&head->requeue_work);
673 }
674
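/*
 * Walk the ANA log buffer and call @cb for each group descriptor, validating
 * each descriptor against the advertised log size.
 */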
675 static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
676                 int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
677                         void *))
678 {
679         void *base = ctrl->ana_log_buf;
680         size_t offset = sizeof(struct nvme_ana_rsp_hdr);
681         int error, i;
682
683         lockdep_assert_held(&ctrl->ana_lock);
684
685         for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
686                 struct nvme_ana_group_desc *desc = base + offset;
687                 u32 nr_nsids;
688                 size_t nsid_buf_size;
689
690                 if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
691                         return -EINVAL;
692
693                 nr_nsids = le32_to_cpu(desc->nnsids);
694                 nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
695
696                 if (WARN_ON_ONCE(desc->grpid == 0))
697                         return -EINVAL;
698                 if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
699                         return -EINVAL;
700                 if (WARN_ON_ONCE(desc->state == 0))
701                         return -EINVAL;
702                 if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
703                         return -EINVAL;
704
705                 offset += sizeof(*desc);
706                 if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
707                         return -EINVAL;
708
709                 error = cb(ctrl, desc, data);
710                 if (error)
711                         return error;
712
713                 offset += nsid_buf_size;
714         }
715
716         return 0;
717 }
718
719 static inline bool nvme_state_is_live(enum nvme_ana_state state)
720 {
721         return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
722 }
723
724 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
725                 struct nvme_ns *ns)
726 {
727         ns->ana_grpid = le32_to_cpu(desc->grpid);
728         ns->ana_state = desc->state;
729         clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
730         /*
731          * nvme_mpath_set_live() will trigger I/O to the multipath path device
732          * and in turn to this path device.  However we cannot accept this I/O
733          * if the controller is not live.  This may deadlock if called from
734          * nvme_mpath_init_identify(), as the ctrl would then never complete
735          * initialization and the I/O would never finish.  For this case we
736          * will reprocess the ANA log page in nvme_mpath_update() once the
737          * controller is ready.
738          */
739         if (nvme_state_is_live(ns->ana_state) &&
740             nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE)
741                 nvme_mpath_set_live(ns);
742 }
743
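/*
 * Update the ANA state of every namespace on this controller that is listed
 * in the given group descriptor.
 */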
744 static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
745                 struct nvme_ana_group_desc *desc, void *data)
746 {
747         u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
748         unsigned *nr_change_groups = data;
749         struct nvme_ns *ns;
750         int srcu_idx;
751
752         dev_dbg(ctrl->device, "ANA group %d: %s.\n",
753                         le32_to_cpu(desc->grpid),
754                         nvme_ana_state_names[desc->state]);
755
756         if (desc->state == NVME_ANA_CHANGE)
757                 (*nr_change_groups)++;
758
759         if (!nr_nsids)
760                 return 0;
761
762         srcu_idx = srcu_read_lock(&ctrl->srcu);
763         list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
764                 unsigned nsid;
765 again:
766                 nsid = le32_to_cpu(desc->nsids[n]);
767                 if (ns->head->ns_id < nsid)
768                         continue;
769                 if (ns->head->ns_id == nsid)
770                         nvme_update_ns_ana_state(desc, ns);
771                 if (++n == nr_nsids)
772                         break;
773                 if (ns->head->ns_id > nsid)
774                         goto again;
775         }
776         srcu_read_unlock(&ctrl->srcu, srcu_idx);
777         return 0;
778 }
779
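/*
 * Read the ANA log page and apply the reported group states.  The ANATT timer
 * is (re)armed while any group reports a change state.
 */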
780 static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
781 {
782         u32 nr_change_groups = 0;
783         int error;
784
785         mutex_lock(&ctrl->ana_lock);
786         error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
787                         ctrl->ana_log_buf, ctrl->ana_log_size, 0);
788         if (error) {
789                 dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
790                 goto out_unlock;
791         }
792
793         error = nvme_parse_ana_log(ctrl, &nr_change_groups,
794                         nvme_update_ana_state);
795         if (error)
796                 goto out_unlock;
797
798         /*
799          * In theory we should have an ANATT timer per group as they might enter
800          * the change state at different times.  But that is a lot of overhead
801          * just to protect against a target that keeps entering new change
802          * states while never finishing previous ones.  We'll still
803          * eventually time out once all groups are in change state, so this
804          * isn't a big deal.
805          *
806          * We also double the ANATT value to provide some slack for transports
807          * or AEN processing overhead.
808          */
809         if (nr_change_groups)
810                 mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
811         else
812                 del_timer_sync(&ctrl->anatt_timer);
813 out_unlock:
814         mutex_unlock(&ctrl->ana_lock);
815         return error;
816 }
817
818 static void nvme_ana_work(struct work_struct *work)
819 {
820         struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
821
822         if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
823                 return;
824
825         nvme_read_ana_log(ctrl);
826 }
827
828 void nvme_mpath_update(struct nvme_ctrl *ctrl)
829 {
830         u32 nr_change_groups = 0;
831
832         if (!ctrl->ana_log_buf)
833                 return;
834
835         mutex_lock(&ctrl->ana_lock);
836         nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
837         mutex_unlock(&ctrl->ana_lock);
838 }
839
840 static void nvme_anatt_timeout(struct timer_list *t)
841 {
842         struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
843
844         dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
845         nvme_reset_ctrl(ctrl);
846 }
847
848 void nvme_mpath_stop(struct nvme_ctrl *ctrl)
849 {
850         if (!nvme_ctrl_use_ana(ctrl))
851                 return;
852         del_timer_sync(&ctrl->anatt_timer);
853         cancel_work_sync(&ctrl->ana_work);
854 }
855
856 #define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
857         struct device_attribute subsys_attr_##_name =   \
858                 __ATTR(_name, _mode, _show, _store)
859
860 static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
861                 struct device_attribute *attr, char *buf)
862 {
863         struct nvme_subsystem *subsys =
864                 container_of(dev, struct nvme_subsystem, dev);
865
866         return sysfs_emit(buf, "%s\n",
867                           nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
868 }
869
870 static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys,
871                 int iopolicy)
872 {
873         struct nvme_ctrl *ctrl;
874         int old_iopolicy = READ_ONCE(subsys->iopolicy);
875
876         if (old_iopolicy == iopolicy)
877                 return;
878
879         WRITE_ONCE(subsys->iopolicy, iopolicy);
880
881         /* iopolicy changes clear the mpath by design */
882         mutex_lock(&nvme_subsystems_lock);
883         list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
884                 nvme_mpath_clear_ctrl_paths(ctrl);
885         mutex_unlock(&nvme_subsystems_lock);
886
887         pr_notice("subsysnqn %s iopolicy changed from %s to %s\n",
888                         subsys->subnqn,
889                         nvme_iopolicy_names[old_iopolicy],
890                         nvme_iopolicy_names[iopolicy]);
891 }
892
893 static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
894                 struct device_attribute *attr, const char *buf, size_t count)
895 {
896         struct nvme_subsystem *subsys =
897                 container_of(dev, struct nvme_subsystem, dev);
898         int i;
899
900         for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
901                 if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
902                         nvme_subsys_iopolicy_update(subsys, i);
903                         return count;
904                 }
905         }
906
907         return -EINVAL;
908 }
909 SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
910                       nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
911
912 static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
913                 char *buf)
914 {
915         return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
916 }
917 DEVICE_ATTR_RO(ana_grpid);
918
919 static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
920                 char *buf)
921 {
922         struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
923
924         return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
925 }
926 DEVICE_ATTR_RO(ana_state);
927
928 static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
929                 struct nvme_ana_group_desc *desc, void *data)
930 {
931         struct nvme_ana_group_desc *dst = data;
932
933         if (desc->grpid != dst->grpid)
934                 return 0;
935
936         *dst = *desc;
937         return -ENXIO; /* just break out of the loop */
938 }
939
940 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
941 {
942         if (nvme_ctrl_use_ana(ns->ctrl)) {
943                 struct nvme_ana_group_desc desc = {
944                         .grpid = anagrpid,
945                         .state = 0,
946                 };
947
948                 mutex_lock(&ns->ctrl->ana_lock);
949                 ns->ana_grpid = le32_to_cpu(anagrpid);
950                 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
951                 mutex_unlock(&ns->ctrl->ana_lock);
952                 if (desc.state) {
953                         /* found the group desc: update */
954                         nvme_update_ns_ana_state(&desc, ns);
955                 } else {
956                         /* group desc not found: trigger a re-read */
957                         set_bit(NVME_NS_ANA_PENDING, &ns->flags);
958                         queue_work(nvme_wq, &ns->ctrl->ana_work);
959                 }
960         } else {
961                 ns->ana_state = NVME_ANA_OPTIMIZED;
962                 nvme_mpath_set_live(ns);
963         }
964
965 #ifdef CONFIG_BLK_DEV_ZONED
966         if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
967                 ns->head->disk->nr_zones = ns->disk->nr_zones;
968 #endif
969 }
970
971 void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
972 {
973         if (!head->disk)
974                 return;
975         if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
976                 nvme_cdev_del(&head->cdev, &head->cdev_device);
977                 del_gendisk(head->disk);
978         }
979         /*
980          * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared
981          * to allow multipath to fail all I/O.
982          */
983         synchronize_srcu(&head->srcu);
984         kblockd_schedule_work(&head->requeue_work);
985 }
986
987 void nvme_mpath_remove_disk(struct nvme_ns_head *head)
988 {
989         if (!head->disk)
990                 return;
991         /* make sure all pending bios are cleaned up */
992         kblockd_schedule_work(&head->requeue_work);
993         flush_work(&head->requeue_work);
994         put_disk(head->disk);
995 }
996
997 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
998 {
999         mutex_init(&ctrl->ana_lock);
1000         timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
1001         INIT_WORK(&ctrl->ana_work, nvme_ana_work);
1002 }
1003
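/*
 * Set up ANA support from the Identify Controller data: validate MNAN,
 * (re)allocate the ANA log buffer and read the initial ANA log page.
 */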
1004 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
1005 {
1006         size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
1007         size_t ana_log_size;
1008         int error = 0;
1009
1010         /* check if multipath is enabled and we have the capability */
1011         if (!multipath || !ctrl->subsys ||
1012             !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
1013                 return 0;
1014
1015         /* initialize this in the identify path to cover controller resets */
1016         atomic_set(&ctrl->nr_active, 0);
1017
1018         if (!ctrl->max_namespaces ||
1019             ctrl->max_namespaces > le32_to_cpu(id->nn)) {
1020                 dev_err(ctrl->device,
1021                         "Invalid MNAN value %u\n", ctrl->max_namespaces);
1022                 return -EINVAL;
1023         }
1024
1025         ctrl->anacap = id->anacap;
1026         ctrl->anatt = id->anatt;
1027         ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
1028         ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
1029
1030         ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
1031                 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
1032                 ctrl->max_namespaces * sizeof(__le32);
1033         if (ana_log_size > max_transfer_size) {
1034                 dev_err(ctrl->device,
1035                         "ANA log page size (%zd) larger than MDTS (%zd).\n",
1036                         ana_log_size, max_transfer_size);
1037                 dev_err(ctrl->device, "disabling ANA support.\n");
1038                 goto out_uninit;
1039         }
1040         if (ana_log_size > ctrl->ana_log_size) {
1041                 nvme_mpath_stop(ctrl);
1042                 nvme_mpath_uninit(ctrl);
1043                 ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
1044                 if (!ctrl->ana_log_buf)
1045                         return -ENOMEM;
1046         }
1047         ctrl->ana_log_size = ana_log_size;
1048         error = nvme_read_ana_log(ctrl);
1049         if (error)
1050                 goto out_uninit;
1051         return 0;
1052
1053 out_uninit:
1054         nvme_mpath_uninit(ctrl);
1055         return error;
1056 }
1057
1058 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
1059 {
1060         kvfree(ctrl->ana_log_buf);
1061         ctrl->ana_log_buf = NULL;
1062         ctrl->ana_log_size = 0;
1063 }