drivers/nvme/host/rdma.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics RDMA host code.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <rdma/mr_pool.h>
11 #include <linux/err.h>
12 #include <linux/string.h>
13 #include <linux/atomic.h>
14 #include <linux/blk-mq.h>
15 #include <linux/blk-integrity.h>
16 #include <linux/types.h>
17 #include <linux/list.h>
18 #include <linux/mutex.h>
19 #include <linux/scatterlist.h>
20 #include <linux/nvme.h>
21 #include <linux/unaligned.h>
22
23 #include <rdma/ib_verbs.h>
24 #include <rdma/rdma_cm.h>
25 #include <linux/nvme-rdma.h>
26
27 #include "nvme.h"
28 #include "fabrics.h"
29
30
31 #define NVME_RDMA_CM_TIMEOUT_MS         3000            /* 3 seconds */
32
33 #define NVME_RDMA_MAX_SEGMENTS          256
34
35 #define NVME_RDMA_MAX_INLINE_SEGMENTS   4
36
37 #define NVME_RDMA_DATA_SGL_SIZE \
38         (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
39 #define NVME_RDMA_METADATA_SGL_SIZE \
40         (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
41
42 struct nvme_rdma_device {
43         struct ib_device        *dev;
44         struct ib_pd            *pd;
45         struct kref             ref;
46         struct list_head        entry;
47         unsigned int            num_inline_segments;
48 };
49
50 struct nvme_rdma_qe {
51         struct ib_cqe           cqe;
52         void                    *data;
53         u64                     dma;
54 };
55
56 struct nvme_rdma_sgl {
57         int                     nents;
58         struct sg_table         sg_table;
59 };
60
61 struct nvme_rdma_queue;
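/*
 * Layout note: the blk-mq pdu that backs this struct is oversized. The
 * inline data SG array (NVME_RDMA_DATA_SGL_SIZE) follows the request
 * directly, and the metadata SGL follows the data SGL when PI is
 * supported; see nvme_rdma_init_request() and nvme_rdma_dma_map_req().
 */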
62 struct nvme_rdma_request {
63         struct nvme_request     req;
64         struct ib_mr            *mr;
65         struct nvme_rdma_qe     sqe;
66         union nvme_result       result;
67         __le16                  status;
68         refcount_t              ref;
69         struct ib_sge           sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
70         u32                     num_sge;
71         struct ib_reg_wr        reg_wr;
72         struct ib_cqe           reg_cqe;
73         struct nvme_rdma_queue  *queue;
74         struct nvme_rdma_sgl    data_sgl;
75         struct nvme_rdma_sgl    *metadata_sgl;
76         bool                    use_sig_mr;
77 };
78
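/*
 * Queue lifetime bits: Q_ALLOCATED is set once the CM connection is
 * established, Q_LIVE once the fabrics connect command succeeded, and
 * Q_TR_READY once the transport resources (CQ, QP and MR pools) exist.
 */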
79 enum nvme_rdma_queue_flags {
80         NVME_RDMA_Q_ALLOCATED           = 0,
81         NVME_RDMA_Q_LIVE                = 1,
82         NVME_RDMA_Q_TR_READY            = 2,
83 };
84
85 struct nvme_rdma_queue {
86         struct nvme_rdma_qe     *rsp_ring;
87         int                     queue_size;
88         size_t                  cmnd_capsule_len;
89         struct nvme_rdma_ctrl   *ctrl;
90         struct nvme_rdma_device *device;
91         struct ib_cq            *ib_cq;
92         struct ib_qp            *qp;
93
94         unsigned long           flags;
95         struct rdma_cm_id       *cm_id;
96         int                     cm_error;
97         struct completion       cm_done;
98         bool                    pi_support;
99         int                     cq_size;
100         struct mutex            queue_lock;
101 };
102
103 struct nvme_rdma_ctrl {
104         /* read only in the hot path */
105         struct nvme_rdma_queue  *queues;
106
107         /* other member variables */
108         struct blk_mq_tag_set   tag_set;
109         struct work_struct      err_work;
110
111         struct nvme_rdma_qe     async_event_sqe;
112
113         struct delayed_work     reconnect_work;
114
115         struct list_head        list;
116
117         struct blk_mq_tag_set   admin_tag_set;
118         struct nvme_rdma_device *device;
119
120         u32                     max_fr_pages;
121
122         struct sockaddr_storage addr;
123         struct sockaddr_storage src_addr;
124
125         struct nvme_ctrl        ctrl;
126         bool                    use_inline_data;
127         u32                     io_queues[HCTX_MAX_TYPES];
128 };
129
130 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
131 {
132         return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
133 }
134
135 static LIST_HEAD(device_list);
136 static DEFINE_MUTEX(device_list_mutex);
137
138 static LIST_HEAD(nvme_rdma_ctrl_list);
139 static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
140
141 /*
142  * Disabling this option makes small I/O go faster, but is fundamentally
143  * unsafe.  With it turned off we will have to register a global rkey that
144  * allows read and write access to all physical memory.
145  */
146 static bool register_always = true;
147 module_param(register_always, bool, 0444);
148 MODULE_PARM_DESC(register_always,
149          "Use memory registration even for contiguous memory regions");
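/*
 * Usage note (not from the original source): being 0444, the parameter is
 * read-only at runtime and can only be set at load time, e.g.
 * "modprobe nvme-rdma register_always=0"; the current value is visible
 * under /sys/module/nvme_rdma/parameters/register_always.
 */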
150
151 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
152                 struct rdma_cm_event *event);
153 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
154 static void nvme_rdma_complete_rq(struct request *rq);
155
156 static const struct blk_mq_ops nvme_rdma_mq_ops;
157 static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
158
159 static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
160 {
161         return queue - queue->ctrl->queues;
162 }
163
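/*
 * I/O queues are laid out [default | read | poll] after the admin queue
 * (index 0): any index beyond the default + read counts is a poll queue.
 */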
164 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
165 {
166         return nvme_rdma_queue_idx(queue) >
167                 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
168                 queue->ctrl->io_queues[HCTX_TYPE_READ];
169 }
170
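/*
 * Worked example (illustrative values): with IOCCSZ = 260 the I/O command
 * capsule is 260 * 16 = 4160 bytes, leaving 4096 bytes of in-capsule
 * (inline) data after the 64-byte struct nvme_command.
 */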
171 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
172 {
173         return queue->cmnd_capsule_len - sizeof(struct nvme_command);
174 }
175
176 static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
177                 size_t capsule_size, enum dma_data_direction dir)
178 {
179         ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
180         kfree(qe->data);
181 }
182
183 static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
184                 size_t capsule_size, enum dma_data_direction dir)
185 {
186         qe->data = kzalloc(capsule_size, GFP_KERNEL);
187         if (!qe->data)
188                 return -ENOMEM;
189
190         qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
191         if (ib_dma_mapping_error(ibdev, qe->dma)) {
192                 kfree(qe->data);
193                 qe->data = NULL;
194                 return -ENOMEM;
195         }
196
197         return 0;
198 }
199
200 static void nvme_rdma_free_ring(struct ib_device *ibdev,
201                 struct nvme_rdma_qe *ring, size_t ib_queue_size,
202                 size_t capsule_size, enum dma_data_direction dir)
203 {
204         int i;
205
206         for (i = 0; i < ib_queue_size; i++)
207                 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
208         kfree(ring);
209 }
210
211 static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
212                 size_t ib_queue_size, size_t capsule_size,
213                 enum dma_data_direction dir)
214 {
215         struct nvme_rdma_qe *ring;
216         int i;
217
218         ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
219         if (!ring)
220                 return NULL;
221
222         /*
223          * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
224          * lifetime. It's safe, since any change in the underlying RDMA device
225          * will issue error recovery and queue re-creation.
226          */
227         for (i = 0; i < ib_queue_size; i++) {
228                 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
229                         goto out_free_ring;
230         }
231
232         return ring;
233
234 out_free_ring:
235         nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
236         return NULL;
237 }
238
239 static void nvme_rdma_qp_event(struct ib_event *event, void *context)
240 {
241         pr_debug("QP event %s (%d)\n",
242                  ib_event_msg(event->event), event->event);
243
244 }
245
246 static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
247 {
248         int ret;
249
250         ret = wait_for_completion_interruptible(&queue->cm_done);
251         if (ret)
252                 return ret;
253         WARN_ON_ONCE(queue->cm_error > 0);
254         return queue->cm_error;
255 }
256
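/*
 * The caller passes factor = 3 so that each request can post up to three
 * send WRs (MR registration, SEND, local invalidate); one extra WR slot
 * on each work queue is reserved for ib_drain_qp().
 */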
257 static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
258 {
259         struct nvme_rdma_device *dev = queue->device;
260         struct ib_qp_init_attr init_attr;
261         int ret;
262
263         memset(&init_attr, 0, sizeof(init_attr));
264         init_attr.event_handler = nvme_rdma_qp_event;
265         /* +1 for drain */
266         init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
267         /* +1 for drain */
268         init_attr.cap.max_recv_wr = queue->queue_size + 1;
269         init_attr.cap.max_recv_sge = 1;
270         init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
271         init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
272         init_attr.qp_type = IB_QPT_RC;
273         init_attr.send_cq = queue->ib_cq;
274         init_attr.recv_cq = queue->ib_cq;
275         if (queue->pi_support)
276                 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
277         init_attr.qp_context = queue;
278
279         ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
280
281         queue->qp = queue->cm_id->qp;
282         return ret;
283 }
284
285 static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
286                 struct request *rq, unsigned int hctx_idx)
287 {
288         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
289
290         kfree(req->sqe.data);
291 }
292
293 static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
294                 struct request *rq, unsigned int hctx_idx,
295                 unsigned int numa_node)
296 {
297         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
298         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
299         int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
300         struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
301
302         nvme_req(rq)->ctrl = &ctrl->ctrl;
303         req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
304         if (!req->sqe.data)
305                 return -ENOMEM;
306
307         /* the metadata nvme_rdma_sgl struct is located after the command's data SGL */
308         if (queue->pi_support)
309                 req->metadata_sgl = (void *)nvme_req(rq) +
310                         sizeof(struct nvme_rdma_request) +
311                         NVME_RDMA_DATA_SGL_SIZE;
312
313         req->queue = queue;
314         nvme_req(rq)->cmd = req->sqe.data;
315
316         return 0;
317 }
318
319 static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
320                 unsigned int hctx_idx)
321 {
322         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
323         struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
324
325         BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
326
327         hctx->driver_data = queue;
328         return 0;
329 }
330
331 static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
332                 unsigned int hctx_idx)
333 {
334         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
335         struct nvme_rdma_queue *queue = &ctrl->queues[0];
336
337         BUG_ON(hctx_idx != 0);
338
339         hctx->driver_data = queue;
340         return 0;
341 }
342
343 static void nvme_rdma_free_dev(struct kref *ref)
344 {
345         struct nvme_rdma_device *ndev =
346                 container_of(ref, struct nvme_rdma_device, ref);
347
348         mutex_lock(&device_list_mutex);
349         list_del(&ndev->entry);
350         mutex_unlock(&device_list_mutex);
351
352         ib_dealloc_pd(ndev->pd);
353         kfree(ndev);
354 }
355
356 static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
357 {
358         kref_put(&dev->ref, nvme_rdma_free_dev);
359 }
360
361 static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
362 {
363         return kref_get_unless_zero(&dev->ref);
364 }
365
366 static struct nvme_rdma_device *
367 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
368 {
369         struct nvme_rdma_device *ndev;
370
371         mutex_lock(&device_list_mutex);
372         list_for_each_entry(ndev, &device_list, entry) {
373                 if (ndev->dev->node_guid == cm_id->device->node_guid &&
374                     nvme_rdma_dev_get(ndev))
375                         goto out_unlock;
376         }
377
378         ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
379         if (!ndev)
380                 goto out_err;
381
382         ndev->dev = cm_id->device;
383         kref_init(&ndev->ref);
384
385         ndev->pd = ib_alloc_pd(ndev->dev,
386                 register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
387         if (IS_ERR(ndev->pd))
388                 goto out_free_dev;
389
390         if (!(ndev->dev->attrs.device_cap_flags &
391               IB_DEVICE_MEM_MGT_EXTENSIONS)) {
392                 dev_err(&ndev->dev->dev,
393                         "Memory registrations not supported.\n");
394                 goto out_free_pd;
395         }
396
397         ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
398                                         ndev->dev->attrs.max_send_sge - 1);
399         list_add(&ndev->entry, &device_list);
400 out_unlock:
401         mutex_unlock(&device_list_mutex);
402         return ndev;
403
404 out_free_pd:
405         ib_dealloc_pd(ndev->pd);
406 out_free_dev:
407         kfree(ndev);
408 out_err:
409         mutex_unlock(&device_list_mutex);
410         return NULL;
411 }
412
413 static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
414 {
415         if (nvme_rdma_poll_queue(queue))
416                 ib_free_cq(queue->ib_cq);
417         else
418                 ib_cq_pool_put(queue->ib_cq, queue->cq_size);
419 }
420
421 static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
422 {
423         struct nvme_rdma_device *dev;
424         struct ib_device *ibdev;
425
426         if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
427                 return;
428
429         dev = queue->device;
430         ibdev = dev->dev;
431
432         if (queue->pi_support)
433                 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
434         ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
435
436         /*
437          * The cm_id object might have been destroyed during the RDMA connection
438          * establishment error flow in order to stop further cma events, so the
439          * destruction of the QP must not go through the rdma_cm API.
440          */
441         ib_destroy_qp(queue->qp);
442         nvme_rdma_free_cq(queue);
443
444         nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
445                         sizeof(struct nvme_completion), DMA_FROM_DEVICE);
446
447         nvme_rdma_dev_put(dev);
448 }
449
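/*
 * Presumably the "- 1" leaves room for the extra page entry that
 * nvme_rdma_create_queue_ib() adds for a misaligned first SG entry, so
 * pages_per_mr never exceeds the device's fast-registration limit.
 */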
450 static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
451 {
452         u32 max_page_list_len;
453
454         if (pi_support)
455                 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
456         else
457                 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
458
459         return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
460 }
461
462 static int nvme_rdma_create_cq(struct ib_device *ibdev,
463                 struct nvme_rdma_queue *queue)
464 {
465         int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
466
467         /*
468          * Spread I/O queue completion vectors according to their queue index.
469          * Admin queues can always go on completion vector 0.
470          */
471         comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
472
473         /* Polling queues need direct cq polling context */
474         if (nvme_rdma_poll_queue(queue))
475                 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
476                                            comp_vector, IB_POLL_DIRECT);
477         else
478                 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
479                                               comp_vector, IB_POLL_SOFTIRQ);
480
481         if (IS_ERR(queue->ib_cq)) {
482                 ret = PTR_ERR(queue->ib_cq);
483                 return ret;
484         }
485
486         return 0;
487 }
488
489 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
490 {
491         struct ib_device *ibdev;
492         const int send_wr_factor = 3;                   /* MR, SEND, INV */
493         const int cq_factor = send_wr_factor + 1;       /* + RECV */
494         int ret, pages_per_mr;
495
496         queue->device = nvme_rdma_find_get_device(queue->cm_id);
497         if (!queue->device) {
498                 dev_err(queue->cm_id->device->dev.parent,
499                         "no client data found!\n");
500                 return -ECONNREFUSED;
501         }
502         ibdev = queue->device->dev;
503
504         /* +1 for ib_drain_qp */
505         queue->cq_size = cq_factor * queue->queue_size + 1;
506
507         ret = nvme_rdma_create_cq(ibdev, queue);
508         if (ret)
509                 goto out_put_dev;
510
511         ret = nvme_rdma_create_qp(queue, send_wr_factor);
512         if (ret)
513                 goto out_destroy_ib_cq;
514
515         queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
516                         sizeof(struct nvme_completion), DMA_FROM_DEVICE);
517         if (!queue->rsp_ring) {
518                 ret = -ENOMEM;
519                 goto out_destroy_qp;
520         }
521
522         /*
523          * Currently we don't use SG_GAPS MRs, so if the first entry is
524          * misaligned we'll end up using two entries for a single data page;
525          * hence one additional entry is required.
526          */
527         pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
528         ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
529                               queue->queue_size,
530                               IB_MR_TYPE_MEM_REG,
531                               pages_per_mr, 0);
532         if (ret) {
533                 dev_err(queue->ctrl->ctrl.device,
534                         "failed to initialize MR pool sized %d for QID %d\n",
535                         queue->queue_size, nvme_rdma_queue_idx(queue));
536                 goto out_destroy_ring;
537         }
538
539         if (queue->pi_support) {
540                 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
541                                       queue->queue_size, IB_MR_TYPE_INTEGRITY,
542                                       pages_per_mr, pages_per_mr);
543                 if (ret) {
544                         dev_err(queue->ctrl->ctrl.device,
545                                 "failed to initialize PI MR pool sized %d for QID %d\n",
546                                 queue->queue_size, nvme_rdma_queue_idx(queue));
547                         goto out_destroy_mr_pool;
548                 }
549         }
550
551         set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
552
553         return 0;
554
555 out_destroy_mr_pool:
556         ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
557 out_destroy_ring:
558         nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
559                             sizeof(struct nvme_completion), DMA_FROM_DEVICE);
560 out_destroy_qp:
561         rdma_destroy_qp(queue->cm_id);
562 out_destroy_ib_cq:
563         nvme_rdma_free_cq(queue);
564 out_put_dev:
565         nvme_rdma_dev_put(queue->device);
566         return ret;
567 }
568
569 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
570                 int idx, size_t queue_size)
571 {
572         struct nvme_rdma_queue *queue;
573         struct sockaddr *src_addr = NULL;
574         int ret;
575
576         queue = &ctrl->queues[idx];
577         mutex_init(&queue->queue_lock);
578         queue->ctrl = ctrl;
579         if (idx && ctrl->ctrl.max_integrity_segments)
580                 queue->pi_support = true;
581         else
582                 queue->pi_support = false;
583         init_completion(&queue->cm_done);
584
585         if (idx > 0)
586                 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
587         else
588                 queue->cmnd_capsule_len = sizeof(struct nvme_command);
589
590         queue->queue_size = queue_size;
591
592         queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
593                         RDMA_PS_TCP, IB_QPT_RC);
594         if (IS_ERR(queue->cm_id)) {
595                 dev_info(ctrl->ctrl.device,
596                         "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
597                 ret = PTR_ERR(queue->cm_id);
598                 goto out_destroy_mutex;
599         }
600
601         if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
602                 src_addr = (struct sockaddr *)&ctrl->src_addr;
603
604         queue->cm_error = -ETIMEDOUT;
605         ret = rdma_resolve_addr(queue->cm_id, src_addr,
606                         (struct sockaddr *)&ctrl->addr,
607                         NVME_RDMA_CM_TIMEOUT_MS);
608         if (ret) {
609                 dev_info(ctrl->ctrl.device,
610                         "rdma_resolve_addr failed (%d).\n", ret);
611                 goto out_destroy_cm_id;
612         }
613
614         ret = nvme_rdma_wait_for_cm(queue);
615         if (ret) {
616                 dev_info(ctrl->ctrl.device,
617                         "rdma connection establishment failed (%d)\n", ret);
618                 goto out_destroy_cm_id;
619         }
620
621         set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
622
623         return 0;
624
625 out_destroy_cm_id:
626         rdma_destroy_id(queue->cm_id);
627         nvme_rdma_destroy_queue_ib(queue);
628 out_destroy_mutex:
629         mutex_destroy(&queue->queue_lock);
630         return ret;
631 }
632
633 static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
634 {
635         rdma_disconnect(queue->cm_id);
636         ib_drain_qp(queue->qp);
637 }
638
639 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
640 {
641         if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
642                 return;
643
644         mutex_lock(&queue->queue_lock);
645         if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
646                 __nvme_rdma_stop_queue(queue);
647         mutex_unlock(&queue->queue_lock);
648 }
649
650 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
651 {
652         if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
653                 return;
654
655         rdma_destroy_id(queue->cm_id);
656         nvme_rdma_destroy_queue_ib(queue);
657         mutex_destroy(&queue->queue_lock);
658 }
659
660 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
661 {
662         int i;
663
664         for (i = 1; i < ctrl->ctrl.queue_count; i++)
665                 nvme_rdma_free_queue(&ctrl->queues[i]);
666 }
667
668 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
669 {
670         int i;
671
672         for (i = 1; i < ctrl->ctrl.queue_count; i++)
673                 nvme_rdma_stop_queue(&ctrl->queues[i]);
674 }
675
676 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
677 {
678         struct nvme_rdma_queue *queue = &ctrl->queues[idx];
679         int ret;
680
681         if (idx)
682                 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
683         else
684                 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
685
686         if (!ret) {
687                 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
688         } else {
689                 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
690                         __nvme_rdma_stop_queue(queue);
691                 dev_info(ctrl->ctrl.device,
692                         "failed to connect queue: %d ret=%d\n", idx, ret);
693         }
694         return ret;
695 }
696
697 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
698                                      int first, int last)
699 {
700         int i, ret = 0;
701
702         for (i = first; i < last; i++) {
703                 ret = nvme_rdma_start_queue(ctrl, i);
704                 if (ret)
705                         goto out_stop_queues;
706         }
707
708         return 0;
709
710 out_stop_queues:
711         for (i--; i >= first; i--)
712                 nvme_rdma_stop_queue(&ctrl->queues[i]);
713         return ret;
714 }
715
716 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
717 {
718         struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
719         unsigned int nr_io_queues;
720         int i, ret;
721
722         nr_io_queues = nvmf_nr_io_queues(opts);
723         ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
724         if (ret)
725                 return ret;
726
727         if (nr_io_queues == 0) {
728                 dev_err(ctrl->ctrl.device,
729                         "unable to set any I/O queues\n");
730                 return -ENOMEM;
731         }
732
733         ctrl->ctrl.queue_count = nr_io_queues + 1;
734         dev_info(ctrl->ctrl.device,
735                 "creating %d I/O queues.\n", nr_io_queues);
736
737         nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
738         for (i = 1; i < ctrl->ctrl.queue_count; i++) {
739                 ret = nvme_rdma_alloc_queue(ctrl, i,
740                                 ctrl->ctrl.sqsize + 1);
741                 if (ret)
742                         goto out_free_queues;
743         }
744
745         return 0;
746
747 out_free_queues:
748         for (i--; i >= 1; i--)
749                 nvme_rdma_free_queue(&ctrl->queues[i]);
750
751         return ret;
752 }
753
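/*
 * The per-command pdu must hold the request plus the inline data SGL,
 * and additionally the metadata SGL when PI is in use. nr_maps is
 * HCTX_MAX_TYPES (default/read/poll) when poll queues were requested,
 * otherwise just default and read.
 */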
754 static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
755 {
756         unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
757                                 NVME_RDMA_DATA_SGL_SIZE;
758
759         if (ctrl->max_integrity_segments)
760                 cmd_size += sizeof(struct nvme_rdma_sgl) +
761                             NVME_RDMA_METADATA_SGL_SIZE;
762
763         return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
764                         &nvme_rdma_mq_ops,
765                         ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
766                         cmd_size);
767 }
768
769 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
770 {
771         if (ctrl->async_event_sqe.data) {
772                 cancel_work_sync(&ctrl->ctrl.async_event_work);
773                 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
774                                 sizeof(struct nvme_command), DMA_TO_DEVICE);
775                 ctrl->async_event_sqe.data = NULL;
776         }
777         nvme_rdma_free_queue(&ctrl->queues[0]);
778 }
779
780 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
781                 bool new)
782 {
783         bool pi_capable = false;
784         int error;
785
786         error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
787         if (error)
788                 return error;
789
790         ctrl->device = ctrl->queues[0].device;
791         ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
792
793         /* T10-PI support */
794         if (ctrl->device->dev->attrs.kernel_cap_flags &
795             IBK_INTEGRITY_HANDOVER)
796                 pi_capable = true;
797
798         ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
799                                                         pi_capable);
800
801         /*
802          * Bind the async event SQE DMA mapping to the admin queue lifetime.
803          * It's safe, since any change in the underlying RDMA device will issue
804          * error recovery and queue re-creation.
805          */
806         error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
807                         sizeof(struct nvme_command), DMA_TO_DEVICE);
808         if (error)
809                 goto out_free_queue;
810
811         if (new) {
812                 error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
813                                 &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
814                                 sizeof(struct nvme_rdma_request) +
815                                 NVME_RDMA_DATA_SGL_SIZE);
816                 if (error)
817                         goto out_free_async_qe;
818
819         }
820
821         error = nvme_rdma_start_queue(ctrl, 0);
822         if (error)
823                 goto out_remove_admin_tag_set;
824
825         error = nvme_enable_ctrl(&ctrl->ctrl);
826         if (error)
827                 goto out_stop_queue;
828
829         ctrl->ctrl.max_segments = ctrl->max_fr_pages;
830         ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
831         if (pi_capable)
832                 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
833         else
834                 ctrl->ctrl.max_integrity_segments = 0;
835
836         nvme_unquiesce_admin_queue(&ctrl->ctrl);
837
838         error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
839         if (error)
840                 goto out_quiesce_queue;
841
842         return 0;
843
844 out_quiesce_queue:
845         nvme_quiesce_admin_queue(&ctrl->ctrl);
846         blk_sync_queue(ctrl->ctrl.admin_q);
847 out_stop_queue:
848         nvme_rdma_stop_queue(&ctrl->queues[0]);
849         nvme_cancel_admin_tagset(&ctrl->ctrl);
850 out_remove_admin_tag_set:
851         if (new)
852                 nvme_remove_admin_tag_set(&ctrl->ctrl);
853 out_free_async_qe:
854         if (ctrl->async_event_sqe.data) {
855                 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
856                         sizeof(struct nvme_command), DMA_TO_DEVICE);
857                 ctrl->async_event_sqe.data = NULL;
858         }
859 out_free_queue:
860         nvme_rdma_free_queue(&ctrl->queues[0]);
861         return error;
862 }
863
864 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
865 {
866         int ret, nr_queues;
867
868         ret = nvme_rdma_alloc_io_queues(ctrl);
869         if (ret)
870                 return ret;
871
872         if (new) {
873                 ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
874                 if (ret)
875                         goto out_free_io_queues;
876         }
877
878         /*
879          * Only start I/O queues for which we have allocated the tagset
880          * and limited it to the available queues. On reconnects, the
881          * queue number might have changed.
882          */
883         nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
884         ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
885         if (ret)
886                 goto out_cleanup_tagset;
887
888         if (!new) {
889                 nvme_start_freeze(&ctrl->ctrl);
890                 nvme_unquiesce_io_queues(&ctrl->ctrl);
891                 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
892                         /*
893                          * If we timed out waiting for freeze we are likely to
894                          * be stuck.  Fail the controller initialization just
895                          * to be safe.
896                          */
897                         ret = -ENODEV;
898                         nvme_unfreeze(&ctrl->ctrl);
899                         goto out_wait_freeze_timed_out;
900                 }
901                 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
902                         ctrl->ctrl.queue_count - 1);
903                 nvme_unfreeze(&ctrl->ctrl);
904         }
905
906         /*
907          * If the number of queues has increased (reconnect case)
908          * start all new queues now.
909          */
910         ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
911                                         ctrl->tag_set.nr_hw_queues + 1);
912         if (ret)
913                 goto out_wait_freeze_timed_out;
914
915         return 0;
916
917 out_wait_freeze_timed_out:
918         nvme_quiesce_io_queues(&ctrl->ctrl);
919         nvme_sync_io_queues(&ctrl->ctrl);
920         nvme_rdma_stop_io_queues(ctrl);
921 out_cleanup_tagset:
922         nvme_cancel_tagset(&ctrl->ctrl);
923         if (new)
924                 nvme_remove_io_tag_set(&ctrl->ctrl);
925 out_free_io_queues:
926         nvme_rdma_free_io_queues(ctrl);
927         return ret;
928 }
929
930 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
931                 bool remove)
932 {
933         nvme_quiesce_admin_queue(&ctrl->ctrl);
934         blk_sync_queue(ctrl->ctrl.admin_q);
935         nvme_rdma_stop_queue(&ctrl->queues[0]);
936         nvme_cancel_admin_tagset(&ctrl->ctrl);
937         if (remove) {
938                 nvme_unquiesce_admin_queue(&ctrl->ctrl);
939                 nvme_remove_admin_tag_set(&ctrl->ctrl);
940         }
941         nvme_rdma_destroy_admin_queue(ctrl);
942 }
943
944 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
945                 bool remove)
946 {
947         if (ctrl->ctrl.queue_count > 1) {
948                 nvme_quiesce_io_queues(&ctrl->ctrl);
949                 nvme_sync_io_queues(&ctrl->ctrl);
950                 nvme_rdma_stop_io_queues(ctrl);
951                 nvme_cancel_tagset(&ctrl->ctrl);
952                 if (remove) {
953                         nvme_unquiesce_io_queues(&ctrl->ctrl);
954                         nvme_remove_io_tag_set(&ctrl->ctrl);
955                 }
956                 nvme_rdma_free_io_queues(ctrl);
957         }
958 }
959
960 static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
961 {
962         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
963
964         flush_work(&ctrl->err_work);
965         cancel_delayed_work_sync(&ctrl->reconnect_work);
966 }
967
968 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
969 {
970         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
971
972         if (list_empty(&ctrl->list))
973                 goto free_ctrl;
974
975         mutex_lock(&nvme_rdma_ctrl_mutex);
976         list_del(&ctrl->list);
977         mutex_unlock(&nvme_rdma_ctrl_mutex);
978
979         nvmf_free_options(nctrl->opts);
980 free_ctrl:
981         kfree(ctrl->queues);
982         kfree(ctrl);
983 }
984
985 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
986                                           int status)
987 {
988         enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
989
990         /* If we are resetting/deleting then do nothing */
991         if (state != NVME_CTRL_CONNECTING) {
992                 WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
993                 return;
994         }
995
996         if (nvmf_should_reconnect(&ctrl->ctrl, status)) {
997                 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
998                         ctrl->ctrl.opts->reconnect_delay);
999                 queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
1000                                 ctrl->ctrl.opts->reconnect_delay * HZ);
1001         } else {
1002                 nvme_delete_ctrl(&ctrl->ctrl);
1003         }
1004 }
1005
1006 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
1007 {
1008         int ret;
1009         bool changed;
1010         u16 max_queue_size;
1011
1012         ret = nvme_rdma_configure_admin_queue(ctrl, new);
1013         if (ret)
1014                 return ret;
1015
1016         if (ctrl->ctrl.icdoff) {
1017                 ret = -EOPNOTSUPP;
1018                 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
1019                 goto destroy_admin;
1020         }
1021
1022         if (!(ctrl->ctrl.sgls & NVME_CTRL_SGLS_KSDBDS)) {
1023                 ret = -EOPNOTSUPP;
1024                 dev_err(ctrl->ctrl.device,
1025                         "Mandatory keyed sgls are not supported!\n");
1026                 goto destroy_admin;
1027         }
1028
1029         if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
1030                 dev_warn(ctrl->ctrl.device,
1031                         "queue_size %zu > ctrl sqsize %u, clamping down\n",
1032                         ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
1033         }
1034
1035         if (ctrl->ctrl.max_integrity_segments)
1036                 max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
1037         else
1038                 max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
1039
1040         if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
1041                 dev_warn(ctrl->ctrl.device,
1042                          "ctrl sqsize %u > max queue size %u, clamping down\n",
1043                          ctrl->ctrl.sqsize + 1, max_queue_size);
1044                 ctrl->ctrl.sqsize = max_queue_size - 1;
1045         }
1046
1047         if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1048                 dev_warn(ctrl->ctrl.device,
1049                         "sqsize %u > ctrl maxcmd %u, clamping down\n",
1050                         ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1051                 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1052         }
1053
1054         if (ctrl->ctrl.sgls & NVME_CTRL_SGLS_SAOS)
1055                 ctrl->use_inline_data = true;
1056
1057         if (ctrl->ctrl.queue_count > 1) {
1058                 ret = nvme_rdma_configure_io_queues(ctrl, new);
1059                 if (ret)
1060                         goto destroy_admin;
1061         }
1062
1063         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
1064         if (!changed) {
1065                 /*
1066                  * A state change failure is OK if we started controller
1067                  * deletion, unless we are in the middle of creating a new
1068                  * controller, in which case it would race with the teardown flow.
1069                  */
1070                 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
1071
1072                 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
1073                              state != NVME_CTRL_DELETING_NOIO);
1074                 WARN_ON_ONCE(new);
1075                 ret = -EINVAL;
1076                 goto destroy_io;
1077         }
1078
1079         nvme_start_ctrl(&ctrl->ctrl);
1080         return 0;
1081
1082 destroy_io:
1083         if (ctrl->ctrl.queue_count > 1) {
1084                 nvme_quiesce_io_queues(&ctrl->ctrl);
1085                 nvme_sync_io_queues(&ctrl->ctrl);
1086                 nvme_rdma_stop_io_queues(ctrl);
1087                 nvme_cancel_tagset(&ctrl->ctrl);
1088                 if (new)
1089                         nvme_remove_io_tag_set(&ctrl->ctrl);
1090                 nvme_rdma_free_io_queues(ctrl);
1091         }
1092 destroy_admin:
1093         nvme_stop_keep_alive(&ctrl->ctrl);
1094         nvme_rdma_teardown_admin_queue(ctrl, new);
1095         return ret;
1096 }
1097
1098 static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
1099 {
1100         struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
1101                         struct nvme_rdma_ctrl, reconnect_work);
1102         int ret;
1103
1104         ++ctrl->ctrl.nr_reconnects;
1105
1106         ret = nvme_rdma_setup_ctrl(ctrl, false);
1107         if (ret)
1108                 goto requeue;
1109
1110         dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
1111                         ctrl->ctrl.nr_reconnects);
1112
1113         ctrl->ctrl.nr_reconnects = 0;
1114
1115         return;
1116
1117 requeue:
1118         dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d/%d\n",
1119                  ctrl->ctrl.nr_reconnects, ctrl->ctrl.opts->max_reconnects);
1120         nvme_rdma_reconnect_or_remove(ctrl, ret);
1121 }
1122
1123 static void nvme_rdma_error_recovery_work(struct work_struct *work)
1124 {
1125         struct nvme_rdma_ctrl *ctrl = container_of(work,
1126                         struct nvme_rdma_ctrl, err_work);
1127
1128         nvme_stop_keep_alive(&ctrl->ctrl);
1129         flush_work(&ctrl->ctrl.async_event_work);
1130         nvme_rdma_teardown_io_queues(ctrl, false);
1131         nvme_unquiesce_io_queues(&ctrl->ctrl);
1132         nvme_rdma_teardown_admin_queue(ctrl, false);
1133         nvme_unquiesce_admin_queue(&ctrl->ctrl);
1134         nvme_auth_stop(&ctrl->ctrl);
1135
1136         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
1137                 /* state change failure is ok if we started ctrl delete */
1138                 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
1139
1140                 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
1141                              state != NVME_CTRL_DELETING_NOIO);
1142                 return;
1143         }
1144
1145         nvme_rdma_reconnect_or_remove(ctrl, 0);
1146 }
1147
1148 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
1149 {
1150         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
1151                 return;
1152
1153         dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1154         queue_work(nvme_reset_wq, &ctrl->err_work);
1155 }
1156
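/*
 * Each mapped request holds two references (see nvme_rdma_map_data()):
 * one dropped on the send completion and one on the receive (or
 * local-invalidate) completion. The request completes only when both
 * are gone.
 */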
1157 static void nvme_rdma_end_request(struct nvme_rdma_request *req)
1158 {
1159         struct request *rq = blk_mq_rq_from_pdu(req);
1160
1161         if (!refcount_dec_and_test(&req->ref))
1162                 return;
1163         if (!nvme_try_complete_req(rq, req->status, req->result))
1164                 nvme_rdma_complete_rq(rq);
1165 }
1166
1167 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
1168                 const char *op)
1169 {
1170         struct nvme_rdma_queue *queue = wc->qp->qp_context;
1171         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1172
1173         if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
1174                 dev_info(ctrl->ctrl.device,
1175                              "%s for CQE 0x%p failed with status %s (%d)\n",
1176                              op, wc->wr_cqe,
1177                              ib_wc_status_msg(wc->status), wc->status);
1178         nvme_rdma_error_recovery(ctrl);
1179 }
1180
1181 static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
1182 {
1183         if (unlikely(wc->status != IB_WC_SUCCESS))
1184                 nvme_rdma_wr_error(cq, wc, "MEMREG");
1185 }
1186
1187 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1188 {
1189         struct nvme_rdma_request *req =
1190                 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
1191
1192         if (unlikely(wc->status != IB_WC_SUCCESS))
1193                 nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
1194         else
1195                 nvme_rdma_end_request(req);
1196 }
1197
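/*
 * Fallback invalidation, posted from the response path when the target
 * did not remotely invalidate the rkey via send-with-invalidate; the
 * request must not complete before the invalidation is done.
 */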
1198 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
1199                 struct nvme_rdma_request *req)
1200 {
1201         struct ib_send_wr wr = {
1202                 .opcode             = IB_WR_LOCAL_INV,
1203                 .next               = NULL,
1204                 .num_sge            = 0,
1205                 .send_flags         = IB_SEND_SIGNALED,
1206                 .ex.invalidate_rkey = req->mr->rkey,
1207         };
1208
1209         req->reg_cqe.done = nvme_rdma_inv_rkey_done;
1210         wr.wr_cqe = &req->reg_cqe;
1211
1212         return ib_post_send(queue->qp, &wr, NULL);
1213 }
1214
1215 static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
1216 {
1217         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1218
1219         if (blk_integrity_rq(rq)) {
1220                 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1221                                 req->metadata_sgl->nents, rq_dma_dir(rq));
1222                 sg_free_table_chained(&req->metadata_sgl->sg_table,
1223                                       NVME_INLINE_METADATA_SG_CNT);
1224         }
1225
1226         ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1227                         rq_dma_dir(rq));
1228         sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
1229 }
1230
1231 static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
1232                 struct request *rq)
1233 {
1234         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1235         struct nvme_rdma_device *dev = queue->device;
1236         struct ib_device *ibdev = dev->dev;
1237         struct list_head *pool = &queue->qp->rdma_mrs;
1238
1239         if (!blk_rq_nr_phys_segments(rq))
1240                 return;
1241
1242         if (req->use_sig_mr)
1243                 pool = &queue->qp->sig_mrs;
1244
1245         if (req->mr) {
1246                 ib_mr_pool_put(queue->qp, pool, req->mr);
1247                 req->mr = NULL;
1248         }
1249
1250         nvme_rdma_dma_unmap_req(ibdev, rq);
1251 }
1252
1253 static int nvme_rdma_set_sg_null(struct nvme_command *c)
1254 {
1255         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1256
1257         sg->addr = 0;
1258         put_unaligned_le24(0, sg->length);
1259         put_unaligned_le32(0, sg->key);
1260         sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1261         return 0;
1262 }
1263
1264 static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
1265                 struct nvme_rdma_request *req, struct nvme_command *c,
1266                 int count)
1267 {
1268         struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1269         struct ib_sge *sge = &req->sge[1];
1270         struct scatterlist *sgl;
1271         u32 len = 0;
1272         int i;
1273
1274         for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
1275                 sge->addr = sg_dma_address(sgl);
1276                 sge->length = sg_dma_len(sgl);
1277                 sge->lkey = queue->device->pd->local_dma_lkey;
1278                 len += sge->length;
1279                 sge++;
1280         }
1281
1282         sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1283         sg->length = cpu_to_le32(len);
1284         sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1285
1286         req->num_sge += count;
1287         return 0;
1288 }
1289
1290 static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
1291                 struct nvme_rdma_request *req, struct nvme_command *c)
1292 {
1293         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1294
1295         sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
1296         put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
1297         put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
1298         sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1299         return 0;
1300 }
1301
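/*
 * Fast-registration path: register an MR over the data SG list and
 * describe it to the target as a keyed SGL (rkey, iova, length).
 * NVME_SGL_FMT_INVALIDATE asks the target to remotely invalidate the
 * rkey when it sends the response.
 */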
1302 static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
1303                 struct nvme_rdma_request *req, struct nvme_command *c,
1304                 int count)
1305 {
1306         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1307         int nr;
1308
1309         req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
1310         if (WARN_ON_ONCE(!req->mr))
1311                 return -EAGAIN;
1312
1313         /*
1314          * Align the MR to a 4K page size to match the ctrl page size and
1315          * the block virtual boundary.
1316          */
1317         nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
1318                           SZ_4K);
1319         if (unlikely(nr < count)) {
1320                 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
1321                 req->mr = NULL;
1322                 if (nr < 0)
1323                         return nr;
1324                 return -EINVAL;
1325         }
1326
1327         ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1328
1329         req->reg_cqe.done = nvme_rdma_memreg_done;
1330         memset(&req->reg_wr, 0, sizeof(req->reg_wr));
1331         req->reg_wr.wr.opcode = IB_WR_REG_MR;
1332         req->reg_wr.wr.wr_cqe = &req->reg_cqe;
1333         req->reg_wr.wr.num_sge = 0;
1334         req->reg_wr.mr = req->mr;
1335         req->reg_wr.key = req->mr->rkey;
1336         req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
1337                              IB_ACCESS_REMOTE_READ |
1338                              IB_ACCESS_REMOTE_WRITE;
1339
1340         sg->addr = cpu_to_le64(req->mr->iova);
1341         put_unaligned_le24(req->mr->length, sg->length);
1342         put_unaligned_le32(req->mr->rkey, sg->key);
1343         sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
1344                         NVME_SGL_FMT_INVALIDATE;
1345
1346         return 0;
1347 }
1348
1349 static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
1350                 struct nvme_command *cmd, struct ib_sig_domain *domain,
1351                 u16 control, u8 pi_type)
1352 {
1353         domain->sig_type = IB_SIG_TYPE_T10_DIF;
1354         domain->sig.dif.bg_type = IB_T10DIF_CRC;
1355         domain->sig.dif.pi_interval = 1 << bi->interval_exp;
1356         domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
1357         if (control & NVME_RW_PRINFO_PRCHK_REF)
1358                 domain->sig.dif.ref_remap = true;
1359
1360         domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
1361         domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
1362         domain->sig.dif.app_escape = true;
1363         if (pi_type == NVME_NS_DPS_PI_TYPE3)
1364                 domain->sig.dif.ref_escape = true;
1365 }
1366
1367 static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
1368                 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
1369                 u8 pi_type)
1370 {
1371         u16 control = le16_to_cpu(cmd->rw.control);
1372
1373         memset(sig_attrs, 0, sizeof(*sig_attrs));
1374         if (control & NVME_RW_PRINFO_PRACT) {
1375                 /* for WRITE_INSERT/READ_STRIP no memory domain */
1376                 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
1377                 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1378                                          pi_type);
1379                 /* Clear the PRACT bit since HCA will generate/verify the PI */
1380                 control &= ~NVME_RW_PRINFO_PRACT;
1381                 cmd->rw.control = cpu_to_le16(control);
1382         } else {
1383                 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
1384                 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1385                                          pi_type);
1386                 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
1387                                          pi_type);
1388         }
1389 }
1390
1391 static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
1392 {
1393         *mask = 0;
1394         if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
1395                 *mask |= IB_SIG_CHECK_REFTAG;
1396         if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
1397                 *mask |= IB_SIG_CHECK_GUARD;
1398 }
1399
1400 static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
1401 {
1402         if (unlikely(wc->status != IB_WC_SUCCESS))
1403                 nvme_rdma_wr_error(cq, wc, "SIG");
1404 }
1405
1406 static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
1407                 struct nvme_rdma_request *req, struct nvme_command *c,
1408                 int count, int pi_count)
1409 {
1410         struct nvme_rdma_sgl *sgl = &req->data_sgl;
1411         struct ib_reg_wr *wr = &req->reg_wr;
1412         struct request *rq = blk_mq_rq_from_pdu(req);
1413         struct nvme_ns *ns = rq->q->queuedata;
1414         struct bio *bio = rq->bio;
1415         struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1416         struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
1417         u32 xfer_len;
1418         int nr;
1419
1420         req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
1421         if (WARN_ON_ONCE(!req->mr))
1422                 return -EAGAIN;
1423
1424         nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
1425                              req->metadata_sgl->sg_table.sgl, pi_count, NULL,
1426                              SZ_4K);
1427         if (unlikely(nr))
1428                 goto mr_put;
1429
1430         nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
1431         nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
1432
1433         ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1434
1435         req->reg_cqe.done = nvme_rdma_sig_done;
1436         memset(wr, 0, sizeof(*wr));
1437         wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
1438         wr->wr.wr_cqe = &req->reg_cqe;
1439         wr->wr.num_sge = 0;
1440         wr->wr.send_flags = 0;
1441         wr->mr = req->mr;
1442         wr->key = req->mr->rkey;
1443         wr->access = IB_ACCESS_LOCAL_WRITE |
1444                      IB_ACCESS_REMOTE_READ |
1445                      IB_ACCESS_REMOTE_WRITE;
1446
1447         sg->addr = cpu_to_le64(req->mr->iova);
1448         xfer_len = req->mr->length;
1449         /* Check if PI is added by the HW */
1450         if (!pi_count)
1451                 xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
1452         put_unaligned_le24(xfer_len, sg->length);
1453         put_unaligned_le32(req->mr->rkey, sg->key);
1454         sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1455
1456         return 0;
1457
1458 mr_put:
1459         ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
1460         req->mr = NULL;
1461         if (nr < 0)
1462                 return nr;
1463         return -EINVAL;
1464 }
1465
1466 static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
1467                 int *count, int *pi_count)
1468 {
1469         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1470         int ret;
1471
1472         req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
1473         ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
1474                         blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
1475                         NVME_INLINE_SG_CNT);
1476         if (ret)
1477                 return -ENOMEM;
1478
1479         req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1480                                             req->data_sgl.sg_table.sgl);
1481
1482         *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1483                                req->data_sgl.nents, rq_dma_dir(rq));
1484         if (unlikely(*count <= 0)) {
1485                 ret = -EIO;
1486                 goto out_free_table;
1487         }
1488
1489         if (blk_integrity_rq(rq)) {
1490                 req->metadata_sgl->sg_table.sgl =
1491                         (struct scatterlist *)(req->metadata_sgl + 1);
1492                 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
1493                                 rq->nr_integrity_segments,
1494                                 req->metadata_sgl->sg_table.sgl,
1495                                 NVME_INLINE_METADATA_SG_CNT);
1496                 if (unlikely(ret)) {
1497                         ret = -ENOMEM;
1498                         goto out_unmap_sg;
1499                 }
1500
1501                 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
1502                                 req->metadata_sgl->sg_table.sgl);
1503                 *pi_count = ib_dma_map_sg(ibdev,
1504                                           req->metadata_sgl->sg_table.sgl,
1505                                           req->metadata_sgl->nents,
1506                                           rq_dma_dir(rq));
1507                 if (unlikely(*pi_count <= 0)) {
1508                         ret = -EIO;
1509                         goto out_free_pi_table;
1510                 }
1511         }
1512
1513         return 0;
1514
1515 out_free_pi_table:
1516         sg_free_table_chained(&req->metadata_sgl->sg_table,
1517                               NVME_INLINE_METADATA_SG_CNT);
1518 out_unmap_sg:
1519         ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1520                         rq_dma_dir(rq));
1521 out_free_table:
1522         sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
1523         return ret;
1524 }
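/*
 * The unwind order above mirrors the setup order: a metadata DMA
 * mapping failure frees the metadata sg table, then unmaps the data
 * sg list, then frees the data sg table.  The first sg entries live
 * inline in the request PDU (req + 1), so only the chained tables
 * need explicit teardown.
 */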
1525
1526 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
1527                 struct request *rq, struct nvme_command *c)
1528 {
1529         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1530         struct nvme_rdma_device *dev = queue->device;
1531         struct ib_device *ibdev = dev->dev;
1532         int pi_count = 0;
1533         int count, ret;
1534
1535         req->num_sge = 1;
1536         refcount_set(&req->ref, 2); /* send and recv completions */
1537
1538         c->common.flags |= NVME_CMD_SGL_METABUF;
1539
1540         if (!blk_rq_nr_phys_segments(rq))
1541                 return nvme_rdma_set_sg_null(c);
1542
1543         ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
1544         if (unlikely(ret))
1545                 return ret;
1546
1547         if (req->use_sig_mr) {
1548                 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
1549                 goto out;
1550         }
1551
1552         if (count <= dev->num_inline_segments) {
1553                 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
1554                     queue->ctrl->use_inline_data &&
1555                     blk_rq_payload_bytes(rq) <=
1556                                 nvme_rdma_inline_data_size(queue)) {
1557                         ret = nvme_rdma_map_sg_inline(queue, req, c, count);
1558                         goto out;
1559                 }
1560
1561                 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
1562                         ret = nvme_rdma_map_sg_single(queue, req, c);
1563                         goto out;
1564                 }
1565         }
1566
1567         ret = nvme_rdma_map_sg_fr(queue, req, c, count);
1568 out:
1569         if (unlikely(ret))
1570                 goto out_dma_unmap_req;
1571
1572         return 0;
1573
1574 out_dma_unmap_req:
1575         nvme_rdma_dma_unmap_req(ibdev, rq);
1576         return ret;
1577 }
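/*
 * Mapping strategy, in order of preference: a request with no
 * payload gets a NULL SGL; PI-protected I/O always uses the
 * signature MR path; small writes on I/O queues may be carried
 * inline in the command capsule; a single segment may use the PD's
 * unsafe global rkey if the device exposes one; everything else
 * falls back to fast-register memory registration.
 */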
1578
1579 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1580 {
1581         struct nvme_rdma_qe *qe =
1582                 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
1583         struct nvme_rdma_request *req =
1584                 container_of(qe, struct nvme_rdma_request, sqe);
1585
1586         if (unlikely(wc->status != IB_WC_SUCCESS))
1587                 nvme_rdma_wr_error(cq, wc, "SEND");
1588         else
1589                 nvme_rdma_end_request(req);
1590 }
1591
1592 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1593                 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
1594                 struct ib_send_wr *first)
1595 {
1596         struct ib_send_wr wr;
1597         int ret;
1598
1599         sge->addr   = qe->dma;
1600         sge->length = sizeof(struct nvme_command);
1601         sge->lkey   = queue->device->pd->local_dma_lkey;
1602
1603         wr.next       = NULL;
1604         wr.wr_cqe     = &qe->cqe;
1605         wr.sg_list    = sge;
1606         wr.num_sge    = num_sge;
1607         wr.opcode     = IB_WR_SEND;
1608         wr.send_flags = IB_SEND_SIGNALED;
1609
1610         if (first)
1611                 first->next = &wr;
1612         else
1613                 first = &wr;
1614
1615         ret = ib_post_send(queue->qp, first, NULL);
1616         if (unlikely(ret)) {
1617                 dev_err(queue->ctrl->ctrl.device,
1618                              "%s failed with error code %d\n", __func__, ret);
1619         }
1620         return ret;
1621 }
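/*
 * When a registration WR is passed in as @first, it is chained ahead
 * of the SEND so the HCA executes the MR registration before the
 * command capsule that references the new rkey is transmitted.
 */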
1622
1623 static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
1624                 struct nvme_rdma_qe *qe)
1625 {
1626         struct ib_recv_wr wr;
1627         struct ib_sge list;
1628         int ret;
1629
1630         list.addr   = qe->dma;
1631         list.length = sizeof(struct nvme_completion);
1632         list.lkey   = queue->device->pd->local_dma_lkey;
1633
1634         qe->cqe.done = nvme_rdma_recv_done;
1635
1636         wr.next     = NULL;
1637         wr.wr_cqe   = &qe->cqe;
1638         wr.sg_list  = &list;
1639         wr.num_sge  = 1;
1640
1641         ret = ib_post_recv(queue->qp, &wr, NULL);
1642         if (unlikely(ret)) {
1643                 dev_err(queue->ctrl->ctrl.device,
1644                         "%s failed with error code %d\n", __func__, ret);
1645         }
1646         return ret;
1647 }
1648
1649 static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
1650 {
1651         u32 queue_idx = nvme_rdma_queue_idx(queue);
1652
1653         if (queue_idx == 0)
1654                 return queue->ctrl->admin_tag_set.tags[queue_idx];
1655         return queue->ctrl->tag_set.tags[queue_idx - 1];
1656 }
1657
1658 static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
1659 {
1660         if (unlikely(wc->status != IB_WC_SUCCESS))
1661                 nvme_rdma_wr_error(cq, wc, "ASYNC");
1662 }
1663
1664 static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
1665 {
1666         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1667         struct nvme_rdma_queue *queue = &ctrl->queues[0];
1668         struct ib_device *dev = queue->device->dev;
1669         struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1670         struct nvme_command *cmd = sqe->data;
1671         struct ib_sge sge;
1672         int ret;
1673
1674         ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
1675
1676         memset(cmd, 0, sizeof(*cmd));
1677         cmd->common.opcode = nvme_admin_async_event;
1678         cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1679         cmd->common.flags |= NVME_CMD_SGL_METABUF;
1680         nvme_rdma_set_sg_null(cmd);
1681
1682         sqe->cqe.done = nvme_rdma_async_done;
1683
1684         ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
1685                         DMA_TO_DEVICE);
1686
1687         ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
1688         WARN_ON_ONCE(ret);
1689 }
1690
1691 static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
1692                 struct nvme_completion *cqe, struct ib_wc *wc)
1693 {
1694         struct request *rq;
1695         struct nvme_rdma_request *req;
1696
1697         rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
1698         if (!rq) {
1699                 dev_err(queue->ctrl->ctrl.device,
1700                         "got bad command_id %#x on QP %#x\n",
1701                         cqe->command_id, queue->qp->qp_num);
1702                 nvme_rdma_error_recovery(queue->ctrl);
1703                 return;
1704         }
1705         req = blk_mq_rq_to_pdu(rq);
1706
1707         req->status = cqe->status;
1708         req->result = cqe->result;
1709
1710         if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
1711                 if (unlikely(!req->mr ||
1712                              wc->ex.invalidate_rkey != req->mr->rkey)) {
1713                         dev_err(queue->ctrl->ctrl.device,
1714                                 "Bogus remote invalidation for rkey %#x\n",
1715                                 req->mr ? req->mr->rkey : 0);
1716                         nvme_rdma_error_recovery(queue->ctrl);
1717                 }
1718         } else if (req->mr) {
1719                 int ret;
1720
1721                 ret = nvme_rdma_inv_rkey(queue, req);
1722                 if (unlikely(ret < 0)) {
1723                         dev_err(queue->ctrl->ctrl.device,
1724                                 "Queueing INV WR for rkey %#x failed (%d)\n",
1725                                 req->mr->rkey, ret);
1726                         nvme_rdma_error_recovery(queue->ctrl);
1727                 }
1728                 /* the local invalidation completion will end the request */
1729                 return;
1730         }
1731
1732         nvme_rdma_end_request(req);
1733 }
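/*
 * Request completion is refcounted: nvme_rdma_map_data() takes two
 * references, one dropped by the SEND completion and one by the path
 * above, either directly via nvme_rdma_end_request() or by the
 * local-invalidate completion when an INV WR had to be posted.
 */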
1734
1735 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1736 {
1737         struct nvme_rdma_qe *qe =
1738                 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
1739         struct nvme_rdma_queue *queue = wc->qp->qp_context;
1740         struct ib_device *ibdev = queue->device->dev;
1741         struct nvme_completion *cqe = qe->data;
1742         const size_t len = sizeof(struct nvme_completion);
1743
1744         if (unlikely(wc->status != IB_WC_SUCCESS)) {
1745                 nvme_rdma_wr_error(cq, wc, "RECV");
1746                 return;
1747         }
1748
1749         /* sanity checking for received data length */
1750         if (unlikely(wc->byte_len < len)) {
1751                 dev_err(queue->ctrl->ctrl.device,
1752                         "Unexpected nvme completion length (%d)\n", wc->byte_len);
1753                 nvme_rdma_error_recovery(queue->ctrl);
1754                 return;
1755         }
1756
1757         ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1758         /*
1759          * AEN requests are special as they don't time out and can
1760          * survive any kind of queue freeze and often don't respond to
1761          * aborts.  We don't even bother to allocate a struct request
1762          * for them but rather special case them here.
1763          */
1764         if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
1765                                      cqe->command_id)))
1766                 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1767                                 &cqe->result);
1768         else
1769                 nvme_rdma_process_nvme_rsp(queue, cqe, wc);
1770         ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1771
1772         nvme_rdma_post_recv(queue, qe);
1773 }
1774
1775 static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
1776 {
1777         int ret, i;
1778
1779         for (i = 0; i < queue->queue_size; i++) {
1780                 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
1781                 if (ret)
1782                         return ret;
1783         }
1784
1785         return 0;
1786 }
1787
1788 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
1789                 struct rdma_cm_event *ev)
1790 {
1791         struct rdma_cm_id *cm_id = queue->cm_id;
1792         int status = ev->status;
1793         const char *rej_msg;
1794         const struct nvme_rdma_cm_rej *rej_data;
1795         u8 rej_data_len;
1796
1797         rej_msg = rdma_reject_msg(cm_id, status);
1798         rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
1799
1800         if (rej_data && rej_data_len >= sizeof(u16)) {
1801                 u16 sts = le16_to_cpu(rej_data->sts);
1802
1803                 dev_err(queue->ctrl->ctrl.device,
1804                       "Connect rejected: status %d (%s) nvme status %d (%s).\n",
1805                       status, rej_msg, sts, nvme_rdma_cm_msg(sts));
1806         } else {
1807                 dev_err(queue->ctrl->ctrl.device,
1808                         "Connect rejected: status %d (%s).\n", status, rej_msg);
1809         }
1810
1811         return -ECONNRESET;
1812 }
1813
1814 static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
1815 {
1816         struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
1817         int ret;
1818
1819         ret = nvme_rdma_create_queue_ib(queue);
1820         if (ret)
1821                 return ret;
1822
1823         if (ctrl->opts->tos >= 0)
1824                 rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
1825         ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
1826         if (ret) {
1827                 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
1828                         ret);
1829                 goto out_destroy_queue;
1830         }
1831
1832         return 0;
1833
1834 out_destroy_queue:
1835         nvme_rdma_destroy_queue_ib(queue);
1836         return ret;
1837 }
1838
1839 static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1840 {
1841         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1842         struct rdma_conn_param param = { };
1843         struct nvme_rdma_cm_req priv = { };
1844         int ret;
1845
1846         param.qp_num = queue->qp->qp_num;
1847         param.flow_control = 1;
1848
1849         param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
1850         /* maximum retry count */
1851         param.retry_count = 7;
1852         param.rnr_retry_count = 7;
1853         param.private_data = &priv;
1854         param.private_data_len = sizeof(priv);
1855
1856         priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1857         priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
1858         /*
1859          * set the admin queue depth to the minimum size
1860          * specified by the Fabrics standard.
1861          */
1862         if (priv.qid == 0) {
1863                 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
1864                 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
1865         } else {
1866                 /*
1867                  * The current interpretation of the Fabrics spec is
1868                  * that, at a minimum, you make hrqsize sqsize + 1, i.e.
1869                  * a 1's based representation of sqsize.
1870                  */
1871                 priv.hrqsize = cpu_to_le16(queue->queue_size);
1872                 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
1873                 /* cntlid should only be set when creating an I/O queue */
1874                 priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid);
1875         }
1876
1877         ret = rdma_connect_locked(queue->cm_id, &param);
1878         if (ret) {
1879                 dev_err(ctrl->ctrl.device,
1880                         "rdma_connect_locked failed (%d).\n", ret);
1881                 return ret;
1882         }
1883
1884         return 0;
1885 }
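/*
 * Example of the queue size encoding above: an I/O queue created with
 * queue_size 128 advertises hrqsize = 128 (1's based) and
 * hsqsize = sqsize = 127 (0's based), matching the
 * sqsize = opts->queue_size - 1 assignment done at controller
 * allocation time.
 */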
1886
1887 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1888                 struct rdma_cm_event *ev)
1889 {
1890         struct nvme_rdma_queue *queue = cm_id->context;
1891         int cm_error = 0;
1892
1893         dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1894                 rdma_event_msg(ev->event), ev->event,
1895                 ev->status, cm_id);
1896
1897         switch (ev->event) {
1898         case RDMA_CM_EVENT_ADDR_RESOLVED:
1899                 cm_error = nvme_rdma_addr_resolved(queue);
1900                 break;
1901         case RDMA_CM_EVENT_ROUTE_RESOLVED:
1902                 cm_error = nvme_rdma_route_resolved(queue);
1903                 break;
1904         case RDMA_CM_EVENT_ESTABLISHED:
1905                 queue->cm_error = nvme_rdma_conn_established(queue);
1906                 /* complete cm_done regardless of success/failure */
1907                 complete(&queue->cm_done);
1908                 return 0;
1909         case RDMA_CM_EVENT_REJECTED:
1910                 cm_error = nvme_rdma_conn_rejected(queue, ev);
1911                 break;
1912         case RDMA_CM_EVENT_ROUTE_ERROR:
1913         case RDMA_CM_EVENT_CONNECT_ERROR:
1914         case RDMA_CM_EVENT_UNREACHABLE:
1915         case RDMA_CM_EVENT_ADDR_ERROR:
1916                 dev_dbg(queue->ctrl->ctrl.device,
1917                         "CM error event %d\n", ev->event);
1918                 cm_error = -ECONNRESET;
1919                 break;
1920         case RDMA_CM_EVENT_DISCONNECTED:
1921         case RDMA_CM_EVENT_ADDR_CHANGE:
1922         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1923                 dev_dbg(queue->ctrl->ctrl.device,
1924                         "disconnect received - connection closed\n");
1925                 nvme_rdma_error_recovery(queue->ctrl);
1926                 break;
1927         case RDMA_CM_EVENT_DEVICE_REMOVAL:
1928                 /* device removal is handled via the ib_client API */
1929                 break;
1930         default:
1931                 dev_err(queue->ctrl->ctrl.device,
1932                         "Unexpected RDMA CM event (%d)\n", ev->event);
1933                 nvme_rdma_error_recovery(queue->ctrl);
1934                 break;
1935         }
1936
1937         if (cm_error) {
1938                 queue->cm_error = cm_error;
1939                 complete(&queue->cm_done);
1940         }
1941
1942         return 0;
1943 }
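/*
 * Connection establishment walks this handler in order:
 * ADDR_RESOLVED creates the queue's IB resources and starts route
 * resolution, ROUTE_RESOLVED issues the CM connect request, and
 * ESTABLISHED posts the receive ring before completing cm_done.
 * Every failure path records cm_error and completes cm_done so the
 * queue-setup code waiting on it can bail out.
 */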
1944
1945 static void nvme_rdma_complete_timed_out(struct request *rq)
1946 {
1947         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1948         struct nvme_rdma_queue *queue = req->queue;
1949
1950         nvme_rdma_stop_queue(queue);
1951         nvmf_complete_timed_out_request(rq);
1952 }
1953
1954 static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
1955 {
1956         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1957         struct nvme_rdma_queue *queue = req->queue;
1958         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1959         struct nvme_command *cmd = req->req.cmd;
1960         int qid = nvme_rdma_queue_idx(queue);
1961
1962         dev_warn(ctrl->ctrl.device,
1963                  "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
1964                  rq->tag, nvme_cid(rq), cmd->common.opcode,
1965                  nvme_fabrics_opcode_str(qid, cmd), qid);
1966
1967         if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
1968                 /*
1969                  * If we are resetting, connecting or deleting, we should
1970                  * complete immediately because we may block the controller
1971                  * teardown or setup sequence:
1972                  * - ctrl disable/shutdown fabrics requests
1973                  * - connect requests
1974                  * - initialization admin requests
1975                  * - I/O requests that entered after unquiescing and
1976                  *   the controller stopped responding
1977                  *
1978                  * All other requests should be cancelled by the error
1979                  * recovery work, so it's fine that we fail it here.
1980                  */
1981                 nvme_rdma_complete_timed_out(rq);
1982                 return BLK_EH_DONE;
1983         }
1984
1985         /*
1986          * LIVE state should trigger the normal error recovery which will
1987          * handle completing this request.
1988          */
1989         nvme_rdma_error_recovery(ctrl);
1990         return BLK_EH_RESET_TIMER;
1991 }
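/*
 * BLK_EH_DONE tells the block layer this driver owns completion of
 * the timed-out request; BLK_EH_RESET_TIMER re-arms the timer, with
 * the expectation that error recovery will cancel or complete the
 * request shortly.
 */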
1992
1993 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
1994                 const struct blk_mq_queue_data *bd)
1995 {
1996         struct nvme_ns *ns = hctx->queue->queuedata;
1997         struct nvme_rdma_queue *queue = hctx->driver_data;
1998         struct request *rq = bd->rq;
1999         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2000         struct nvme_rdma_qe *sqe = &req->sqe;
2001         struct nvme_command *c = nvme_req(rq)->cmd;
2002         struct ib_device *dev;
2003         bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
2004         blk_status_t ret;
2005         int err;
2006
2007         WARN_ON_ONCE(rq->tag < 0);
2008
2009         if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2010                 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2011
2012         dev = queue->device->dev;
2013
2014         req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
2015                                          sizeof(struct nvme_command),
2016                                          DMA_TO_DEVICE);
2017         err = ib_dma_mapping_error(dev, req->sqe.dma);
2018         if (unlikely(err))
2019                 return BLK_STS_RESOURCE;
2020
2021         ib_dma_sync_single_for_cpu(dev, sqe->dma,
2022                         sizeof(struct nvme_command), DMA_TO_DEVICE);
2023
2024         ret = nvme_setup_cmd(ns, rq);
2025         if (ret)
2026                 goto unmap_qe;
2027
2028         nvme_start_request(rq);
2029
2030         if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
2031             queue->pi_support &&
2032             (c->common.opcode == nvme_cmd_write ||
2033              c->common.opcode == nvme_cmd_read) &&
2034             nvme_ns_has_pi(ns->head))
2035                 req->use_sig_mr = true;
2036         else
2037                 req->use_sig_mr = false;
2038
2039         err = nvme_rdma_map_data(queue, rq, c);
2040         if (unlikely(err < 0)) {
2041                 dev_err(queue->ctrl->ctrl.device,
2042                              "Failed to map data (%d)\n", err);
2043                 goto err;
2044         }
2045
2046         sqe->cqe.done = nvme_rdma_send_done;
2047
2048         ib_dma_sync_single_for_device(dev, sqe->dma,
2049                         sizeof(struct nvme_command), DMA_TO_DEVICE);
2050
2051         err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
2052                         req->mr ? &req->reg_wr.wr : NULL);
2053         if (unlikely(err))
2054                 goto err_unmap;
2055
2056         return BLK_STS_OK;
2057
2058 err_unmap:
2059         nvme_rdma_unmap_data(queue, rq);
2060 err:
2061         if (err == -EIO)
2062                 ret = nvme_host_path_error(rq);
2063         else if (err == -ENOMEM || err == -EAGAIN)
2064                 ret = BLK_STS_RESOURCE;
2065         else
2066                 ret = BLK_STS_IOERR;
2067         nvme_cleanup_cmd(rq);
2068 unmap_qe:
2069         ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
2070                             DMA_TO_DEVICE);
2071         return ret;
2072 }
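/*
 * Ordering in the submission path above matters: the SQE buffer is
 * synced for CPU access before nvme_setup_cmd() fills it in, synced
 * back for device access afterwards, and the SEND is chained behind
 * the MR registration WR whenever nvme_rdma_map_data() prepared one.
 */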
2073
2074 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
2075 {
2076         struct nvme_rdma_queue *queue = hctx->driver_data;
2077
2078         return ib_process_cq_direct(queue->ib_cq, -1);
2079 }
2080
2081 static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
2082 {
2083         struct request *rq = blk_mq_rq_from_pdu(req);
2084         struct ib_mr_status mr_status;
2085         int ret;
2086
2087         ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
2088         if (ret) {
2089                 pr_err("ib_check_mr_status failed, ret %d\n", ret);
2090                 nvme_req(rq)->status = NVME_SC_INVALID_PI;
2091                 return;
2092         }
2093
2094         if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
2095                 switch (mr_status.sig_err.err_type) {
2096                 case IB_SIG_BAD_GUARD:
2097                         nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
2098                         break;
2099                 case IB_SIG_BAD_REFTAG:
2100                         nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
2101                         break;
2102                 case IB_SIG_BAD_APPTAG:
2103                         nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
2104                         break;
2105                 }
2106                 pr_err("PI error found: type %d, expected 0x%x vs actual 0x%x\n",
2107                        mr_status.sig_err.err_type, mr_status.sig_err.expected,
2108                        mr_status.sig_err.actual);
2109         }
2110 }
2111
2112 static void nvme_rdma_complete_rq(struct request *rq)
2113 {
2114         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2115         struct nvme_rdma_queue *queue = req->queue;
2116         struct ib_device *ibdev = queue->device->dev;
2117
2118         if (req->use_sig_mr)
2119                 nvme_rdma_check_pi_status(req);
2120
2121         nvme_rdma_unmap_data(queue, rq);
2122         ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
2123                             DMA_TO_DEVICE);
2124         nvme_complete_rq(rq);
2125 }
2126
2127 static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
2128 {
2129         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
2130
2131         nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
2132 }
2133
2134 static const struct blk_mq_ops nvme_rdma_mq_ops = {
2135         .queue_rq       = nvme_rdma_queue_rq,
2136         .complete       = nvme_rdma_complete_rq,
2137         .init_request   = nvme_rdma_init_request,
2138         .exit_request   = nvme_rdma_exit_request,
2139         .init_hctx      = nvme_rdma_init_hctx,
2140         .timeout        = nvme_rdma_timeout,
2141         .map_queues     = nvme_rdma_map_queues,
2142         .poll           = nvme_rdma_poll,
2143 };
2144
2145 static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
2146         .queue_rq       = nvme_rdma_queue_rq,
2147         .complete       = nvme_rdma_complete_rq,
2148         .init_request   = nvme_rdma_init_request,
2149         .exit_request   = nvme_rdma_exit_request,
2150         .init_hctx      = nvme_rdma_init_admin_hctx,
2151         .timeout        = nvme_rdma_timeout,
2152 };
2153
2154 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
2155 {
2156         nvme_rdma_teardown_io_queues(ctrl, shutdown);
2157         nvme_quiesce_admin_queue(&ctrl->ctrl);
2158         nvme_disable_ctrl(&ctrl->ctrl, shutdown);
2159         nvme_rdma_teardown_admin_queue(ctrl, shutdown);
2160 }
2161
2162 static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
2163 {
2164         nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
2165 }
2166
2167 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
2168 {
2169         struct nvme_rdma_ctrl *ctrl =
2170                 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
2171         int ret;
2172
2173         nvme_stop_ctrl(&ctrl->ctrl);
2174         nvme_rdma_shutdown_ctrl(ctrl, false);
2175
2176         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2177                 /* state change failure should never happen */
2178                 WARN_ON_ONCE(1);
2179                 return;
2180         }
2181
2182         ret = nvme_rdma_setup_ctrl(ctrl, false);
2183         if (ret)
2184                 goto out_fail;
2185
2186         return;
2187
2188 out_fail:
2189         ++ctrl->ctrl.nr_reconnects;
2190         nvme_rdma_reconnect_or_remove(ctrl, ret);
2191 }
2192
2193 static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
2194         .name                   = "rdma",
2195         .module                 = THIS_MODULE,
2196         .flags                  = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
2197         .reg_read32             = nvmf_reg_read32,
2198         .reg_read64             = nvmf_reg_read64,
2199         .reg_write32            = nvmf_reg_write32,
2200         .subsystem_reset        = nvmf_subsystem_reset,
2201         .free_ctrl              = nvme_rdma_free_ctrl,
2202         .submit_async_event     = nvme_rdma_submit_async_event,
2203         .delete_ctrl            = nvme_rdma_delete_ctrl,
2204         .get_address            = nvmf_get_address,
2205         .stop_ctrl              = nvme_rdma_stop_ctrl,
2206 };
2207
2208 /*
2209  * Fails a connection request if it matches an existing controller
2210  * (association) with the same tuple:
2211  * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
2212  *
2213  * If a local address is not specified in the request, it will match an
2214  * existing controller that likewise has no local port address specified,
2215  * with all the other parameters the same.
2216  *
2217  * The ports don't need to be compared as they are intrinsically
2218  * already matched by the port pointers supplied.
2219  */
2220 static bool
2221 nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
2222 {
2223         struct nvme_rdma_ctrl *ctrl;
2224         bool found = false;
2225
2226         mutex_lock(&nvme_rdma_ctrl_mutex);
2227         list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2228                 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2229                 if (found)
2230                         break;
2231         }
2232         mutex_unlock(&nvme_rdma_ctrl_mutex);
2233
2234         return found;
2235 }
2236
2237 static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev,
2238                 struct nvmf_ctrl_options *opts)
2239 {
2240         struct nvme_rdma_ctrl *ctrl;
2241         int ret;
2242
2243         ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2244         if (!ctrl)
2245                 return ERR_PTR(-ENOMEM);
2246         ctrl->ctrl.opts = opts;
2247         INIT_LIST_HEAD(&ctrl->list);
2248
2249         if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2250                 opts->trsvcid =
2251                         kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
2252                 if (!opts->trsvcid) {
2253                         ret = -ENOMEM;
2254                         goto out_free_ctrl;
2255                 }
2256                 opts->mask |= NVMF_OPT_TRSVCID;
2257         }
2258
2259         ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2260                         opts->traddr, opts->trsvcid, &ctrl->addr);
2261         if (ret) {
2262                 pr_err("malformed address passed: %s:%s\n",
2263                         opts->traddr, opts->trsvcid);
2264                 goto out_free_ctrl;
2265         }
2266
2267         if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2268                 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2269                         opts->host_traddr, NULL, &ctrl->src_addr);
2270                 if (ret) {
2271                         pr_err("malformed src address passed: %s\n",
2272                                opts->host_traddr);
2273                         goto out_free_ctrl;
2274                 }
2275         }
2276
2277         if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
2278                 ret = -EALREADY;
2279                 goto out_free_ctrl;
2280         }
2281
2282         INIT_DELAYED_WORK(&ctrl->reconnect_work,
2283                         nvme_rdma_reconnect_ctrl_work);
2284         INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
2285         INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
2286
2287         ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2288                                 opts->nr_poll_queues + 1;
2289         ctrl->ctrl.sqsize = opts->queue_size - 1;
2290         ctrl->ctrl.kato = opts->kato;
2291
2292         ret = -ENOMEM;
2293         ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2294                                 GFP_KERNEL);
2295         if (!ctrl->queues)
2296                 goto out_free_ctrl;
2297
2298         ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
2299                                 0 /* no quirks, we're perfect! */);
2300         if (ret)
2301                 goto out_kfree_queues;
2302
2303         return ctrl;
2304
2305 out_kfree_queues:
2306         kfree(ctrl->queues);
2307 out_free_ctrl:
2308         kfree(ctrl);
2309         return ERR_PTR(ret);
2310 }
2311
2312 static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2313                 struct nvmf_ctrl_options *opts)
2314 {
2315         struct nvme_rdma_ctrl *ctrl;
2316         bool changed;
2317         int ret;
2318
2319         ctrl = nvme_rdma_alloc_ctrl(dev, opts);
2320         if (IS_ERR(ctrl))
2321                 return ERR_CAST(ctrl);
2322
2323         ret = nvme_add_ctrl(&ctrl->ctrl);
2324         if (ret)
2325                 goto out_put_ctrl;
2326
2327         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
2328         WARN_ON_ONCE(!changed);
2329
2330         ret = nvme_rdma_setup_ctrl(ctrl, true);
2331         if (ret)
2332                 goto out_uninit_ctrl;
2333
2334         dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
2335                 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
2336
2337         mutex_lock(&nvme_rdma_ctrl_mutex);
2338         list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
2339         mutex_unlock(&nvme_rdma_ctrl_mutex);
2340
2341         return &ctrl->ctrl;
2342
2343 out_uninit_ctrl:
2344         nvme_uninit_ctrl(&ctrl->ctrl);
2345 out_put_ctrl:
2346         nvme_put_ctrl(&ctrl->ctrl);
2347         if (ret > 0)
2348                 ret = -EIO;
2349         return ERR_PTR(ret);
2350 }
2351
2352 static struct nvmf_transport_ops nvme_rdma_transport = {
2353         .name           = "rdma",
2354         .module         = THIS_MODULE,
2355         .required_opts  = NVMF_OPT_TRADDR,
2356         .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2357                           NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2358                           NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2359                           NVMF_OPT_TOS,
2360         .create_ctrl    = nvme_rdma_create_ctrl,
2361 };
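/*
 * With this transport registered, controllers are typically created
 * from userspace via nvme-cli, for example (address, port and NQN
 * are illustrative):
 *
 *   nvme connect -t rdma -a 192.168.1.10 -s 4420 -n <subsystem NQN>
 *
 * -a and -s correspond to the NVMF_OPT_TRADDR and NVMF_OPT_TRSVCID
 * options parsed in nvme_rdma_alloc_ctrl().
 */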
2362
2363 static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2364 {
2365         struct nvme_rdma_ctrl *ctrl;
2366         struct nvme_rdma_device *ndev;
2367         bool found = false;
2368
2369         mutex_lock(&device_list_mutex);
2370         list_for_each_entry(ndev, &device_list, entry) {
2371                 if (ndev->dev == ib_device) {
2372                         found = true;
2373                         break;
2374                 }
2375         }
2376         mutex_unlock(&device_list_mutex);
2377
2378         if (!found)
2379                 return;
2380
2381         /* Delete all controllers using this device */
2382         mutex_lock(&nvme_rdma_ctrl_mutex);
2383         list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2384                 if (ctrl->device->dev != ib_device)
2385                         continue;
2386                 nvme_delete_ctrl(&ctrl->ctrl);
2387         }
2388         mutex_unlock(&nvme_rdma_ctrl_mutex);
2389
2390         flush_workqueue(nvme_delete_wq);
2391 }
2392
2393 static struct ib_client nvme_rdma_ib_client = {
2394         .name   = "nvme_rdma",
2395         .remove = nvme_rdma_remove_one
2396 };
2397
2398 static int __init nvme_rdma_init_module(void)
2399 {
2400         int ret;
2401
2402         ret = ib_register_client(&nvme_rdma_ib_client);
2403         if (ret)
2404                 return ret;
2405
2406         ret = nvmf_register_transport(&nvme_rdma_transport);
2407         if (ret)
2408                 goto err_unreg_client;
2409
2410         return 0;
2411
2412 err_unreg_client:
2413         ib_unregister_client(&nvme_rdma_ib_client);
2414         return ret;
2415 }
2416
2417 static void __exit nvme_rdma_cleanup_module(void)
2418 {
2419         struct nvme_rdma_ctrl *ctrl;
2420
2421         nvmf_unregister_transport(&nvme_rdma_transport);
2422         ib_unregister_client(&nvme_rdma_ib_client);
2423
2424         mutex_lock(&nvme_rdma_ctrl_mutex);
2425         list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2426                 nvme_delete_ctrl(&ctrl->ctrl);
2427         mutex_unlock(&nvme_rdma_ctrl_mutex);
2428         flush_workqueue(nvme_delete_wq);
2429 }
2430
2431 module_init(nvme_rdma_init_module);
2432 module_exit(nvme_rdma_cleanup_module);
2433
2434 MODULE_DESCRIPTION("NVMe host RDMA transport driver");
2435 MODULE_LICENSE("GPL v2");