drivers/nvme/host/rdma.c (linux.git blame, Linux 6.14-rc3)
5d8762d5 1// SPDX-License-Identifier: GPL-2.0
71102307
CH
2/*
3 * NVMe over Fabrics RDMA host code.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
71102307
CH
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
71102307
CH
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
f41725bb 10#include <rdma/mr_pool.h>
71102307
CH
11#include <linux/err.h>
12#include <linux/string.h>
71102307
CH
13#include <linux/atomic.h>
14#include <linux/blk-mq.h>
fe45e630 15#include <linux/blk-integrity.h>
71102307
CH
16#include <linux/types.h>
17#include <linux/list.h>
18#include <linux/mutex.h>
19#include <linux/scatterlist.h>
20#include <linux/nvme.h>
5f60d5f6 21#include <linux/unaligned.h>
71102307
CH
22
23#include <rdma/ib_verbs.h>
24#include <rdma/rdma_cm.h>
71102307
CH
25#include <linux/nvme-rdma.h>
26
27#include "nvme.h"
28#include "fabrics.h"
29
30
0525af71 31#define NVME_RDMA_CM_TIMEOUT_MS 3000 /* 3 seconds */
71102307 32
71102307
CH
33#define NVME_RDMA_MAX_SEGMENTS 256
34
64a741c1 35#define NVME_RDMA_MAX_INLINE_SEGMENTS 4
71102307 36
5ec5d3bd
MG
37#define NVME_RDMA_DATA_SGL_SIZE \
38 (sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
39#define NVME_RDMA_METADATA_SGL_SIZE \
40 (sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)
41
71102307 42struct nvme_rdma_device {
f87c89ad
MG
43 struct ib_device *dev;
44 struct ib_pd *pd;
71102307
CH
45 struct kref ref;
46 struct list_head entry;
64a741c1 47 unsigned int num_inline_segments;
71102307
CH
48};
49
50struct nvme_rdma_qe {
51 struct ib_cqe cqe;
52 void *data;
53 u64 dma;
54};
55
324d9e78
IR
56struct nvme_rdma_sgl {
57 int nents;
58 struct sg_table sg_table;
59};
60
71102307
CH
61struct nvme_rdma_queue;
62struct nvme_rdma_request {
d49187e9 63 struct nvme_request req;
71102307
CH
64 struct ib_mr *mr;
65 struct nvme_rdma_qe sqe;
4af7f7ff
SG
66 union nvme_result result;
67 __le16 status;
68 refcount_t ref;
71102307
CH
69 struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
70 u32 num_sge;
71102307
CH
71 struct ib_reg_wr reg_wr;
72 struct ib_cqe reg_cqe;
73 struct nvme_rdma_queue *queue;
324d9e78 74 struct nvme_rdma_sgl data_sgl;
5ec5d3bd
MG
75 struct nvme_rdma_sgl *metadata_sgl;
76 bool use_sig_mr;
71102307
CH
77};
78
79enum nvme_rdma_queue_flags {
5013e98b
SG
80 NVME_RDMA_Q_ALLOCATED = 0,
81 NVME_RDMA_Q_LIVE = 1,
eb1bd249 82 NVME_RDMA_Q_TR_READY = 2,
71102307
CH
83};
84
85struct nvme_rdma_queue {
86 struct nvme_rdma_qe *rsp_ring;
71102307
CH
87 int queue_size;
88 size_t cmnd_capsule_len;
89 struct nvme_rdma_ctrl *ctrl;
90 struct nvme_rdma_device *device;
91 struct ib_cq *ib_cq;
92 struct ib_qp *qp;
93
94 unsigned long flags;
95 struct rdma_cm_id *cm_id;
96 int cm_error;
97 struct completion cm_done;
5ec5d3bd 98 bool pi_support;
287f329e 99 int cq_size;
7674073b 100 struct mutex queue_lock;
71102307
CH
101};
102
103struct nvme_rdma_ctrl {
71102307
CH
104 /* read only in the hot path */
105 struct nvme_rdma_queue *queues;
71102307
CH
106
107 /* other member variables */
71102307 108 struct blk_mq_tag_set tag_set;
71102307
CH
109 struct work_struct err_work;
110
111 struct nvme_rdma_qe async_event_sqe;
112
71102307
CH
113 struct delayed_work reconnect_work;
114
115 struct list_head list;
116
117 struct blk_mq_tag_set admin_tag_set;
118 struct nvme_rdma_device *device;
119
71102307
CH
120 u32 max_fr_pages;
121
0928f9b4
SG
122 struct sockaddr_storage addr;
123 struct sockaddr_storage src_addr;
71102307
CH
124
125 struct nvme_ctrl ctrl;
64a741c1 126 bool use_inline_data;
b1064d3e 127 u32 io_queues[HCTX_MAX_TYPES];
71102307
CH
128};
129
130static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
131{
132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
133}
134
135static LIST_HEAD(device_list);
136static DEFINE_MUTEX(device_list_mutex);
137
138static LIST_HEAD(nvme_rdma_ctrl_list);
139static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
140
71102307
CH
141/*
142 * Disabling this option makes small I/O go faster, but is fundamentally
143 * unsafe. With it turned off we will have to register a global rkey that
144 * allows read and write access to all physical memory.
145 */
146static bool register_always = true;
147module_param(register_always, bool, 0444);
148MODULE_PARM_DESC(register_always,
149 "Use memory registration even for contiguous memory regions");
150
151static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
152 struct rdma_cm_event *event);
153static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
ff029451 154static void nvme_rdma_complete_rq(struct request *rq);
71102307 155
90af3512
SG
156static const struct blk_mq_ops nvme_rdma_mq_ops;
157static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
158
71102307
CH
159static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
160{
161 return queue - queue->ctrl->queues;
162}
163
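/*
 * Queues are laid out as admin (index 0), then default, read and poll I/O
 * queues. Any queue whose index falls beyond the default + read counts is
 * a poll queue and is completed by direct CQ polling (see
 * nvme_rdma_create_cq()).
 */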
ff8519f9
SG
164static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
165{
166 return nvme_rdma_queue_idx(queue) >
b1064d3e
SG
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
168 queue->ctrl->io_queues[HCTX_TYPE_READ];
ff8519f9
SG
169}
170
71102307
CH
171static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
172{
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command);
174}
175
176static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
177 size_t capsule_size, enum dma_data_direction dir)
178{
179 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
180 kfree(qe->data);
181}
182
183static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
184 size_t capsule_size, enum dma_data_direction dir)
185{
186 qe->data = kzalloc(capsule_size, GFP_KERNEL);
187 if (!qe->data)
188 return -ENOMEM;
189
190 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
191 if (ib_dma_mapping_error(ibdev, qe->dma)) {
192 kfree(qe->data);
6344d02d 193 qe->data = NULL;
71102307
CH
194 return -ENOMEM;
195 }
196
197 return 0;
198}
199
200static void nvme_rdma_free_ring(struct ib_device *ibdev,
201 struct nvme_rdma_qe *ring, size_t ib_queue_size,
202 size_t capsule_size, enum dma_data_direction dir)
203{
204 int i;
205
206 for (i = 0; i < ib_queue_size; i++)
207 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
208 kfree(ring);
209}
210
211static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
212 size_t ib_queue_size, size_t capsule_size,
213 enum dma_data_direction dir)
214{
215 struct nvme_rdma_qe *ring;
216 int i;
217
218 ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
219 if (!ring)
220 return NULL;
221
62f99b62
MG
222 /*
223 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
224 * lifetime. It's safe, since any change in the underlying RDMA device
225 * will issue error recovery and queue re-creation.
226 */
71102307
CH
227 for (i = 0; i < ib_queue_size; i++) {
228 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
229 goto out_free_ring;
230 }
231
232 return ring;
233
234out_free_ring:
235 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
236 return NULL;
237}
238
239static void nvme_rdma_qp_event(struct ib_event *event, void *context)
240{
27a4beef
MG
241 pr_debug("QP event %s (%d)\n",
242 ib_event_msg(event->event), event->event);
243
71102307
CH
244}
245
246static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
247{
35da77d5
BVA
248 int ret;
249
0525af71
IR
250 ret = wait_for_completion_interruptible(&queue->cm_done);
251 if (ret)
35da77d5 252 return ret;
35da77d5 253 WARN_ON_ONCE(queue->cm_error > 0);
71102307
CH
254 return queue->cm_error;
255}
256
257static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
258{
259 struct nvme_rdma_device *dev = queue->device;
260 struct ib_qp_init_attr init_attr;
261 int ret;
262
263 memset(&init_attr, 0, sizeof(init_attr));
264 init_attr.event_handler = nvme_rdma_qp_event;
265 /* +1 for drain */
266 init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
267 /* +1 for drain */
268 init_attr.cap.max_recv_wr = queue->queue_size + 1;
269 init_attr.cap.max_recv_sge = 1;
64a741c1 270 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
71102307
CH
271 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
272 init_attr.qp_type = IB_QPT_RC;
273 init_attr.send_cq = queue->ib_cq;
274 init_attr.recv_cq = queue->ib_cq;
5ec5d3bd
MG
275 if (queue->pi_support)
276 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
287f329e 277 init_attr.qp_context = queue;
71102307
CH
278
279 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
280
281 queue->qp = queue->cm_id->qp;
282 return ret;
283}
284
385475ee
CH
285static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
286 struct request *rq, unsigned int hctx_idx)
71102307
CH
287{
288 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307 289
62f99b62 290 kfree(req->sqe.data);
71102307
CH
291}
292
385475ee
CH
293static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
294 struct request *rq, unsigned int hctx_idx,
295 unsigned int numa_node)
71102307 296{
2d60738c 297 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
71102307 298 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
385475ee 299 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
71102307 300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
71102307 301
59e29ce6 302 nvme_req(rq)->ctrl = &ctrl->ctrl;
62f99b62
MG
303 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
304 if (!req->sqe.data)
305 return -ENOMEM;
71102307 306
5ec5d3bd
MG
307 /* metadata nvme_rdma_sgl struct is located after command's data SGL */
308 if (queue->pi_support)
309 req->metadata_sgl = (void *)nvme_req(rq) +
310 sizeof(struct nvme_rdma_request) +
311 NVME_RDMA_DATA_SGL_SIZE;
312
71102307 313 req->queue = queue;
f4b9e6c9 314 nvme_req(rq)->cmd = req->sqe.data;
71102307
CH
315
316 return 0;
71102307
CH
317}
318
71102307
CH
319static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
320 unsigned int hctx_idx)
321{
2d60738c 322 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
71102307
CH
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
324
d858e5f0 325 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
71102307
CH
326
327 hctx->driver_data = queue;
328 return 0;
329}
330
331static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
332 unsigned int hctx_idx)
333{
2d60738c 334 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(data);
71102307
CH
335 struct nvme_rdma_queue *queue = &ctrl->queues[0];
336
337 BUG_ON(hctx_idx != 0);
338
339 hctx->driver_data = queue;
340 return 0;
341}
342
343static void nvme_rdma_free_dev(struct kref *ref)
344{
345 struct nvme_rdma_device *ndev =
346 container_of(ref, struct nvme_rdma_device, ref);
347
348 mutex_lock(&device_list_mutex);
349 list_del(&ndev->entry);
350 mutex_unlock(&device_list_mutex);
351
71102307 352 ib_dealloc_pd(ndev->pd);
71102307
CH
353 kfree(ndev);
354}
355
356static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
357{
358 kref_put(&dev->ref, nvme_rdma_free_dev);
359}
360
361static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
362{
363 return kref_get_unless_zero(&dev->ref);
364}
365
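/*
 * RDMA devices (and their PDs) are shared between all controllers that
 * connect through the same ib_device; lookup is keyed on the device node
 * GUID and the lifetime is managed by a kref under device_list_mutex.
 */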
366static struct nvme_rdma_device *
367nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
368{
369 struct nvme_rdma_device *ndev;
370
371 mutex_lock(&device_list_mutex);
372 list_for_each_entry(ndev, &device_list, entry) {
373 if (ndev->dev->node_guid == cm_id->device->node_guid &&
374 nvme_rdma_dev_get(ndev))
375 goto out_unlock;
376 }
377
378 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
379 if (!ndev)
380 goto out_err;
381
382 ndev->dev = cm_id->device;
383 kref_init(&ndev->ref);
384
11975e01
CH
385 ndev->pd = ib_alloc_pd(ndev->dev,
386 register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
71102307
CH
387 if (IS_ERR(ndev->pd))
388 goto out_free_dev;
389
71102307
CH
390 if (!(ndev->dev->attrs.device_cap_flags &
391 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
392 dev_err(&ndev->dev->dev,
393 "Memory registrations not supported.\n");
11975e01 394 goto out_free_pd;
71102307
CH
395 }
396
64a741c1 397 ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
0a3173a5 398 ndev->dev->attrs.max_send_sge - 1);
71102307
CH
399 list_add(&ndev->entry, &device_list);
400out_unlock:
401 mutex_unlock(&device_list_mutex);
402 return ndev;
403
71102307
CH
404out_free_pd:
405 ib_dealloc_pd(ndev->pd);
406out_free_dev:
407 kfree(ndev);
408out_err:
409 mutex_unlock(&device_list_mutex);
410 return NULL;
411}
412
287f329e
YF
413static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
414{
415 if (nvme_rdma_poll_queue(queue))
416 ib_free_cq(queue->ib_cq);
417 else
418 ib_cq_pool_put(queue->ib_cq, queue->cq_size);
419}
420
71102307
CH
421static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
422{
eb1bd249
MG
423 struct nvme_rdma_device *dev;
424 struct ib_device *ibdev;
425
426 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
427 return;
428
429 dev = queue->device;
430 ibdev = dev->dev;
71102307 431
5ec5d3bd
MG
432 if (queue->pi_support)
433 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
f41725bb
IR
434 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
435
eb1bd249
MG
436 /*
437 * The cm_id object might have been destroyed during RDMA connection
438 * establishment error flow in order to avoid getting further cma events,
439 * so the QP must be destroyed directly instead of through the rdma_cm API.
440 */
441 ib_destroy_qp(queue->qp);
287f329e 442 nvme_rdma_free_cq(queue);
71102307
CH
443
444 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
445 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
446
447 nvme_rdma_dev_put(dev);
448}
449
5ec5d3bd 450static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
f41725bb 451{
5ec5d3bd
MG
452 u32 max_page_list_len;
453
454 if (pi_support)
455 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
456 else
457 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
458
459 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
f41725bb
IR
460}
461
287f329e
YF
462static int nvme_rdma_create_cq(struct ib_device *ibdev,
463 struct nvme_rdma_queue *queue)
464{
465 int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
287f329e
YF
466
467 /*
468 * Spread I/O queue completion vectors according to their queue index.
469 * Admin queues can always go on completion vector 0.
470 */
471 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
472
473 /* Polling queues need direct cq polling context */
015ad2b1 474 if (nvme_rdma_poll_queue(queue))
287f329e 475 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
015ad2b1 476 comp_vector, IB_POLL_DIRECT);
477 else
287f329e 478 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
015ad2b1 479 comp_vector, IB_POLL_SOFTIRQ);
287f329e
YF
480
481 if (IS_ERR(queue->ib_cq)) {
482 ret = PTR_ERR(queue->ib_cq);
483 return ret;
484 }
485
486 return 0;
487}
488
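/*
 * Allocate the IB resources for one queue: completion queue, queue pair,
 * response (recv) ring and the MR pool (plus the integrity MR pool when PI
 * is enabled). Called once the RDMA address has been resolved.
 */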
ca6e95bb 489static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
71102307 490{
ca6e95bb 491 struct ib_device *ibdev;
71102307
CH
492 const int send_wr_factor = 3; /* MR, SEND, INV */
493 const int cq_factor = send_wr_factor + 1; /* + RECV */
ff13c1b8 494 int ret, pages_per_mr;
71102307 495
ca6e95bb
SG
496 queue->device = nvme_rdma_find_get_device(queue->cm_id);
497 if (!queue->device) {
498 dev_err(queue->cm_id->device->dev.parent,
499 "no client data found!\n");
500 return -ECONNREFUSED;
501 }
502 ibdev = queue->device->dev;
71102307 503
f3f28373 504 /* +1 for ib_drain_qp */
287f329e
YF
505 queue->cq_size = cq_factor * queue->queue_size + 1;
506
507 ret = nvme_rdma_create_cq(ibdev, queue);
508 if (ret)
ca6e95bb 509 goto out_put_dev;
71102307
CH
510
511 ret = nvme_rdma_create_qp(queue, send_wr_factor);
512 if (ret)
513 goto out_destroy_ib_cq;
514
515 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
516 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
517 if (!queue->rsp_ring) {
518 ret = -ENOMEM;
519 goto out_destroy_qp;
520 }
521
ff13c1b8
MG
522 /*
523 * Currently we don't use SG_GAPS MRs, so if the first entry is
524 * misaligned we'll end up using two entries for a single data page;
525 * one additional entry is therefore required.
526 */
5ec5d3bd 527 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
f41725bb
IR
528 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
529 queue->queue_size,
530 IB_MR_TYPE_MEM_REG,
ff13c1b8 531 pages_per_mr, 0);
f41725bb
IR
532 if (ret) {
533 dev_err(queue->ctrl->ctrl.device,
534 "failed to initialize MR pool sized %d for QID %d\n",
287f329e 535 queue->queue_size, nvme_rdma_queue_idx(queue));
f41725bb
IR
536 goto out_destroy_ring;
537 }
538
5ec5d3bd
MG
539 if (queue->pi_support) {
540 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
541 queue->queue_size, IB_MR_TYPE_INTEGRITY,
542 pages_per_mr, pages_per_mr);
543 if (ret) {
544 dev_err(queue->ctrl->ctrl.device,
545 "failed to initialize PI MR pool sized %d for QID %d\n",
287f329e 546 queue->queue_size, nvme_rdma_queue_idx(queue));
5ec5d3bd
MG
547 goto out_destroy_mr_pool;
548 }
549 }
550
eb1bd249
MG
551 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
552
71102307
CH
553 return 0;
554
5ec5d3bd
MG
555out_destroy_mr_pool:
556 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
f41725bb
IR
557out_destroy_ring:
558 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
559 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
71102307 560out_destroy_qp:
1f61def9 561 rdma_destroy_qp(queue->cm_id);
71102307 562out_destroy_ib_cq:
287f329e 563 nvme_rdma_free_cq(queue);
ca6e95bb
SG
564out_put_dev:
565 nvme_rdma_dev_put(queue->device);
71102307
CH
566 return ret;
567}
568
41e8cfa1 569static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
71102307
CH
570 int idx, size_t queue_size)
571{
572 struct nvme_rdma_queue *queue;
8f4e8dac 573 struct sockaddr *src_addr = NULL;
71102307
CH
574 int ret;
575
576 queue = &ctrl->queues[idx];
7674073b 577 mutex_init(&queue->queue_lock);
71102307 578 queue->ctrl = ctrl;
5ec5d3bd
MG
579 if (idx && ctrl->ctrl.max_integrity_segments)
580 queue->pi_support = true;
581 else
582 queue->pi_support = false;
71102307
CH
583 init_completion(&queue->cm_done);
584
585 if (idx > 0)
586 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
587 else
588 queue->cmnd_capsule_len = sizeof(struct nvme_command);
589
590 queue->queue_size = queue_size;
591
592 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
593 RDMA_PS_TCP, IB_QPT_RC);
594 if (IS_ERR(queue->cm_id)) {
595 dev_info(ctrl->ctrl.device,
596 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
7674073b
CL
597 ret = PTR_ERR(queue->cm_id);
598 goto out_destroy_mutex;
71102307
CH
599 }
600
8f4e8dac 601 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
0928f9b4 602 src_addr = (struct sockaddr *)&ctrl->src_addr;
8f4e8dac 603
0928f9b4
SG
604 queue->cm_error = -ETIMEDOUT;
605 ret = rdma_resolve_addr(queue->cm_id, src_addr,
606 (struct sockaddr *)&ctrl->addr,
0525af71 607 NVME_RDMA_CM_TIMEOUT_MS);
71102307
CH
608 if (ret) {
609 dev_info(ctrl->ctrl.device,
610 "rdma_resolve_addr failed (%d).\n", ret);
611 goto out_destroy_cm_id;
612 }
613
614 ret = nvme_rdma_wait_for_cm(queue);
615 if (ret) {
616 dev_info(ctrl->ctrl.device,
d8bfceeb 617 "rdma connection establishment failed (%d)\n", ret);
71102307
CH
618 goto out_destroy_cm_id;
619 }
620
5013e98b 621 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
71102307
CH
622
623 return 0;
624
625out_destroy_cm_id:
626 rdma_destroy_id(queue->cm_id);
eb1bd249 627 nvme_rdma_destroy_queue_ib(queue);
7674073b
CL
628out_destroy_mutex:
629 mutex_destroy(&queue->queue_lock);
71102307
CH
630 return ret;
631}
632
d94211b8
SG
633static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
634{
635 rdma_disconnect(queue->cm_id);
636 ib_drain_qp(queue->qp);
637}
638
71102307
CH
639static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
640{
3820c4fd
ML
641 if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
642 return;
643
7674073b
CL
644 mutex_lock(&queue->queue_lock);
645 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
646 __nvme_rdma_stop_queue(queue);
647 mutex_unlock(&queue->queue_lock);
71102307
CH
648}
649
650static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
651{
5013e98b 652 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
a57bd541
SG
653 return;
654
71102307 655 rdma_destroy_id(queue->cm_id);
9817d763 656 nvme_rdma_destroy_queue_ib(queue);
7674073b 657 mutex_destroy(&queue->queue_lock);
71102307
CH
658}
659
a57bd541 660static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307 661{
a57bd541
SG
662 int i;
663
664 for (i = 1; i < ctrl->ctrl.queue_count; i++)
665 nvme_rdma_free_queue(&ctrl->queues[i]);
71102307
CH
666}
667
a57bd541 668static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307
CH
669{
670 int i;
671
d858e5f0 672 for (i = 1; i < ctrl->ctrl.queue_count; i++)
a57bd541 673 nvme_rdma_stop_queue(&ctrl->queues[i]);
71102307
CH
674}
675
68e16fcf
SG
676static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
677{
ff8519f9 678 struct nvme_rdma_queue *queue = &ctrl->queues[idx];
68e16fcf
SG
679 int ret;
680
681 if (idx)
be42a33b 682 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
68e16fcf
SG
683 else
684 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
685
d94211b8 686 if (!ret) {
ff8519f9 687 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
d94211b8 688 } else {
67b483dd
SG
689 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
690 __nvme_rdma_stop_queue(queue);
68e16fcf
SG
691 dev_info(ctrl->ctrl.device,
692 "failed to connect queue: %d ret=%d\n", idx, ret);
d94211b8 693 }
68e16fcf
SG
694 return ret;
695}
696
1c467e25
DW
697static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl,
698 int first, int last)
71102307
CH
699{
700 int i, ret = 0;
701
1c467e25 702 for (i = first; i < last; i++) {
68e16fcf
SG
703 ret = nvme_rdma_start_queue(ctrl, i);
704 if (ret)
a57bd541 705 goto out_stop_queues;
71102307
CH
706 }
707
c8dbc37c
SW
708 return 0;
709
a57bd541 710out_stop_queues:
1c467e25 711 for (i--; i >= first; i--)
68e16fcf 712 nvme_rdma_stop_queue(&ctrl->queues[i]);
71102307
CH
713 return ret;
714}
715
41e8cfa1 716static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307 717{
c248c643 718 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
a249d306 719 unsigned int nr_io_queues;
71102307
CH
720 int i, ret;
721
a249d306 722 nr_io_queues = nvmf_nr_io_queues(opts);
c248c643
SG
723 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
724 if (ret)
725 return ret;
726
85032874 727 if (nr_io_queues == 0) {
c4c6df5f
SG
728 dev_err(ctrl->ctrl.device,
729 "unable to set any I/O queues\n");
730 return -ENOMEM;
731 }
c248c643 732
85032874 733 ctrl->ctrl.queue_count = nr_io_queues + 1;
c248c643
SG
734 dev_info(ctrl->ctrl.device,
735 "creating %d I/O queues.\n", nr_io_queues);
736
a249d306 737 nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues);
d858e5f0 738 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
41e8cfa1
SG
739 ret = nvme_rdma_alloc_queue(ctrl, i,
740 ctrl->ctrl.sqsize + 1);
741 if (ret)
71102307 742 goto out_free_queues;
71102307
CH
743 }
744
745 return 0;
746
747out_free_queues:
f361e5a0 748 for (i--; i >= 1; i--)
a57bd541 749 nvme_rdma_free_queue(&ctrl->queues[i]);
71102307
CH
750
751 return ret;
752}
753
cefa1032 754static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
b28a308e 755{
cefa1032
CH
756 unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
757 NVME_RDMA_DATA_SGL_SIZE;
b28a308e 758
cefa1032
CH
759 if (ctrl->max_integrity_segments)
760 cmd_size += sizeof(struct nvme_rdma_sgl) +
761 NVME_RDMA_METADATA_SGL_SIZE;
a7f7b711 762
cefa1032 763 return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
db45e1a5 764 &nvme_rdma_mq_ops,
dcef7727
CH
765 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
766 cmd_size);
b28a308e
SG
767}
768
cefa1032 769static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
71102307 770{
682630f0 771 if (ctrl->async_event_sqe.data) {
925dd04c 772 cancel_work_sync(&ctrl->ctrl.async_event_work);
682630f0
SG
773 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
774 sizeof(struct nvme_command), DMA_TO_DEVICE);
775 ctrl->async_event_sqe.data = NULL;
776 }
a57bd541 777 nvme_rdma_free_queue(&ctrl->queues[0]);
71102307
CH
778}
779
3f02fffb
SG
780static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
781 bool new)
90af3512 782{
5ec5d3bd 783 bool pi_capable = false;
90af3512
SG
784 int error;
785
41e8cfa1 786 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
90af3512
SG
787 if (error)
788 return error;
789
790 ctrl->device = ctrl->queues[0].device;
22dd4c70 791 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev);
90af3512 792
5ec5d3bd 793 /* T10-PI support */
e945c653
JG
794 if (ctrl->device->dev->attrs.kernel_cap_flags &
795 IBK_INTEGRITY_HANDOVER)
5ec5d3bd
MG
796 pi_capable = true;
797
798 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
799 pi_capable);
90af3512 800
62f99b62
MG
801 /*
802 * Bind the async event SQE DMA mapping to the admin queue lifetime.
803 * It's safe, since any change in the underlying RDMA device will issue
804 * error recovery and queue re-creation.
805 */
94e42213
SG
806 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
807 sizeof(struct nvme_command), DMA_TO_DEVICE);
808 if (error)
809 goto out_free_queue;
810
3f02fffb 811 if (new) {
cefa1032
CH
812 error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
813 &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
cefa1032
CH
814 sizeof(struct nvme_rdma_request) +
815 NVME_RDMA_DATA_SGL_SIZE);
a7f7b711 816 if (error)
94e42213 817 goto out_free_async_qe;
90af3512 818
90af3512
SG
819 }
820
68e16fcf 821 error = nvme_rdma_start_queue(ctrl, 0);
90af3512 822 if (error)
cefa1032 823 goto out_remove_admin_tag_set;
90af3512 824
c0f2f45b 825 error = nvme_enable_ctrl(&ctrl->ctrl);
90af3512 826 if (error)
2e050f00 827 goto out_stop_queue;
90af3512 828
ff13c1b8
MG
829 ctrl->ctrl.max_segments = ctrl->max_fr_pages;
830 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
5ec5d3bd
MG
831 if (pi_capable)
832 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
833 else
834 ctrl->ctrl.max_integrity_segments = 0;
90af3512 835
9f27bd70 836 nvme_unquiesce_admin_queue(&ctrl->ctrl);
e7832cb4 837
94cc781f 838 error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
90af3512 839 if (error)
958dc1d3 840 goto out_quiesce_queue;
90af3512 841
90af3512
SG
842 return 0;
843
958dc1d3 844out_quiesce_queue:
9f27bd70 845 nvme_quiesce_admin_queue(&ctrl->ctrl);
958dc1d3 846 blk_sync_queue(ctrl->ctrl.admin_q);
2e050f00
JW
847out_stop_queue:
848 nvme_rdma_stop_queue(&ctrl->queues[0]);
958dc1d3 849 nvme_cancel_admin_tagset(&ctrl->ctrl);
cefa1032 850out_remove_admin_tag_set:
3f02fffb 851 if (new)
cefa1032 852 nvme_remove_admin_tag_set(&ctrl->ctrl);
94e42213 853out_free_async_qe:
9134ae2a
PS
854 if (ctrl->async_event_sqe.data) {
855 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
856 sizeof(struct nvme_command), DMA_TO_DEVICE);
857 ctrl->async_event_sqe.data = NULL;
858 }
90af3512
SG
859out_free_queue:
860 nvme_rdma_free_queue(&ctrl->queues[0]);
861 return error;
862}
863
a57bd541
SG
864static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
865{
1c467e25 866 int ret, nr_queues;
a57bd541 867
41e8cfa1 868 ret = nvme_rdma_alloc_io_queues(ctrl);
a57bd541
SG
869 if (ret)
870 return ret;
871
872 if (new) {
a7f7b711
CH
873 ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
874 if (ret)
a57bd541 875 goto out_free_io_queues;
a57bd541
SG
876 }
877
1c467e25
DW
878 /*
879 * Only start IO queues for which we have allocated the tagset
880 * and limited it to the available queues. On reconnects, the
881 * queue number might have changed.
882 */
883 nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
884 ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
a57bd541 885 if (ret)
cefa1032 886 goto out_cleanup_tagset;
a57bd541 887
9f98772b 888 if (!new) {
29b434d1 889 nvme_start_freeze(&ctrl->ctrl);
9f27bd70 890 nvme_unquiesce_io_queues(&ctrl->ctrl);
2362acb6
SG
891 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
892 /*
893 * If we timed out waiting for freeze we are likely to
894 * be stuck. Fail the controller initialization just
895 * to be safe.
896 */
897 ret = -ENODEV;
29b434d1 898 nvme_unfreeze(&ctrl->ctrl);
2362acb6
SG
899 goto out_wait_freeze_timed_out;
900 }
9f98772b
SG
901 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
902 ctrl->ctrl.queue_count - 1);
903 nvme_unfreeze(&ctrl->ctrl);
904 }
905
1c467e25
DW
906 /*
907 * If the number of queues has increased (reconnect case)
908 * start all new queues now.
909 */
910 ret = nvme_rdma_start_io_queues(ctrl, nr_queues,
911 ctrl->tag_set.nr_hw_queues + 1);
912 if (ret)
913 goto out_wait_freeze_timed_out;
914
a57bd541
SG
915 return 0;
916
2362acb6 917out_wait_freeze_timed_out:
9f27bd70 918 nvme_quiesce_io_queues(&ctrl->ctrl);
958dc1d3 919 nvme_sync_io_queues(&ctrl->ctrl);
2362acb6 920 nvme_rdma_stop_io_queues(ctrl);
cefa1032 921out_cleanup_tagset:
958dc1d3 922 nvme_cancel_tagset(&ctrl->ctrl);
a57bd541 923 if (new)
cefa1032 924 nvme_remove_io_tag_set(&ctrl->ctrl);
a57bd541
SG
925out_free_io_queues:
926 nvme_rdma_free_io_queues(ctrl);
927 return ret;
71102307
CH
928}
929
75862c72
SG
930static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
931 bool remove)
932{
9f27bd70 933 nvme_quiesce_admin_queue(&ctrl->ctrl);
3017013d 934 blk_sync_queue(ctrl->ctrl.admin_q);
75862c72 935 nvme_rdma_stop_queue(&ctrl->queues[0]);
c4189d68 936 nvme_cancel_admin_tagset(&ctrl->ctrl);
cefa1032 937 if (remove) {
9f27bd70 938 nvme_unquiesce_admin_queue(&ctrl->ctrl);
cefa1032
CH
939 nvme_remove_admin_tag_set(&ctrl->ctrl);
940 }
941 nvme_rdma_destroy_admin_queue(ctrl);
75862c72
SG
942}
943
944static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
945 bool remove)
946{
947 if (ctrl->ctrl.queue_count > 1) {
9f27bd70 948 nvme_quiesce_io_queues(&ctrl->ctrl);
3017013d 949 nvme_sync_io_queues(&ctrl->ctrl);
75862c72 950 nvme_rdma_stop_io_queues(ctrl);
c4189d68 951 nvme_cancel_tagset(&ctrl->ctrl);
cefa1032 952 if (remove) {
9f27bd70 953 nvme_unquiesce_io_queues(&ctrl->ctrl);
cefa1032
CH
954 nvme_remove_io_tag_set(&ctrl->ctrl);
955 }
956 nvme_rdma_free_io_queues(ctrl);
75862c72
SG
957 }
958}
959
f7f70f4a
RL
960static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
961{
962 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
963
a1ae8d4d 964 flush_work(&ctrl->err_work);
f7f70f4a
RL
965 cancel_delayed_work_sync(&ctrl->reconnect_work);
966}
967
71102307
CH
968static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
969{
970 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
971
972 if (list_empty(&ctrl->list))
973 goto free_ctrl;
974
975 mutex_lock(&nvme_rdma_ctrl_mutex);
976 list_del(&ctrl->list);
977 mutex_unlock(&nvme_rdma_ctrl_mutex);
978
71102307
CH
979 nvmf_free_options(nctrl->opts);
980free_ctrl:
3d064101 981 kfree(ctrl->queues);
71102307
CH
982 kfree(ctrl);
983}
984
adfde7ed
HR
985static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl,
986 int status)
fd8563ce 987{
e6e7f7ac
KB
988 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
989
fd8563ce 990 /* If we are resetting/deleting then do nothing */
e6e7f7ac
KB
991 if (state != NVME_CTRL_CONNECTING) {
992 WARN_ON_ONCE(state == NVME_CTRL_NEW || state == NVME_CTRL_LIVE);
fd8563ce
SG
993 return;
994 }
995
adfde7ed 996 if (nvmf_should_reconnect(&ctrl->ctrl, status)) {
fd8563ce
SG
997 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
998 ctrl->ctrl.opts->reconnect_delay);
9a6327d2 999 queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
fd8563ce
SG
1000 ctrl->ctrl.opts->reconnect_delay * HZ);
1001 } else {
12fa1304 1002 nvme_delete_ctrl(&ctrl->ctrl);
fd8563ce
SG
1003 }
1004}
1005
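/*
 * Bring up (or re-establish) a controller: create and connect the admin
 * queue, validate the target's capabilities (ICDOFF, keyed SGL support,
 * queue sizes), then create and connect the I/O queues and move the
 * controller to LIVE.
 */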
c66e2998 1006static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
71102307 1007{
13ce7e62 1008 int ret;
71102307 1009 bool changed;
ad178ba9 1010 u16 max_queue_size;
71102307 1011
c66e2998 1012 ret = nvme_rdma_configure_admin_queue(ctrl, new);
71102307 1013 if (ret)
c66e2998
SG
1014 return ret;
1015
1016 if (ctrl->ctrl.icdoff) {
09748122 1017 ret = -EOPNOTSUPP;
c66e2998
SG
1018 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
1019 goto destroy_admin;
1020 }
1021
6399a0db 1022 if (!(ctrl->ctrl.sgls & NVME_CTRL_SGLS_KSDBDS)) {
09748122 1023 ret = -EOPNOTSUPP;
c66e2998
SG
1024 dev_err(ctrl->ctrl.device,
1025 "Mandatory keyed sgls are not supported!\n");
1026 goto destroy_admin;
1027 }
1028
1029 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
1030 dev_warn(ctrl->ctrl.device,
1031 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1032 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
1033 }
1034
ad178ba9
MG
1035 if (ctrl->ctrl.max_integrity_segments)
1036 max_queue_size = NVME_RDMA_MAX_METADATA_QUEUE_SIZE;
1037 else
1038 max_queue_size = NVME_RDMA_MAX_QUEUE_SIZE;
1039
1040 if (ctrl->ctrl.sqsize + 1 > max_queue_size) {
44c3c625 1041 dev_warn(ctrl->ctrl.device,
ad178ba9
MG
1042 "ctrl sqsize %u > max queue size %u, clamping down\n",
1043 ctrl->ctrl.sqsize + 1, max_queue_size);
1044 ctrl->ctrl.sqsize = max_queue_size - 1;
44c3c625
MG
1045 }
1046
c66e2998
SG
1047 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1048 dev_warn(ctrl->ctrl.device,
1049 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1050 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1051 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1052 }
71102307 1053
6399a0db 1054 if (ctrl->ctrl.sgls & NVME_CTRL_SGLS_SAOS)
64a741c1 1055 ctrl->use_inline_data = true;
71102307 1056
d858e5f0 1057 if (ctrl->ctrl.queue_count > 1) {
c66e2998 1058 ret = nvme_rdma_configure_io_queues(ctrl, new);
71102307 1059 if (ret)
5e1fe61d 1060 goto destroy_admin;
71102307
CH
1061 }
1062
1063 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
0a960afd 1064 if (!changed) {
96135862 1065 /*
ecca390e 1066 * state change failure is ok if we started ctrl delete,
96135862
IR
1067 * unless we're during creation of a new controller to
1068 * avoid races with teardown flow.
1069 */
e6e7f7ac
KB
1070 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
1071
1072 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
1073 state != NVME_CTRL_DELETING_NOIO);
96135862 1074 WARN_ON_ONCE(new);
c66e2998
SG
1075 ret = -EINVAL;
1076 goto destroy_io;
0a960afd
SG
1077 }
1078
d09f2b45 1079 nvme_start_ctrl(&ctrl->ctrl);
c66e2998
SG
1080 return 0;
1081
1082destroy_io:
958dc1d3 1083 if (ctrl->ctrl.queue_count > 1) {
9f27bd70 1084 nvme_quiesce_io_queues(&ctrl->ctrl);
958dc1d3
CL
1085 nvme_sync_io_queues(&ctrl->ctrl);
1086 nvme_rdma_stop_io_queues(ctrl);
1087 nvme_cancel_tagset(&ctrl->ctrl);
cefa1032
CH
1088 if (new)
1089 nvme_remove_io_tag_set(&ctrl->ctrl);
1090 nvme_rdma_free_io_queues(ctrl);
958dc1d3 1091 }
c66e2998 1092destroy_admin:
3af755a4 1093 nvme_stop_keep_alive(&ctrl->ctrl);
5858b687 1094 nvme_rdma_teardown_admin_queue(ctrl, new);
c66e2998
SG
1095 return ret;
1096}
1097
1098static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
1099{
1100 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
1101 struct nvme_rdma_ctrl, reconnect_work);
adfde7ed 1102 int ret;
c66e2998
SG
1103
1104 ++ctrl->ctrl.nr_reconnects;
1105
adfde7ed
HR
1106 ret = nvme_rdma_setup_ctrl(ctrl, false);
1107 if (ret)
c66e2998 1108 goto requeue;
71102307 1109
5e1fe61d
SG
1110 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
1111 ctrl->ctrl.nr_reconnects);
1112
1113 ctrl->ctrl.nr_reconnects = 0;
71102307
CH
1114
1115 return;
1116
71102307 1117requeue:
54a76c87
TI
1118 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d/%d\n",
1119 ctrl->ctrl.nr_reconnects, ctrl->ctrl.opts->max_reconnects);
adfde7ed 1120 nvme_rdma_reconnect_or_remove(ctrl, ret);
71102307
CH
1121}
1122
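/*
 * Error recovery tears down all I/O and admin queues, restarts the block
 * layer queues so pending requests are not left stuck, and then either
 * schedules a reconnect or deletes the controller via
 * nvme_rdma_reconnect_or_remove().
 */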
1123static void nvme_rdma_error_recovery_work(struct work_struct *work)
1124{
1125 struct nvme_rdma_ctrl *ctrl = container_of(work,
1126 struct nvme_rdma_ctrl, err_work);
1127
e4d753d7 1128 nvme_stop_keep_alive(&ctrl->ctrl);
b6bb1722 1129 flush_work(&ctrl->ctrl.async_event_work);
75862c72 1130 nvme_rdma_teardown_io_queues(ctrl, false);
9f27bd70 1131 nvme_unquiesce_io_queues(&ctrl->ctrl);
75862c72 1132 nvme_rdma_teardown_admin_queue(ctrl, false);
9f27bd70 1133 nvme_unquiesce_admin_queue(&ctrl->ctrl);
91c11d5f 1134 nvme_auth_stop(&ctrl->ctrl);
e818a5b4 1135
ad6a0a52 1136 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
ecca390e 1137 /* state change failure is ok if we started ctrl delete */
e6e7f7ac
KB
1138 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
1139
1140 WARN_ON_ONCE(state != NVME_CTRL_DELETING &&
1141 state != NVME_CTRL_DELETING_NOIO);
d5bf4b7f
SG
1142 return;
1143 }
1144
adfde7ed 1145 nvme_rdma_reconnect_or_remove(ctrl, 0);
71102307
CH
1146}
1147
1148static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
1149{
d5bf4b7f 1150 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
71102307
CH
1151 return;
1152
0475a8dc 1153 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
97b2512a 1154 queue_work(nvme_reset_wq, &ctrl->err_work);
71102307
CH
1155}
1156
8446546c
CH
1157static void nvme_rdma_end_request(struct nvme_rdma_request *req)
1158{
1159 struct request *rq = blk_mq_rq_from_pdu(req);
1160
1161 if (!refcount_dec_and_test(&req->ref))
1162 return;
2eb81a33 1163 if (!nvme_try_complete_req(rq, req->status, req->result))
ff029451 1164 nvme_rdma_complete_rq(rq);
8446546c
CH
1165}
1166
71102307
CH
1167static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
1168 const char *op)
1169{
287f329e 1170 struct nvme_rdma_queue *queue = wc->qp->qp_context;
71102307
CH
1171 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1172
e6e7f7ac 1173 if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE)
71102307
CH
1174 dev_info(ctrl->ctrl.device,
1175 "%s for CQE 0x%p failed with status %s (%d)\n",
1176 op, wc->wr_cqe,
1177 ib_wc_status_msg(wc->status), wc->status);
1178 nvme_rdma_error_recovery(ctrl);
1179}
1180
1181static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
1182{
1183 if (unlikely(wc->status != IB_WC_SUCCESS))
1184 nvme_rdma_wr_error(cq, wc, "MEMREG");
1185}
1186
1187static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1188{
2f122e4f
SG
1189 struct nvme_rdma_request *req =
1190 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
2f122e4f 1191
8446546c 1192 if (unlikely(wc->status != IB_WC_SUCCESS))
71102307 1193 nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
8446546c
CH
1194 else
1195 nvme_rdma_end_request(req);
71102307
CH
1196}
1197
1198static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
1199 struct nvme_rdma_request *req)
1200{
71102307
CH
1201 struct ib_send_wr wr = {
1202 .opcode = IB_WR_LOCAL_INV,
1203 .next = NULL,
1204 .num_sge = 0,
2f122e4f 1205 .send_flags = IB_SEND_SIGNALED,
71102307
CH
1206 .ex.invalidate_rkey = req->mr->rkey,
1207 };
1208
1209 req->reg_cqe.done = nvme_rdma_inv_rkey_done;
1210 wr.wr_cqe = &req->reg_cqe;
1211
45e3cc1a 1212 return ib_post_send(queue->qp, &wr, NULL);
71102307
CH
1213}
1214
4686af88
MG
1215static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
1216{
1217 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1218
1219 if (blk_integrity_rq(rq)) {
1220 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1221 req->metadata_sgl->nents, rq_dma_dir(rq));
1222 sg_free_table_chained(&req->metadata_sgl->sg_table,
1223 NVME_INLINE_METADATA_SG_CNT);
1224 }
1225
1226 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1227 rq_dma_dir(rq));
1228 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
1229}
1230
71102307
CH
1231static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
1232 struct request *rq)
1233{
1234 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307
CH
1235 struct nvme_rdma_device *dev = queue->device;
1236 struct ib_device *ibdev = dev->dev;
5ec5d3bd 1237 struct list_head *pool = &queue->qp->rdma_mrs;
71102307 1238
34e08191 1239 if (!blk_rq_nr_phys_segments(rq))
71102307
CH
1240 return;
1241
5ec5d3bd
MG
1242 if (req->use_sig_mr)
1243 pool = &queue->qp->sig_mrs;
1244
f41725bb 1245 if (req->mr) {
5ec5d3bd 1246 ib_mr_pool_put(queue->qp, pool, req->mr);
f41725bb
IR
1247 req->mr = NULL;
1248 }
1249
4686af88 1250 nvme_rdma_dma_unmap_req(ibdev, rq);
71102307
CH
1251}
1252
1253static int nvme_rdma_set_sg_null(struct nvme_command *c)
1254{
1255 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1256
1257 sg->addr = 0;
1258 put_unaligned_le24(0, sg->length);
1259 put_unaligned_le32(0, sg->key);
1260 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1261 return 0;
1262}
1263
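/*
 * A request's data is described to the target in one of three ways: inline
 * (in-capsule) SGEs for small writes, a single keyed SGL entry using the
 * unsafe global rkey when registration is disabled, or a fast-registration
 * MR covering the whole scatterlist. nvme_rdma_map_data() picks between
 * these below.
 */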
1264static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
64a741c1
SW
1265 struct nvme_rdma_request *req, struct nvme_command *c,
1266 int count)
71102307
CH
1267{
1268 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
64a741c1 1269 struct ib_sge *sge = &req->sge[1];
12b2aaad 1270 struct scatterlist *sgl;
64a741c1
SW
1271 u32 len = 0;
1272 int i;
71102307 1273
12b2aaad 1274 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
64a741c1
SW
1275 sge->addr = sg_dma_address(sgl);
1276 sge->length = sg_dma_len(sgl);
1277 sge->lkey = queue->device->pd->local_dma_lkey;
1278 len += sge->length;
12b2aaad 1279 sge++;
64a741c1 1280 }
71102307
CH
1281
1282 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
64a741c1 1283 sg->length = cpu_to_le32(len);
71102307
CH
1284 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1285
64a741c1 1286 req->num_sge += count;
71102307
CH
1287 return 0;
1288}
1289
1290static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
1291 struct nvme_rdma_request *req, struct nvme_command *c)
1292{
1293 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1294
324d9e78
IR
1295 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
1296 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
11975e01 1297 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
71102307
CH
1298 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1299 return 0;
1300}
1301
1302static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
1303 struct nvme_rdma_request *req, struct nvme_command *c,
1304 int count)
1305{
1306 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1307 int nr;
1308
f41725bb
IR
1309 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
1310 if (WARN_ON_ONCE(!req->mr))
1311 return -EAGAIN;
1312
b925a2dc
MG
1313 /*
1314 * Align the MR to a 4K page size to match the ctrl page size and
1315 * the block virtual boundary.
1316 */
324d9e78
IR
1317 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
1318 SZ_4K);
a7b7c7a1 1319 if (unlikely(nr < count)) {
f41725bb
IR
1320 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
1321 req->mr = NULL;
71102307
CH
1322 if (nr < 0)
1323 return nr;
1324 return -EINVAL;
1325 }
1326
1327 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1328
1329 req->reg_cqe.done = nvme_rdma_memreg_done;
1330 memset(&req->reg_wr, 0, sizeof(req->reg_wr));
1331 req->reg_wr.wr.opcode = IB_WR_REG_MR;
1332 req->reg_wr.wr.wr_cqe = &req->reg_cqe;
1333 req->reg_wr.wr.num_sge = 0;
1334 req->reg_wr.mr = req->mr;
1335 req->reg_wr.key = req->mr->rkey;
1336 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
1337 IB_ACCESS_REMOTE_READ |
1338 IB_ACCESS_REMOTE_WRITE;
1339
71102307
CH
1340 sg->addr = cpu_to_le64(req->mr->iova);
1341 put_unaligned_le24(req->mr->length, sg->length);
1342 put_unaligned_le32(req->mr->rkey, sg->key);
1343 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
1344 NVME_SGL_FMT_INVALIDATE;
1345
1346 return 0;
1347}
1348
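/*
 * T10-PI offload: the helpers below translate the NVMe PRINFO control bits
 * into ib_sig_attrs wire/memory domains so the HCA can generate, strip or
 * verify the protection information on behalf of the host.
 */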
5ec5d3bd
MG
1349static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
1350 struct nvme_command *cmd, struct ib_sig_domain *domain,
1351 u16 control, u8 pi_type)
1352{
1353 domain->sig_type = IB_SIG_TYPE_T10_DIF;
1354 domain->sig.dif.bg_type = IB_T10DIF_CRC;
1355 domain->sig.dif.pi_interval = 1 << bi->interval_exp;
1356 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
1357 if (control & NVME_RW_PRINFO_PRCHK_REF)
1358 domain->sig.dif.ref_remap = true;
1359
cead0b89
AG
1360 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat);
1361 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm);
5ec5d3bd
MG
1362 domain->sig.dif.app_escape = true;
1363 if (pi_type == NVME_NS_DPS_PI_TYPE3)
1364 domain->sig.dif.ref_escape = true;
1365}
1366
1367static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
1368 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
1369 u8 pi_type)
1370{
1371 u16 control = le16_to_cpu(cmd->rw.control);
1372
1373 memset(sig_attrs, 0, sizeof(*sig_attrs));
1374 if (control & NVME_RW_PRINFO_PRACT) {
1375 /* for WRITE_INSERT/READ_STRIP no memory domain */
1376 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
1377 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1378 pi_type);
1379 /* Clear the PRACT bit since HCA will generate/verify the PI */
1380 control &= ~NVME_RW_PRINFO_PRACT;
1381 cmd->rw.control = cpu_to_le16(control);
1382 } else {
1383 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
1384 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1385 pi_type);
1386 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
1387 pi_type);
1388 }
1389}
1390
1391static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
1392{
1393 *mask = 0;
1394 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
1395 *mask |= IB_SIG_CHECK_REFTAG;
1396 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
1397 *mask |= IB_SIG_CHECK_GUARD;
1398}
1399
1400static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
1401{
1402 if (unlikely(wc->status != IB_WC_SUCCESS))
1403 nvme_rdma_wr_error(cq, wc, "SIG");
1404}
1405
1406static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
1407 struct nvme_rdma_request *req, struct nvme_command *c,
1408 int count, int pi_count)
1409{
1410 struct nvme_rdma_sgl *sgl = &req->data_sgl;
1411 struct ib_reg_wr *wr = &req->reg_wr;
1412 struct request *rq = blk_mq_rq_from_pdu(req);
1413 struct nvme_ns *ns = rq->q->queuedata;
1414 struct bio *bio = rq->bio;
1415 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
15ade5bf
IR
1416 struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
1417 u32 xfer_len;
5ec5d3bd
MG
1418 int nr;
1419
1420 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
1421 if (WARN_ON_ONCE(!req->mr))
1422 return -EAGAIN;
1423
1424 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
1425 req->metadata_sgl->sg_table.sgl, pi_count, NULL,
1426 SZ_4K);
1427 if (unlikely(nr))
1428 goto mr_put;
1429
15ade5bf 1430 nvme_rdma_set_sig_attrs(bi, c, req->mr->sig_attrs, ns->head->pi_type);
5ec5d3bd
MG
1431 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
1432
1433 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1434
1435 req->reg_cqe.done = nvme_rdma_sig_done;
1436 memset(wr, 0, sizeof(*wr));
1437 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
1438 wr->wr.wr_cqe = &req->reg_cqe;
1439 wr->wr.num_sge = 0;
1440 wr->wr.send_flags = 0;
1441 wr->mr = req->mr;
1442 wr->key = req->mr->rkey;
1443 wr->access = IB_ACCESS_LOCAL_WRITE |
1444 IB_ACCESS_REMOTE_READ |
1445 IB_ACCESS_REMOTE_WRITE;
1446
1447 sg->addr = cpu_to_le64(req->mr->iova);
15ade5bf
IR
1448 xfer_len = req->mr->length;
1449 /* Check if PI is added by the HW */
1450 if (!pi_count)
1451 xfer_len += (xfer_len >> bi->interval_exp) * ns->head->pi_size;
1452 put_unaligned_le24(xfer_len, sg->length);
5ec5d3bd
MG
1453 put_unaligned_le32(req->mr->rkey, sg->key);
1454 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1455
1456 return 0;
1457
1458mr_put:
1459 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
1460 req->mr = NULL;
1461 if (nr < 0)
1462 return nr;
1463 return -EINVAL;
1464}
1465
4686af88
MG
1466static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
1467 int *count, int *pi_count)
71102307
CH
1468{
1469 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
4686af88 1470 int ret;
71102307 1471
324d9e78
IR
1472 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
1473 ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
1474 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
38e18002 1475 NVME_INLINE_SG_CNT);
71102307
CH
1476 if (ret)
1477 return -ENOMEM;
1478
324d9e78
IR
1479 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1480 req->data_sgl.sg_table.sgl);
71102307 1481
4686af88
MG
1482 *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1483 req->data_sgl.nents, rq_dma_dir(rq));
1484 if (unlikely(*count <= 0)) {
94423a8f
MG
1485 ret = -EIO;
1486 goto out_free_table;
71102307
CH
1487 }
1488
5ec5d3bd
MG
1489 if (blk_integrity_rq(rq)) {
1490 req->metadata_sgl->sg_table.sgl =
1491 (struct scatterlist *)(req->metadata_sgl + 1);
1492 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
f4330766 1493 rq->nr_integrity_segments,
5ec5d3bd
MG
1494 req->metadata_sgl->sg_table.sgl,
1495 NVME_INLINE_METADATA_SG_CNT);
1496 if (unlikely(ret)) {
1497 ret = -ENOMEM;
1498 goto out_unmap_sg;
1499 }
1500
76c313f6
KB
1501 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
1502 req->metadata_sgl->sg_table.sgl);
4686af88
MG
1503 *pi_count = ib_dma_map_sg(ibdev,
1504 req->metadata_sgl->sg_table.sgl,
1505 req->metadata_sgl->nents,
1506 rq_dma_dir(rq));
1507 if (unlikely(*pi_count <= 0)) {
5ec5d3bd
MG
1508 ret = -EIO;
1509 goto out_free_pi_table;
1510 }
1511 }
1512
4686af88
MG
1513 return 0;
1514
1515out_free_pi_table:
1516 sg_free_table_chained(&req->metadata_sgl->sg_table,
1517 NVME_INLINE_METADATA_SG_CNT);
1518out_unmap_sg:
1519 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1520 rq_dma_dir(rq));
1521out_free_table:
1522 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
1523 return ret;
1524}
1525
1526static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
1527 struct request *rq, struct nvme_command *c)
1528{
1529 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1530 struct nvme_rdma_device *dev = queue->device;
1531 struct ib_device *ibdev = dev->dev;
1532 int pi_count = 0;
1533 int count, ret;
1534
1535 req->num_sge = 1;
1536 refcount_set(&req->ref, 2); /* send and recv completions */
1537
1538 c->common.flags |= NVME_CMD_SGL_METABUF;
1539
1540 if (!blk_rq_nr_phys_segments(rq))
1541 return nvme_rdma_set_sg_null(c);
1542
1543 ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
1544 if (unlikely(ret))
1545 return ret;
1546
5ec5d3bd
MG
1547 if (req->use_sig_mr) {
1548 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
1549 goto out;
1550 }
1551
64a741c1 1552 if (count <= dev->num_inline_segments) {
b131c61d 1553 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
64a741c1 1554 queue->ctrl->use_inline_data &&
b131c61d 1555 blk_rq_payload_bytes(rq) <=
94423a8f 1556 nvme_rdma_inline_data_size(queue)) {
64a741c1 1557 ret = nvme_rdma_map_sg_inline(queue, req, c, count);
94423a8f
MG
1558 goto out;
1559 }
71102307 1560
64a741c1 1561 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
94423a8f
MG
1562 ret = nvme_rdma_map_sg_single(queue, req, c);
1563 goto out;
1564 }
71102307
CH
1565 }
1566
94423a8f
MG
1567 ret = nvme_rdma_map_sg_fr(queue, req, c, count);
1568out:
1569 if (unlikely(ret))
4686af88 1570 goto out_dma_unmap_req;
94423a8f
MG
1571
1572 return 0;
1573
4686af88
MG
1574out_dma_unmap_req:
1575 nvme_rdma_dma_unmap_req(ibdev, rq);
94423a8f 1576 return ret;
71102307
CH
1577}
1578
1579static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1580{
4af7f7ff
SG
1581 struct nvme_rdma_qe *qe =
1582 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
1583 struct nvme_rdma_request *req =
1584 container_of(qe, struct nvme_rdma_request, sqe);
4af7f7ff 1585
8446546c 1586 if (unlikely(wc->status != IB_WC_SUCCESS))
71102307 1587 nvme_rdma_wr_error(cq, wc, "SEND");
8446546c
CH
1588 else
1589 nvme_rdma_end_request(req);
71102307
CH
1590}
1591
1592static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1593 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
b4b591c8 1594 struct ib_send_wr *first)
71102307 1595{
45e3cc1a 1596 struct ib_send_wr wr;
71102307
CH
1597 int ret;
1598
1599 sge->addr = qe->dma;
a62315b8 1600 sge->length = sizeof(struct nvme_command);
71102307
CH
1601 sge->lkey = queue->device->pd->local_dma_lkey;
1602
71102307
CH
1603 wr.next = NULL;
1604 wr.wr_cqe = &qe->cqe;
1605 wr.sg_list = sge;
1606 wr.num_sge = num_sge;
1607 wr.opcode = IB_WR_SEND;
b4b591c8 1608 wr.send_flags = IB_SEND_SIGNALED;
71102307
CH
1609
1610 if (first)
1611 first->next = &wr;
1612 else
1613 first = &wr;
1614
45e3cc1a 1615 ret = ib_post_send(queue->qp, first, NULL);
a7b7c7a1 1616 if (unlikely(ret)) {
71102307
CH
1617 dev_err(queue->ctrl->ctrl.device,
1618 "%s failed with error code %d\n", __func__, ret);
1619 }
1620 return ret;
1621}
1622
1623static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
1624 struct nvme_rdma_qe *qe)
1625{
45e3cc1a 1626 struct ib_recv_wr wr;
71102307
CH
1627 struct ib_sge list;
1628 int ret;
1629
1630 list.addr = qe->dma;
1631 list.length = sizeof(struct nvme_completion);
1632 list.lkey = queue->device->pd->local_dma_lkey;
1633
1634 qe->cqe.done = nvme_rdma_recv_done;
1635
1636 wr.next = NULL;
1637 wr.wr_cqe = &qe->cqe;
1638 wr.sg_list = &list;
1639 wr.num_sge = 1;
1640
45e3cc1a 1641 ret = ib_post_recv(queue->qp, &wr, NULL);
a7b7c7a1 1642 if (unlikely(ret)) {
71102307
CH
1643 dev_err(queue->ctrl->ctrl.device,
1644 "%s failed with error code %d\n", __func__, ret);
1645 }
1646 return ret;
1647}
1648
1649static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
1650{
1651 u32 queue_idx = nvme_rdma_queue_idx(queue);
1652
1653 if (queue_idx == 0)
1654 return queue->ctrl->admin_tag_set.tags[queue_idx];
1655 return queue->ctrl->tag_set.tags[queue_idx - 1];
1656}
1657
b4b591c8
SG
1658static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
1659{
1660 if (unlikely(wc->status != IB_WC_SUCCESS))
1661 nvme_rdma_wr_error(cq, wc, "ASYNC");
1662}
1663
ad22c355 1664static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
71102307
CH
1665{
1666 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1667 struct nvme_rdma_queue *queue = &ctrl->queues[0];
1668 struct ib_device *dev = queue->device->dev;
1669 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1670 struct nvme_command *cmd = sqe->data;
1671 struct ib_sge sge;
1672 int ret;
1673
71102307
CH
1674 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
1675
1676 memset(cmd, 0, sizeof(*cmd));
1677 cmd->common.opcode = nvme_admin_async_event;
38dabe21 1678 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
71102307
CH
1679 cmd->common.flags |= NVME_CMD_SGL_METABUF;
1680 nvme_rdma_set_sg_null(cmd);
1681
b4b591c8
SG
1682 sqe->cqe.done = nvme_rdma_async_done;
1683
71102307
CH
1684 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
1685 DMA_TO_DEVICE);
1686
b4b591c8 1687 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
71102307
CH
1688 WARN_ON_ONCE(ret);
1689}
1690
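/*
 * Request completion: req->ref starts at 2 and is dropped once by the SEND
 * completion and once by the receive path; when a registered MR still needs
 * a local invalidation (no remote invalidation was received), the
 * IB_WR_LOCAL_INV completion drops the second reference instead.
 */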
1052b8ac
JA
1691static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
1692 struct nvme_completion *cqe, struct ib_wc *wc)
71102307 1693{
71102307
CH
1694 struct request *rq;
1695 struct nvme_rdma_request *req;
71102307 1696
e7006de6 1697 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
71102307
CH
1698 if (!rq) {
1699 dev_err(queue->ctrl->ctrl.device,
e7006de6 1700 "got bad command_id %#x on QP %#x\n",
71102307
CH
1701 cqe->command_id, queue->qp->qp_num);
1702 nvme_rdma_error_recovery(queue->ctrl);
1052b8ac 1703 return;
71102307
CH
1704 }
1705 req = blk_mq_rq_to_pdu(rq);
1706
4af7f7ff
SG
1707 req->status = cqe->status;
1708 req->result = cqe->result;
71102307 1709
3ef0279b 1710 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
a87da50f
CL
1711 if (unlikely(!req->mr ||
1712 wc->ex.invalidate_rkey != req->mr->rkey)) {
3ef0279b
SG
1713 dev_err(queue->ctrl->ctrl.device,
1714 "Bogus remote invalidation for rkey %#x\n",
a87da50f 1715 req->mr ? req->mr->rkey : 0);
3ef0279b
SG
1716 nvme_rdma_error_recovery(queue->ctrl);
1717 }
f41725bb 1718 } else if (req->mr) {
1052b8ac
JA
1719 int ret;
1720
2f122e4f
SG
1721 ret = nvme_rdma_inv_rkey(queue, req);
1722 if (unlikely(ret < 0)) {
1723 dev_err(queue->ctrl->ctrl.device,
1724 "Queueing INV WR for rkey %#x failed (%d)\n",
1725 req->mr->rkey, ret);
1726 nvme_rdma_error_recovery(queue->ctrl);
1727 }
1728 /* the local invalidation completion will end the request */
7a804c34 1729 return;
2f122e4f 1730 }
7a804c34
CH
1731
1732 nvme_rdma_end_request(req);
71102307
CH
1733}
1734
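/*
 * Receive completion handler: validate the completion length, dispatch
 * AEN completions without a backing request, hand everything else to
 * nvme_rdma_process_nvme_rsp(), and re-post the receive buffer.
 */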
1052b8ac 1735static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
71102307
CH
1736{
1737 struct nvme_rdma_qe *qe =
1738 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
287f329e 1739 struct nvme_rdma_queue *queue = wc->qp->qp_context;
71102307
CH
1740 struct ib_device *ibdev = queue->device->dev;
1741 struct nvme_completion *cqe = qe->data;
1742 const size_t len = sizeof(struct nvme_completion);
71102307
CH
1743
1744 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1745 nvme_rdma_wr_error(cq, wc, "RECV");
1052b8ac 1746 return;
71102307
CH
1747 }
1748
25c1ca6e 1749 /* sanity check the received data length */
1750 if (unlikely(wc->byte_len < len)) {
1751 dev_err(queue->ctrl->ctrl.device,
1752 "Unexpected nvme completion length(%d)\n", wc->byte_len);
1753 nvme_rdma_error_recovery(queue->ctrl);
1754 return;
1755 }
1756
71102307
CH
1757 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1758 /*
1759 * AEN requests are special as they don't time out and can
1760 * survive any kind of queue freeze and often don't respond to
1761 * aborts. We don't even bother to allocate a struct request
1762 * for them but rather special case them here.
1763 */
58a8df67
IR
1764 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
1765 cqe->command_id)))
7bf58533
CH
1766 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1767 &cqe->result);
71102307 1768 else
1052b8ac 1769 nvme_rdma_process_nvme_rsp(queue, cqe, wc);
71102307
CH
1770 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1771
1772 nvme_rdma_post_recv(queue, qe);
71102307
CH
1773}
1774
1775static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
1776{
1777 int ret, i;
1778
1779 for (i = 0; i < queue->queue_size; i++) {
1780 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
1781 if (ret)
9817d763 1782 return ret;
71102307
CH
1783 }
1784
1785 return 0;
71102307
CH
1786}
1787
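/*
 * Decode an RDMA CM connect reject: if the consumer reject data carries an
 * NVMe RDMA CM status, print it alongside the CM-level reject reason.
 */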
1788static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
1789 struct rdma_cm_event *ev)
1790{
7f03953c
SW
1791 struct rdma_cm_id *cm_id = queue->cm_id;
1792 int status = ev->status;
1793 const char *rej_msg;
1794 const struct nvme_rdma_cm_rej *rej_data;
1795 u8 rej_data_len;
1796
1797 rej_msg = rdma_reject_msg(cm_id, status);
1798 rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
1799
1800 if (rej_data && rej_data_len >= sizeof(u16)) {
1801 u16 sts = le16_to_cpu(rej_data->sts);
71102307
CH
1802
1803 dev_err(queue->ctrl->ctrl.device,
7f03953c
SW
1804 "Connect rejected: status %d (%s) nvme status %d (%s).\n",
1805 status, rej_msg, sts, nvme_rdma_cm_msg(sts));
71102307
CH
1806 } else {
1807 dev_err(queue->ctrl->ctrl.device,
7f03953c 1808 "Connect rejected: status %d (%s).\n", status, rej_msg);
71102307
CH
1809 }
1810
1811 return -ECONNRESET;
1812}
1813
1814static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
1815{
e63440d6 1816 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
71102307
CH
1817 int ret;
1818
ca6e95bb
SG
1819 ret = nvme_rdma_create_queue_ib(queue);
1820 if (ret)
1821 return ret;
71102307 1822
e63440d6
IR
1823 if (ctrl->opts->tos >= 0)
1824 rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
0525af71 1825 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS);
71102307 1826 if (ret) {
e63440d6 1827 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
71102307
CH
 1828 ret);
1829 goto out_destroy_queue;
1830 }
1831
1832 return 0;
1833
1834out_destroy_queue:
1835 nvme_rdma_destroy_queue_ib(queue);
71102307
CH
1836 return ret;
1837}
1838
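/*
 * Route is resolved: fill in the RDMA CM connection parameters and the
 * NVMe RDMA CM request private data (queue id plus hrqsize/hsqsize sized
 * per the Fabrics rules below) and initiate the connection.
 */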
1839static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1840{
1841 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1842 struct rdma_conn_param param = { };
0b857b44 1843 struct nvme_rdma_cm_req priv = { };
71102307
CH
1844 int ret;
1845
1846 param.qp_num = queue->qp->qp_num;
1847 param.flow_control = 1;
1848
1849 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
2ac17c28
SG
1850 /* maximum retry count */
1851 param.retry_count = 7;
71102307
CH
1852 param.rnr_retry_count = 7;
1853 param.private_data = &priv;
1854 param.private_data_len = sizeof(priv);
1855
1856 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1857 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
f994d9dc
JF
1858 /*
1859 * set the admin queue depth to the minimum size
1860 * specified by the Fabrics standard.
1861 */
1862 if (priv.qid == 0) {
7aa1f427
SG
1863 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
1864 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
f994d9dc 1865 } else {
c5af8654
JF
1866 /*
 1867 * The current interpretation of the Fabrics spec is that
 1868 * hrqsize must be at least sqsize + 1, i.e. the 1's based
 1869 * representation of sqsize.
1870 */
f994d9dc 1871 priv.hrqsize = cpu_to_le16(queue->queue_size);
c5af8654 1872 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
03c3d7c7
NC
1873 /* cntlid should only be set when creating an I/O queue */
1874 priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid);
f994d9dc 1875 }
71102307 1876
071ba4cc 1877 ret = rdma_connect_locked(queue->cm_id, &param);
71102307
CH
1878 if (ret) {
1879 dev_err(ctrl->ctrl.device,
071ba4cc 1880 "rdma_connect_locked failed (%d).\n", ret);
9817d763 1881 return ret;
71102307
CH
1882 }
1883
1884 return 0;
71102307
CH
1885}
1886
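/*
 * Central RDMA CM event handler. Events that conclude a connect attempt
 * record their result in cm_error and complete cm_done so the queue setup
 * path waiting on it can observe the outcome; disconnects and unexpected
 * events trigger error recovery instead.
 */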
71102307
CH
1887static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1888 struct rdma_cm_event *ev)
1889{
1890 struct nvme_rdma_queue *queue = cm_id->context;
1891 int cm_error = 0;
1892
1893 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1894 rdma_event_msg(ev->event), ev->event,
1895 ev->status, cm_id);
1896
1897 switch (ev->event) {
1898 case RDMA_CM_EVENT_ADDR_RESOLVED:
1899 cm_error = nvme_rdma_addr_resolved(queue);
1900 break;
1901 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1902 cm_error = nvme_rdma_route_resolved(queue);
1903 break;
1904 case RDMA_CM_EVENT_ESTABLISHED:
1905 queue->cm_error = nvme_rdma_conn_established(queue);
1906 /* complete cm_done regardless of success/failure */
1907 complete(&queue->cm_done);
1908 return 0;
1909 case RDMA_CM_EVENT_REJECTED:
1910 cm_error = nvme_rdma_conn_rejected(queue, ev);
1911 break;
71102307
CH
1912 case RDMA_CM_EVENT_ROUTE_ERROR:
1913 case RDMA_CM_EVENT_CONNECT_ERROR:
1914 case RDMA_CM_EVENT_UNREACHABLE:
abf87d5e 1915 case RDMA_CM_EVENT_ADDR_ERROR:
71102307
CH
1916 dev_dbg(queue->ctrl->ctrl.device,
1917 "CM error event %d\n", ev->event);
1918 cm_error = -ECONNRESET;
1919 break;
1920 case RDMA_CM_EVENT_DISCONNECTED:
1921 case RDMA_CM_EVENT_ADDR_CHANGE:
1922 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1923 dev_dbg(queue->ctrl->ctrl.device,
1924 "disconnect received - connection closed\n");
1925 nvme_rdma_error_recovery(queue->ctrl);
1926 break;
1927 case RDMA_CM_EVENT_DEVICE_REMOVAL:
e87a911f
SW
1928 /* device removal is handled via the ib_client API */
1929 break;
71102307
CH
1930 default:
1931 dev_err(queue->ctrl->ctrl.device,
1932 "Unexpected RDMA CM event (%d)\n", ev->event);
1933 nvme_rdma_error_recovery(queue->ctrl);
1934 break;
1935 }
1936
1937 if (cm_error) {
1938 queue->cm_error = cm_error;
1939 complete(&queue->cm_done);
1940 }
1941
1942 return 0;
1943}
1944
0475a8dc
SG
1945static void nvme_rdma_complete_timed_out(struct request *rq)
1946{
1947 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1948 struct nvme_rdma_queue *queue = req->queue;
0475a8dc 1949
0475a8dc 1950 nvme_rdma_stop_queue(queue);
93ba75c9 1951 nvmf_complete_timed_out_request(rq);
0475a8dc
SG
1952}
1953
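/*
 * blk-mq timeout handler. Outside of LIVE state the request is force
 * completed to avoid blocking controller setup or teardown; in LIVE state
 * error recovery is kicked and the timer is reset so the request is
 * completed (or cancelled) by the recovery work.
 */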
9bdb4833 1954static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
71102307
CH
1955{
1956 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
4c174e63
SG
1957 struct nvme_rdma_queue *queue = req->queue;
1958 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
7d23e836 1959 struct nvme_command *cmd = req->req.cmd;
a5c1a87c
MG
1960 int qid = nvme_rdma_queue_idx(queue);
1961
1962 dev_warn(ctrl->ctrl.device,
1963 "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
7d23e836
CS
1964 rq->tag, nvme_cid(rq), cmd->common.opcode,
1965 nvme_fabrics_opcode_str(qid, cmd), qid);
e62a538d 1966
e6e7f7ac 1967 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
4c174e63 1968 /*
0475a8dc
SG
 1969 * If we are resetting, connecting or deleting, we should
 1970 * complete immediately because we may otherwise block the
 1971 * controller teardown or setup sequence:
1972 * - ctrl disable/shutdown fabrics requests
1973 * - connect requests
1974 * - initialization admin requests
1975 * - I/O requests that entered after unquiescing and
1976 * the controller stopped responding
1977 *
 1978 * All other requests should be cancelled by the error
 1979 * recovery work, so it's fine to fail this one here.
4c174e63 1980 */
0475a8dc 1981 nvme_rdma_complete_timed_out(rq);
4c174e63
SG
1982 return BLK_EH_DONE;
1983 }
71102307 1984
0475a8dc
SG
1985 /*
1986 * LIVE state should trigger the normal error recovery which will
1987 * handle completing this request.
1988 */
4c174e63 1989 nvme_rdma_error_recovery(ctrl);
4c174e63 1990 return BLK_EH_RESET_TIMER;
71102307
CH
1991}
1992
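/*
 * .queue_rq: map the SQE buffer for DMA, build the NVMe command, map the
 * data (using a signature MR when PI is enabled for the namespace), and
 * post the send WR together with an optional MR registration WR. Mapping
 * failures are translated into the appropriate blk_status_t.
 */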
fc17b653 1993static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
71102307
CH
1994 const struct blk_mq_queue_data *bd)
1995{
1996 struct nvme_ns *ns = hctx->queue->queuedata;
1997 struct nvme_rdma_queue *queue = hctx->driver_data;
1998 struct request *rq = bd->rq;
1999 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2000 struct nvme_rdma_qe *sqe = &req->sqe;
f4b9e6c9 2001 struct nvme_command *c = nvme_req(rq)->cmd;
71102307 2002 struct ib_device *dev;
3bc32bb1 2003 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
fc17b653
CH
2004 blk_status_t ret;
2005 int err;
71102307
CH
2006
2007 WARN_ON_ONCE(rq->tag < 0);
2008
a9715744
AC
2009 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2010 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
553cd9ef 2011
71102307 2012 dev = queue->device->dev;
62f99b62
MG
2013
2014 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
2015 sizeof(struct nvme_command),
2016 DMA_TO_DEVICE);
2017 err = ib_dma_mapping_error(dev, req->sqe.dma);
2018 if (unlikely(err))
2019 return BLK_STS_RESOURCE;
2020
71102307
CH
2021 ib_dma_sync_single_for_cpu(dev, sqe->dma,
2022 sizeof(struct nvme_command), DMA_TO_DEVICE);
2023
f4b9e6c9 2024 ret = nvme_setup_cmd(ns, rq);
fc17b653 2025 if (ret)
62f99b62 2026 goto unmap_qe;
71102307 2027
6887fc64 2028 nvme_start_request(rq);
71102307 2029
5ec5d3bd
MG
2030 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
2031 queue->pi_support &&
2032 (c->common.opcode == nvme_cmd_write ||
2033 c->common.opcode == nvme_cmd_read) &&
0372dd4e 2034 nvme_ns_has_pi(ns->head))
5ec5d3bd
MG
2035 req->use_sig_mr = true;
2036 else
2037 req->use_sig_mr = false;
2038
fc17b653 2039 err = nvme_rdma_map_data(queue, rq, c);
a7b7c7a1 2040 if (unlikely(err < 0)) {
71102307 2041 dev_err(queue->ctrl->ctrl.device,
fc17b653 2042 "Failed to map data (%d)\n", err);
71102307
CH
2043 goto err;
2044 }
2045
b4b591c8
SG
2046 sqe->cqe.done = nvme_rdma_send_done;
2047
71102307
CH
2048 ib_dma_sync_single_for_device(dev, sqe->dma,
2049 sizeof(struct nvme_command), DMA_TO_DEVICE);
2050
fc17b653 2051 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
f41725bb 2052 req->mr ? &req->reg_wr.wr : NULL);
16686f3a
MG
2053 if (unlikely(err))
2054 goto err_unmap;
71102307 2055
fc17b653 2056 return BLK_STS_OK;
62f99b62 2057
16686f3a
MG
2058err_unmap:
2059 nvme_rdma_unmap_data(queue, rq);
71102307 2060err:
62eca397
CL
2061 if (err == -EIO)
2062 ret = nvme_host_path_error(rq);
2063 else if (err == -ENOMEM || err == -EAGAIN)
62f99b62
MG
2064 ret = BLK_STS_RESOURCE;
2065 else
2066 ret = BLK_STS_IOERR;
16686f3a 2067 nvme_cleanup_cmd(rq);
62f99b62
MG
2068unmap_qe:
2069 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
2070 DMA_TO_DEVICE);
2071 return ret;
71102307
CH
2072}
2073
5a72e899 2074static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
ff8519f9
SG
2075{
2076 struct nvme_rdma_queue *queue = hctx->driver_data;
2077
2078 return ib_process_cq_direct(queue->ib_cq, -1);
2079}
2080
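/*
 * After a PI-protected I/O completes, query the signature MR and map any
 * guard/reference/application tag mismatch to the matching NVMe protection
 * information error status.
 */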
5ec5d3bd
MG
2081static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
2082{
2083 struct request *rq = blk_mq_rq_from_pdu(req);
2084 struct ib_mr_status mr_status;
2085 int ret;
2086
2087 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
2088 if (ret) {
2089 pr_err("ib_check_mr_status failed, ret %d\n", ret);
2090 nvme_req(rq)->status = NVME_SC_INVALID_PI;
2091 return;
2092 }
2093
2094 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
2095 switch (mr_status.sig_err.err_type) {
2096 case IB_SIG_BAD_GUARD:
2097 nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
2098 break;
2099 case IB_SIG_BAD_REFTAG:
2100 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
2101 break;
2102 case IB_SIG_BAD_APPTAG:
2103 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
2104 break;
2105 }
2106 pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
2107 mr_status.sig_err.err_type, mr_status.sig_err.expected,
2108 mr_status.sig_err.actual);
2109 }
2110}
2111
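/*
 * Final request completion: check the PI status if a signature MR was
 * used, unmap the data and the SQE buffer, then complete the request to
 * the block layer.
 */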
71102307
CH
2112static void nvme_rdma_complete_rq(struct request *rq)
2113{
2114 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
62f99b62
MG
2115 struct nvme_rdma_queue *queue = req->queue;
2116 struct ib_device *ibdev = queue->device->dev;
71102307 2117
5ec5d3bd
MG
2118 if (req->use_sig_mr)
2119 nvme_rdma_check_pi_status(req);
2120
62f99b62
MG
2121 nvme_rdma_unmap_data(queue, rq);
2122 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
2123 DMA_TO_DEVICE);
77f02a7a 2124 nvme_complete_rq(rq);
71102307
CH
2125}
2126
a4e1d0b7 2127static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
0b36658c 2128{
2d60738c 2129 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
5651cd3c 2130
a249d306 2131 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues);
0b36658c
SG
2132}
2133
f363b089 2134static const struct blk_mq_ops nvme_rdma_mq_ops = {
71102307
CH
2135 .queue_rq = nvme_rdma_queue_rq,
2136 .complete = nvme_rdma_complete_rq,
71102307
CH
2137 .init_request = nvme_rdma_init_request,
2138 .exit_request = nvme_rdma_exit_request,
71102307 2139 .init_hctx = nvme_rdma_init_hctx,
71102307 2140 .timeout = nvme_rdma_timeout,
0b36658c 2141 .map_queues = nvme_rdma_map_queues,
ff8519f9 2142 .poll = nvme_rdma_poll,
71102307
CH
2143};
2144
f363b089 2145static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
71102307
CH
2146 .queue_rq = nvme_rdma_queue_rq,
2147 .complete = nvme_rdma_complete_rq,
385475ee
CH
2148 .init_request = nvme_rdma_init_request,
2149 .exit_request = nvme_rdma_exit_request,
71102307
CH
2150 .init_hctx = nvme_rdma_init_admin_hctx,
2151 .timeout = nvme_rdma_timeout,
2152};
2153
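/*
 * Controller shutdown/reset teardown order: I/O queues first, then quiesce
 * the admin queue, disable (or shut down) the controller, and finally tear
 * down the admin queue.
 */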
18398af2 2154static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
71102307 2155{
75862c72 2156 nvme_rdma_teardown_io_queues(ctrl, shutdown);
9f27bd70 2157 nvme_quiesce_admin_queue(&ctrl->ctrl);
285b6e9b 2158 nvme_disable_ctrl(&ctrl->ctrl, shutdown);
75862c72 2159 nvme_rdma_teardown_admin_queue(ctrl, shutdown);
71102307
CH
2160}
2161
c5017e85 2162static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
2461a8dd 2163{
e9bc2587 2164 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
71102307
CH
2165}
2166
71102307
CH
2167static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
2168{
d86c4d8e
CH
2169 struct nvme_rdma_ctrl *ctrl =
2170 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
adfde7ed 2171 int ret;
71102307 2172
d09f2b45 2173 nvme_stop_ctrl(&ctrl->ctrl);
18398af2 2174 nvme_rdma_shutdown_ctrl(ctrl, false);
71102307 2175
ad6a0a52 2176 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
d5bf4b7f
SG
2177 /* state change failure should never happen */
2178 WARN_ON_ONCE(1);
2179 return;
2180 }
2181
adfde7ed
HR
2182 ret = nvme_rdma_setup_ctrl(ctrl, false);
2183 if (ret)
370ae6e4 2184 goto out_fail;
71102307 2185
71102307
CH
2186 return;
2187
370ae6e4 2188out_fail:
8000d1fd 2189 ++ctrl->ctrl.nr_reconnects;
adfde7ed 2190 nvme_rdma_reconnect_or_remove(ctrl, ret);
71102307
CH
2191}
2192
71102307
CH
2193static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
2194 .name = "rdma",
2195 .module = THIS_MODULE,
5ec5d3bd 2196 .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
71102307
CH
2197 .reg_read32 = nvmf_reg_read32,
2198 .reg_read64 = nvmf_reg_read64,
2199 .reg_write32 = nvmf_reg_write32,
210b1f65 2200 .subsystem_reset = nvmf_subsystem_reset,
71102307
CH
2201 .free_ctrl = nvme_rdma_free_ctrl,
2202 .submit_async_event = nvme_rdma_submit_async_event,
c5017e85 2203 .delete_ctrl = nvme_rdma_delete_ctrl,
71102307 2204 .get_address = nvmf_get_address,
f7f70f4a 2205 .stop_ctrl = nvme_rdma_stop_ctrl,
71102307
CH
2206};
2207
36e835f2
JS
2208/*
2209 * Fails a connection request if it matches an existing controller
2210 * (association) with the same tuple:
2211 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
2212 *
 2213 * If a local address is not specified in the request, it will match an
 2214 * existing controller with all the other parameters the same and no
 2215 * local address specified as well.
2216 *
2217 * The ports don't need to be compared as they are intrinsically
2218 * already matched by the port pointers supplied.
2219 */
2220static bool
2221nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
2222{
2223 struct nvme_rdma_ctrl *ctrl;
2224 bool found = false;
2225
2226 mutex_lock(&nvme_rdma_ctrl_mutex);
2227 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
b7c7be6f 2228 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
36e835f2
JS
2229 if (found)
2230 break;
2231 }
2232 mutex_unlock(&nvme_rdma_ctrl_mutex);
2233
2234 return found;
2235}
2236
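/*
 * Allocate a controller from the connect options: apply the default
 * NVMe/RDMA port when no trsvcid was given, parse the target and optional
 * host source addresses, reject duplicate associations unless
 * duplicate_connect was requested, and register the controller with the
 * core.
 */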
ea47c471 2237static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev,
71102307
CH
2238 struct nvmf_ctrl_options *opts)
2239{
2240 struct nvme_rdma_ctrl *ctrl;
2241 int ret;
71102307
CH
2242
2243 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2244 if (!ctrl)
2245 return ERR_PTR(-ENOMEM);
2246 ctrl->ctrl.opts = opts;
2247 INIT_LIST_HEAD(&ctrl->list);
2248
bb59b8e5
SG
2249 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2250 opts->trsvcid =
2251 kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
2252 if (!opts->trsvcid) {
2253 ret = -ENOMEM;
2254 goto out_free_ctrl;
2255 }
2256 opts->mask |= NVMF_OPT_TRSVCID;
2257 }
0928f9b4
SG
2258
2259 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
bb59b8e5 2260 opts->traddr, opts->trsvcid, &ctrl->addr);
71102307 2261 if (ret) {
bb59b8e5
SG
2262 pr_err("malformed address passed: %s:%s\n",
2263 opts->traddr, opts->trsvcid);
71102307
CH
2264 goto out_free_ctrl;
2265 }
2266
8f4e8dac 2267 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
0928f9b4
SG
2268 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2269 opts->host_traddr, NULL, &ctrl->src_addr);
8f4e8dac 2270 if (ret) {
0928f9b4 2271 pr_err("malformed src address passed: %s\n",
8f4e8dac
MG
2272 opts->host_traddr);
2273 goto out_free_ctrl;
2274 }
2275 }
2276
36e835f2
JS
2277 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
2278 ret = -EALREADY;
2279 goto out_free_ctrl;
2280 }
2281
71102307
CH
2282 INIT_DELAYED_WORK(&ctrl->reconnect_work,
2283 nvme_rdma_reconnect_ctrl_work);
2284 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
d86c4d8e 2285 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
71102307 2286
ff8519f9
SG
2287 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2288 opts->nr_poll_queues + 1;
c5af8654 2289 ctrl->ctrl.sqsize = opts->queue_size - 1;
71102307
CH
2290 ctrl->ctrl.kato = opts->kato;
2291
2292 ret = -ENOMEM;
d858e5f0 2293 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
71102307
CH
2294 GFP_KERNEL);
2295 if (!ctrl->queues)
3d064101
SG
2296 goto out_free_ctrl;
2297
2298 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
2299 0 /* no quirks, we're perfect! */);
2300 if (ret)
2301 goto out_kfree_queues;
71102307 2302
ea47c471
KB
2303 return ctrl;
2304
2305out_kfree_queues:
2306 kfree(ctrl->queues);
2307out_free_ctrl:
2308 kfree(ctrl);
2309 return ERR_PTR(ret);
2310}
2311
2312static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2313 struct nvmf_ctrl_options *opts)
2314{
2315 struct nvme_rdma_ctrl *ctrl;
2316 bool changed;
2317 int ret;
2318
2319 ctrl = nvme_rdma_alloc_ctrl(dev, opts);
2320 if (IS_ERR(ctrl))
2321 return ERR_CAST(ctrl);
2322
1a9e2181
KB
2323 ret = nvme_add_ctrl(&ctrl->ctrl);
2324 if (ret)
2325 goto out_put_ctrl;
2326
b754a32c
MG
2327 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
2328 WARN_ON_ONCE(!changed);
2329
c66e2998 2330 ret = nvme_rdma_setup_ctrl(ctrl, true);
71102307 2331 if (ret)
3d064101 2332 goto out_uninit_ctrl;
71102307 2333
d2045e6a
NY
2334 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs, hostnqn: %s\n",
2335 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn);
71102307 2336
71102307
CH
2337 mutex_lock(&nvme_rdma_ctrl_mutex);
2338 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
2339 mutex_unlock(&nvme_rdma_ctrl_mutex);
2340
71102307
CH
2341 return &ctrl->ctrl;
2342
71102307
CH
2343out_uninit_ctrl:
2344 nvme_uninit_ctrl(&ctrl->ctrl);
1a9e2181 2345out_put_ctrl:
71102307
CH
2346 nvme_put_ctrl(&ctrl->ctrl);
2347 if (ret > 0)
2348 ret = -EIO;
2349 return ERR_PTR(ret);
71102307
CH
2350}
2351
2352static struct nvmf_transport_ops nvme_rdma_transport = {
2353 .name = "rdma",
0de5cd36 2354 .module = THIS_MODULE,
71102307 2355 .required_opts = NVMF_OPT_TRADDR,
8f4e8dac 2356 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
b65bb777 2357 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
e63440d6
IR
2358 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2359 NVMF_OPT_TOS,
71102307
CH
2360 .create_ctrl = nvme_rdma_create_ctrl,
2361};
2362
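/*
 * ib_client removal callback: if any of our controllers are using the
 * device being removed, delete them and flush the delete workqueue so the
 * removal does not return while controllers are still being torn down.
 */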
e87a911f
SW
2363static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2364{
2365 struct nvme_rdma_ctrl *ctrl;
9bad0404
MG
2366 struct nvme_rdma_device *ndev;
2367 bool found = false;
2368
2369 mutex_lock(&device_list_mutex);
2370 list_for_each_entry(ndev, &device_list, entry) {
2371 if (ndev->dev == ib_device) {
2372 found = true;
2373 break;
2374 }
2375 }
2376 mutex_unlock(&device_list_mutex);
2377
2378 if (!found)
2379 return;
e87a911f
SW
2380
2381 /* Delete all controllers using this device */
2382 mutex_lock(&nvme_rdma_ctrl_mutex);
2383 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2384 if (ctrl->device->dev != ib_device)
2385 continue;
c5017e85 2386 nvme_delete_ctrl(&ctrl->ctrl);
e87a911f
SW
2387 }
2388 mutex_unlock(&nvme_rdma_ctrl_mutex);
2389
b227c59b 2390 flush_workqueue(nvme_delete_wq);
e87a911f
SW
2391}
2392
2393static struct ib_client nvme_rdma_ib_client = {
2394 .name = "nvme_rdma",
e87a911f
SW
2395 .remove = nvme_rdma_remove_one
2396};
2397
71102307
CH
2398static int __init nvme_rdma_init_module(void)
2399{
e87a911f
SW
2400 int ret;
2401
e87a911f 2402 ret = ib_register_client(&nvme_rdma_ib_client);
a56c79cf 2403 if (ret)
9a6327d2 2404 return ret;
a56c79cf
SG
2405
2406 ret = nvmf_register_transport(&nvme_rdma_transport);
2407 if (ret)
2408 goto err_unreg_client;
e87a911f 2409
a56c79cf 2410 return 0;
e87a911f 2411
a56c79cf
SG
2412err_unreg_client:
2413 ib_unregister_client(&nvme_rdma_ib_client);
a56c79cf 2414 return ret;
71102307
CH
2415}
2416
2417static void __exit nvme_rdma_cleanup_module(void)
2418{
9ad9e8d6
MG
2419 struct nvme_rdma_ctrl *ctrl;
2420
71102307 2421 nvmf_unregister_transport(&nvme_rdma_transport);
e87a911f 2422 ib_unregister_client(&nvme_rdma_ib_client);
9ad9e8d6
MG
2423
2424 mutex_lock(&nvme_rdma_ctrl_mutex);
2425 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2426 nvme_delete_ctrl(&ctrl->ctrl);
2427 mutex_unlock(&nvme_rdma_ctrl_mutex);
2428 flush_workqueue(nvme_delete_wq);
71102307
CH
2429}
2430
2431module_init(nvme_rdma_init_module);
2432module_exit(nvme_rdma_cleanup_module);
2433
92b0b0ff 2434MODULE_DESCRIPTION("NVMe host RDMA transport driver");
71102307 2435MODULE_LICENSE("GPL v2");