Git Repo - linux.git/blobdiff - drivers/nvme/host/rdma.c
Merge branch 'for-4.10/block' of git://git.kernel.dk/linux-block
[linux.git] / drivers / nvme / host / rdma.c
index 251101bf982f5f0a99ecf33bbeef50be2e2bf761..f42ab70ffa38b6a03fc9943cc8ddbfe1855bc3d0 100644 (file)
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags {
        NVME_RDMA_Q_CONNECTED = (1 << 0),
        NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
        NVME_RDMA_Q_DELETING = (1 << 2),
+       NVME_RDMA_Q_LIVE = (1 << 3),
 };
 
 struct nvme_rdma_queue {
@@ -626,10 +627,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl)
 
        for (i = 1; i < ctrl->queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
-               if (ret)
-                       break;
+               if (ret) {
+                       dev_info(ctrl->ctrl.device,
+                               "failed to connect i/o queue: %d\n", ret);
+                       goto out_free_queues;
+               }
+               set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
        }
 
+       return 0;
+
+out_free_queues:
+       nvme_rdma_free_io_queues(ctrl);
        return ret;
 }
 
@@ -714,6 +723,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        if (ret)
                goto stop_admin_q;
 
+       set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (ret)
                goto stop_admin_q;
@@ -763,8 +774,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        nvme_stop_keep_alive(&ctrl->ctrl);
 
-       for (i = 0; i < ctrl->queue_count; i++)
+       for (i = 0; i < ctrl->queue_count; i++) {
                clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags);
+               clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags);
+       }
 
        if (ctrl->queue_count > 1)
                nvme_stop_queues(&ctrl->ctrl);
@@ -1372,6 +1385,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
        return BLK_EH_HANDLED;
 }
 
+/*
+ * Only the fabrics Connect command may pass until the queue is marked LIVE.
+ */
+static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+               struct request *rq)
+{
+       if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
+               struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+
+               if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
+                   cmd->common.opcode != nvme_fabrics_command ||
+                   cmd->fabrics.fctype != nvme_fabrics_type_connect)
+                       return false;
+       }
+
+       return true;
+}
+
 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
@@ -1388,6 +1419,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        WARN_ON_ONCE(rq->tag < 0);
 
+       if (!nvme_rdma_queue_is_ready(queue, rq))
+               return BLK_MQ_RQ_QUEUE_BUSY;
+
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
@@ -1537,6 +1571,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
        if (error)
                goto out_cleanup_queue;
 
+       set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
        if (error) {
                dev_err(ctrl->ctrl.device,
This page took 0.034012 seconds and 4 git commands to generate.