1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe PCI Endpoint Function target driver.
5 * Copyright (c) 2024, Western Digital Corporation or its affiliates.
7 * REDS Institute, HEIG-VD, HES-SO, Switzerland
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/delay.h>
12 #include <linux/dmaengine.h>
14 #include <linux/mempool.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/nvme.h>
18 #include <linux/pci_ids.h>
19 #include <linux/pci-epc.h>
20 #include <linux/pci-epf.h>
21 #include <linux/pci_regs.h>
22 #include <linux/slab.h>
26 static LIST_HEAD(nvmet_pci_epf_ports);
27 static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
30 * Default and maximum allowed data transfer size. For the default,
31 * allow up to 128 page-sized segments. For the maximum allowed,
32 * use 4 times the default (which is completely arbitrary).
34 #define NVMET_PCI_EPF_MAX_SEGS 128
35 #define NVMET_PCI_EPF_MDTS_KB \
36 (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10))
37 #define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4)
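/*
 * A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): the default
 * MDTS is 128 << (12 - 10) = 512 KB, and the maximum allowed mdts_kb is
 * 4 * 512 = 2048 KB.
 */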
40 * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an
41 * interrupt to the host. This default of 8 is completely arbitrary and can
42 * be changed by the host with an nvme_set_features command.
44 #define NVMET_PCI_EPF_IV_THRESHOLD 8
47 * BAR CC register and SQ polling intervals.
49 #define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5)
50 #define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
51 #define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
54 * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ.
56 #define NVMET_PCI_EPF_SQ_AB 8
59 * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ
60 * is full, in which case we retry the CQ processing after this interval.
62 #define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1)
64 enum nvmet_pci_epf_queue_flags {
65 NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */
66 NVMET_PCI_EPF_Q_LIVE, /* The queue is live */
67 NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */
71 * IRQ vector descriptor.
73 struct nvmet_pci_epf_irq_vector {
80 struct nvmet_pci_epf_queue {
82 struct nvmet_sq nvme_sq;
83 struct nvmet_cq nvme_cq;
85 struct nvmet_pci_epf_ctrl *ctrl;
90 struct pci_epc_map pci_map;
102 struct nvmet_pci_epf_irq_vector *iv;
103 struct workqueue_struct *iod_wq;
104 struct delayed_work work;
106 struct list_head list;
110 * PCI Root Complex (RC) address data segment for mapping an admin or
111 * I/O command buffer @buf of @length bytes to the PCI address @pci_addr.
113 struct nvmet_pci_epf_segment {
120 * Command descriptors.
122 struct nvmet_pci_epf_iod {
123 struct list_head link;
125 struct nvmet_req req;
126 struct nvme_command cmd;
127 struct nvme_completion cqe;
130 struct nvmet_pci_epf_ctrl *ctrl;
132 struct nvmet_pci_epf_queue *sq;
133 struct nvmet_pci_epf_queue *cq;
135 /* Data transfer size and direction for the command. */
137 enum dma_data_direction dma_dir;
140 * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we
141 * use only @data_seg. Otherwise, the array of segments @data_segs is
142 * allocated to manage multiple PCI address data segments. @data_sgl and
143 * @data_sgt are used to set up the command request for execution by the NVMe target core.
146 unsigned int nr_data_segs;
147 struct nvmet_pci_epf_segment data_seg;
148 struct nvmet_pci_epf_segment *data_segs;
149 struct scatterlist data_sgl;
150 struct sg_table data_sgt;
152 struct work_struct work;
153 struct completion done;
157 * PCI target controller private data.
159 struct nvmet_pci_epf_ctrl {
160 struct nvmet_pci_epf *nvme_epf;
161 struct nvmet_port *port;
162 struct nvmet_ctrl *tctrl;
165 unsigned int nr_queues;
166 struct nvmet_pci_epf_queue *sq;
167 struct nvmet_pci_epf_queue *cq;
185 struct delayed_work poll_cc;
186 struct delayed_work poll_sqs;
188 struct mutex irq_lock;
189 struct nvmet_pci_epf_irq_vector *irq_vectors;
190 unsigned int irq_vector_threshold;
197 * PCI EPF driver private data.
199 struct nvmet_pci_epf {
202 const struct pci_epc_features *epc_features;
205 size_t msix_table_offset;
207 unsigned int irq_type;
208 unsigned int nr_vectors;
210 struct nvmet_pci_epf_ctrl ctrl;
213 struct dma_chan *dma_tx_chan;
214 struct mutex dma_tx_lock;
215 struct dma_chan *dma_rx_chan;
216 struct mutex dma_rx_lock;
218 struct mutex mmio_lock;
220 /* PCI endpoint function configfs attributes. */
221 struct config_group group;
223 char subsysnqn[NVMF_NQN_SIZE];
224 unsigned int mdts_kb;
227 static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl,
230 __le32 *bar_reg = ctrl->bar + off;
232 return le32_to_cpu(READ_ONCE(*bar_reg));
235 static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl,
238 __le32 *bar_reg = ctrl->bar + off;
240 WRITE_ONCE(*bar_reg, cpu_to_le32(val));
243 static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl,
246 return (u64)nvmet_pci_epf_bar_read32(ctrl, off) |
247 ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32);
250 static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl,
253 nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF);
254 nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF);
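/*
 * 64-bit BAR registers (e.g. CAP, ASQ and ACQ) are accessed as two
 * little-endian 32-bit halves: the low word at @off and the high word at
 * @off + 4.
 */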
257 static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf,
258 u64 pci_addr, size_t size, struct pci_epc_map *map)
260 struct pci_epf *epf = nvme_epf->epf;
262 return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no,
263 pci_addr, size, map);
266 static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf,
267 struct pci_epc_map *map)
269 struct pci_epf *epf = nvme_epf->epf;
271 pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map);
274 struct nvmet_pci_epf_dma_filter {
279 static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg)
281 struct nvmet_pci_epf_dma_filter *filter = arg;
282 struct dma_slave_caps caps;
284 memset(&caps, 0, sizeof(caps));
285 dma_get_slave_caps(chan, &caps);
287 return chan->device->dev == filter->dev &&
288 (filter->dma_mask & caps.directions);
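/*
 * The filter above only accepts a channel that belongs to the DMA device
 * of the endpoint controller parent and that supports the requested
 * transfer direction: DMA_DEV_TO_MEM for reading data from the host,
 * DMA_MEM_TO_DEV for writing data to the host.
 */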
291 static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf)
293 struct pci_epf *epf = nvme_epf->epf;
294 struct device *dev = &epf->dev;
295 struct nvmet_pci_epf_dma_filter filter;
296 struct dma_chan *chan;
299 mutex_init(&nvme_epf->dma_rx_lock);
300 mutex_init(&nvme_epf->dma_tx_lock);
303 dma_cap_set(DMA_SLAVE, mask);
305 filter.dev = epf->epc->dev.parent;
306 filter.dma_mask = BIT(DMA_DEV_TO_MEM);
308 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
312 nvme_epf->dma_rx_chan = chan;
314 filter.dma_mask = BIT(DMA_MEM_TO_DEV);
315 chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter);
319 nvme_epf->dma_tx_chan = chan;
321 nvme_epf->dma_enabled = true;
323 dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n",
325 dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
327 dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n",
329 dma_get_max_seg_size(dmaengine_get_dma_device(chan)));
334 dma_release_channel(nvme_epf->dma_rx_chan);
335 nvme_epf->dma_rx_chan = NULL;
338 mutex_destroy(&nvme_epf->dma_rx_lock);
339 mutex_destroy(&nvme_epf->dma_tx_lock);
340 nvme_epf->dma_enabled = false;
342 dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n");
345 static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf)
347 if (!nvme_epf->dma_enabled)
350 dma_release_channel(nvme_epf->dma_tx_chan);
351 nvme_epf->dma_tx_chan = NULL;
352 dma_release_channel(nvme_epf->dma_rx_chan);
353 nvme_epf->dma_rx_chan = NULL;
354 mutex_destroy(&nvme_epf->dma_rx_lock);
355 mutex_destroy(&nvme_epf->dma_tx_lock);
356 nvme_epf->dma_enabled = false;
359 static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf,
360 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
362 struct pci_epf *epf = nvme_epf->epf;
363 struct dma_async_tx_descriptor *desc;
364 struct dma_slave_config sconf = {};
365 struct device *dev = &epf->dev;
366 struct device *dma_dev;
367 struct dma_chan *chan;
374 case DMA_FROM_DEVICE:
375 lock = &nvme_epf->dma_rx_lock;
376 chan = nvme_epf->dma_rx_chan;
377 sconf.direction = DMA_DEV_TO_MEM;
378 sconf.src_addr = seg->pci_addr;
381 lock = &nvme_epf->dma_tx_lock;
382 chan = nvme_epf->dma_tx_chan;
383 sconf.direction = DMA_MEM_TO_DEV;
384 sconf.dst_addr = seg->pci_addr;
392 dma_dev = dmaengine_get_dma_device(chan);
393 dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir);
394 ret = dma_mapping_error(dma_dev, dma_addr);
398 ret = dmaengine_slave_config(chan, &sconf);
400 dev_err(dev, "Failed to configure DMA channel\n");
404 desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length,
405 sconf.direction, DMA_CTRL_ACK);
407 dev_err(dev, "Failed to prepare DMA\n");
412 cookie = dmaengine_submit(desc);
413 ret = dma_submit_error(cookie);
415 dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret);
419 if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
420 dev_err(dev, "DMA transfer failed\n");
424 dmaengine_terminate_sync(chan);
427 dma_unmap_single(dma_dev, dma_addr, seg->length, dir);
435 static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf,
436 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
438 u64 pci_addr = seg->pci_addr;
439 u32 length = seg->length;
440 void *buf = seg->buf;
441 struct pci_epc_map map;
445 * Note: MMIO transfers do not need serialization, but serializing them is
446 * a simple way to avoid using too many mapping windows.
448 mutex_lock(&nvme_epf->mmio_lock);
451 ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map);
456 case DMA_FROM_DEVICE:
457 memcpy_fromio(buf, map.virt_addr, map.pci_size);
460 memcpy_toio(map.virt_addr, buf, map.pci_size);
467 pci_addr += map.pci_size;
469 length -= map.pci_size;
471 nvmet_pci_epf_mem_unmap(nvme_epf, &map);
475 mutex_unlock(&nvme_epf->mmio_lock);
480 static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf,
481 struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir)
483 if (nvme_epf->dma_enabled)
484 return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir);
486 return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir);
489 static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl,
490 void *buf, u64 pci_addr, u32 length,
491 enum dma_data_direction dir)
493 struct nvmet_pci_epf_segment seg = {
495 .pci_addr = pci_addr,
499 return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir);
502 static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
504 ctrl->irq_vectors = kcalloc(ctrl->nr_queues,
505 sizeof(struct nvmet_pci_epf_irq_vector),
507 if (!ctrl->irq_vectors)
510 mutex_init(&ctrl->irq_lock);
515 static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl)
517 if (ctrl->irq_vectors) {
518 mutex_destroy(&ctrl->irq_lock);
519 kfree(ctrl->irq_vectors);
520 ctrl->irq_vectors = NULL;
524 static struct nvmet_pci_epf_irq_vector *
525 nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
527 struct nvmet_pci_epf_irq_vector *iv;
530 lockdep_assert_held(&ctrl->irq_lock);
532 for (i = 0; i < ctrl->nr_queues; i++) {
533 iv = &ctrl->irq_vectors[i];
534 if (iv->ref && iv->vector == vector)
541 static struct nvmet_pci_epf_irq_vector *
542 nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector)
544 struct nvmet_pci_epf_irq_vector *iv;
547 mutex_lock(&ctrl->irq_lock);
549 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
555 for (i = 0; i < ctrl->nr_queues; i++) {
556 iv = &ctrl->irq_vectors[i];
561 if (WARN_ON_ONCE(!iv))
569 mutex_unlock(&ctrl->irq_lock);
574 static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl,
577 struct nvmet_pci_epf_irq_vector *iv;
579 mutex_lock(&ctrl->irq_lock);
581 iv = nvmet_pci_epf_find_irq_vector(ctrl, vector);
590 mutex_unlock(&ctrl->irq_lock);
593 static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
594 struct nvmet_pci_epf_queue *cq, bool force)
596 struct nvmet_pci_epf_irq_vector *iv = cq->iv;
599 if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
602 /* IRQ coalescing for the admin queue is not allowed. */
610 ret = iv->nr_irqs > 0;
613 ret = iv->nr_irqs >= ctrl->irq_vector_threshold;
621 static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl,
622 struct nvmet_pci_epf_queue *cq, bool force)
624 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
625 struct pci_epf *epf = nvme_epf->epf;
628 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
631 mutex_lock(&ctrl->irq_lock);
633 if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force))
636 switch (nvme_epf->irq_type) {
639 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
640 nvme_epf->irq_type, cq->vector + 1);
644 * If we got an error, it is likely because the host is using
645 * legacy IRQs (e.g. BIOS, grub).
649 ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no,
659 dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret);
662 mutex_unlock(&ctrl->irq_lock);
665 static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)
667 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);
670 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work);
672 static struct nvmet_pci_epf_iod *
673 nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq)
675 struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl;
676 struct nvmet_pci_epf_iod *iod;
678 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);
682 memset(iod, 0, sizeof(*iod));
683 iod->req.cmd = &iod->cmd;
684 iod->req.cqe = &iod->cqe;
685 iod->req.port = ctrl->port;
688 iod->cq = &ctrl->cq[sq->qid];
689 INIT_LIST_HEAD(&iod->link);
690 iod->dma_dir = DMA_NONE;
691 INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work);
692 init_completion(&iod->done);
698 * Allocate or grow a command table of PCI segments.
700 static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod,
703 struct nvmet_pci_epf_segment *segs;
704 int nr_segs = iod->nr_data_segs + nsegs;
706 segs = krealloc(iod->data_segs,
707 nr_segs * sizeof(struct nvmet_pci_epf_segment),
708 GFP_KERNEL | __GFP_ZERO);
712 iod->nr_data_segs = nr_segs;
713 iod->data_segs = segs;
718 static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)
722 if (iod->data_segs) {
723 for (i = 0; i < iod->nr_data_segs; i++)
724 kfree(iod->data_segs[i].buf);
725 if (iod->data_segs != &iod->data_seg)
726 kfree(iod->data_segs);
728 if (iod->data_sgt.nents > 1)
729 sg_free_table(&iod->data_sgt);
730 mempool_free(iod, &iod->ctrl->iod_pool);
733 static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)
735 struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf;
736 struct nvmet_pci_epf_segment *seg = &iod->data_segs[0];
739 /* Split the data transfer according to the PCI segments. */
740 for (i = 0; i < iod->nr_data_segs; i++, seg++) {
741 ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir);
743 iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR;
751 static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl,
754 return prp & ctrl->mps_mask;
757 static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl,
760 return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp);
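/*
 * Example, assuming a 4 KiB memory page size (MPS): a PRP of 0x100200 has
 * an offset of 0x200 within its page, so nvmet_pci_epf_prp_size() returns
 * 4096 - 512 = 3584 B, that is, the room left up to the end of the page.
 */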
764 * Transfer a PRP list from the host and return the number of PRPs.
766 static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp,
767 size_t xfer_len, __le64 *prps)
769 size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift;
774 * Compute the number of PRPs required for the number of bytes to
775 * transfer (xfer_len). If this number of PRPs does not fit in the
776 * memory page pointed to by @prp, only return the entries that fit in
777 * the remainder of that page; the last PRP there will be a PRP list
778 * pointer to the remaining PRPs.
780 length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3);
781 ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE);
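/*
 * Note: each PRP entry is 8 B, so a 4 KiB memory page holds at most 512
 * entries, which is why the transfer length above is capped by the space
 * remaining in the page pointed to by @prp.
 */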
788 static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl,
789 struct nvmet_pci_epf_iod *iod)
791 struct nvme_command *cmd = &iod->cmd;
792 struct nvmet_pci_epf_segment *seg;
793 size_t size = 0, ofst, prp_size, xfer_len;
794 size_t transfer_len = iod->data_len;
795 int nr_segs, nr_prps = 0;
800 prps = kzalloc(ctrl->mps, GFP_KERNEL);
805 * Allocate PCI segments for the command: this considers the worst-case
806 * scenario where all PRPs are discontiguous, so allocate as many
807 * segments as there can be PRPs. In practice, we will most of the time
808 * have far fewer PCI segments than PRPs.
810 prp = le64_to_cpu(cmd->common.dptr.prp1);
812 goto err_invalid_field;
814 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp);
815 nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift;
817 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
821 /* Set the first segment using prp1. */
822 seg = &iod->data_segs[0];
824 seg->length = nvmet_pci_epf_prp_size(ctrl, prp);
827 pci_addr = prp + size;
831 * Now build the PCI address segments using the PRP lists, starting
834 prp = le64_to_cpu(cmd->common.dptr.prp2);
836 goto err_invalid_field;
838 while (size < transfer_len) {
839 xfer_len = transfer_len - size;
842 nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp,
852 prp = le64_to_cpu(prps[i]);
854 goto err_invalid_field;
856 /* Did we reach the last PRP entry of the list? */
857 if (xfer_len > ctrl->mps && i == nr_prps - 1) {
858 /* We need more PRPs: PRP is a list pointer. */
863 /* Only the first PRP is allowed to have an offset. */
864 if (nvmet_pci_epf_prp_ofst(ctrl, prp))
865 goto err_invalid_offset;
867 if (prp != pci_addr) {
868 /* Discontiguous prp: new segment. */
870 if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs))
879 prp_size = min_t(size_t, ctrl->mps, xfer_len);
880 seg->length += prp_size;
881 pci_addr += prp_size;
887 iod->nr_data_segs = nr_segs;
890 if (size != transfer_len) {
892 "PRPs transfer length mismatch: got %zu B, need %zu B\n",
902 dev_err(ctrl->dev, "PRPs list invalid offset\n");
903 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
907 dev_err(ctrl->dev, "PRPs list invalid field\n");
908 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
912 dev_err(ctrl->dev, "PRPs list internal error\n");
913 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
920 static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl,
921 struct nvmet_pci_epf_iod *iod)
923 struct nvme_command *cmd = &iod->cmd;
924 size_t transfer_len = iod->data_len;
925 int ret, nr_segs = 1;
929 prp1 = le64_to_cpu(cmd->common.dptr.prp1);
930 prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1);
932 /* For commands crossing a page boundary, we should have prp2. */
933 if (transfer_len > prp1_size) {
934 prp2 = le64_to_cpu(cmd->common.dptr.prp2);
936 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
939 if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) {
941 NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
944 if (prp2 != prp1 + prp1_size)
949 iod->nr_data_segs = 1;
950 iod->data_segs = &iod->data_seg;
951 iod->data_segs[0].pci_addr = prp1;
952 iod->data_segs[0].length = transfer_len;
956 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs);
958 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
962 iod->data_segs[0].pci_addr = prp1;
963 iod->data_segs[0].length = prp1_size;
964 iod->data_segs[1].pci_addr = prp2;
965 iod->data_segs[1].length = transfer_len - prp1_size;
970 static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)
972 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
973 u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1);
976 /* Get the PCI address segments for the command using its PRPs. */
977 ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1);
979 iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR;
983 if (iod->data_len + ofst <= ctrl->mps * 2)
984 return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod);
986 return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod);
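/*
 * Note on the dispatch above: a transfer that, together with the offset
 * of prp1, fits in at most two memory pages can be described directly by
 * prp1 and prp2. Anything larger requires prp2 to point to a PRP list,
 * which nvmet_pci_epf_iod_parse_prp_list() walks.
 */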
990 * Transfer an SGL segment from the host and return the number of data
991 * descriptors and the next segment descriptor, if any.
993 static struct nvme_sgl_desc *
994 nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl,
995 struct nvme_sgl_desc *desc, unsigned int *nr_sgls)
997 struct nvme_sgl_desc *sgls;
998 u32 length = le32_to_cpu(desc->length);
1002 buf = kmalloc(length, GFP_KERNEL);
1006 ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length,
1014 nr_descs = length / sizeof(struct nvme_sgl_desc);
1015 if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) ||
1016 sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1018 * We have another SGL segment following this one: do not count
1019 * it as a regular data SGL descriptor and return it to the caller.
1022 *desc = sgls[nr_descs - 1];
1025 /* We do not have another SGL segment after this one. */
1029 *nr_sgls = nr_descs;
1034 static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl,
1035 struct nvmet_pci_epf_iod *iod)
1037 struct nvme_command *cmd = &iod->cmd;
1038 struct nvme_sgl_desc seg = cmd->common.dptr.sgl;
1039 struct nvme_sgl_desc *sgls = NULL;
1040 int n = 0, i, nr_sgls;
1044 * We support neither inline data nor keyed SGLs, so we should be seeing
1045 * only segment descriptors here.
1047 if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) &&
1048 seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) {
1049 iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR;
1053 while (seg.length) {
1054 sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls);
1056 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1060 /* Grow the PCI segment table as needed. */
1061 ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls);
1063 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1068 * Parse the SGL descriptors to build the PCI segment table,
1069 * checking the descriptor type as we go.
1071 for (i = 0; i < nr_sgls; i++) {
1072 if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) {
1073 iod->status = NVME_SC_SGL_INVALID_TYPE |
1077 iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr);
1078 iod->data_segs[n].length = le32_to_cpu(sgls[i].length);
1086 if (iod->status != NVME_SC_SUCCESS) {
1094 static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)
1096 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1097 struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl;
1099 if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) {
1100 /* Single data descriptor case. */
1101 iod->nr_data_segs = 1;
1102 iod->data_segs = &iod->data_seg;
1103 iod->data_seg.pci_addr = le64_to_cpu(sgl->addr);
1104 iod->data_seg.length = le32_to_cpu(sgl->length);
1108 return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod);
1111 static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)
1113 struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl;
1114 struct nvmet_req *req = &iod->req;
1115 struct nvmet_pci_epf_segment *seg;
1116 struct scatterlist *sg;
1119 if (iod->data_len > ctrl->mdts) {
1120 iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1125 * Get the PCI address segments for the command data buffer using either its SGLs or its PRPs.
1128 if (iod->cmd.common.flags & NVME_CMD_SGL_ALL)
1129 ret = nvmet_pci_epf_iod_parse_sgls(iod);
1131 ret = nvmet_pci_epf_iod_parse_prps(iod);
1135 /* Get a command buffer using SGLs matching the PCI segments. */
1136 if (iod->nr_data_segs == 1) {
1137 sg_init_table(&iod->data_sgl, 1);
1138 iod->data_sgt.sgl = &iod->data_sgl;
1139 iod->data_sgt.nents = 1;
1140 iod->data_sgt.orig_nents = 1;
1142 ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs,
1148 for_each_sgtable_sg(&iod->data_sgt, sg, i) {
1149 seg = &iod->data_segs[i];
1150 seg->buf = kmalloc(seg->length, GFP_KERNEL);
1153 sg_set_buf(sg, seg->buf, seg->length);
1156 req->transfer_len = iod->data_len;
1157 req->sg = iod->data_sgt.sgl;
1158 req->sg_cnt = iod->data_sgt.nents;
1163 iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1167 static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)
1169 struct nvmet_pci_epf_queue *cq = iod->cq;
1170 unsigned long flags;
1172 /* Print an error message for failed commands, except AENs. */
1173 iod->status = le16_to_cpu(iod->cqe.status) >> 1;
1174 if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event)
1175 dev_err(iod->ctrl->dev,
1176 "CQ[%d]: Command %s (0x%x) status 0x%0x\n",
1177 iod->sq->qid, nvmet_pci_epf_iod_name(iod),
1178 iod->cmd.common.opcode, iod->status);
1181 * Add the command to the list of completed commands and schedule the CQ work.
1184 spin_lock_irqsave(&cq->lock, flags);
1185 list_add_tail(&iod->link, &cq->list);
1186 queue_delayed_work(system_highpri_wq, &cq->work, 0);
1187 spin_unlock_irqrestore(&cq->lock, flags);
1190 static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue)
1192 struct nvmet_pci_epf_iod *iod;
1193 unsigned long flags;
1195 spin_lock_irqsave(&queue->lock, flags);
1196 while (!list_empty(&queue->list)) {
1197 iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod,
1199 list_del_init(&iod->link);
1200 nvmet_pci_epf_free_iod(iod);
1202 spin_unlock_irqrestore(&queue->lock, flags);
1205 static int nvmet_pci_epf_add_port(struct nvmet_port *port)
1207 mutex_lock(&nvmet_pci_epf_ports_mutex);
1208 list_add_tail(&port->entry, &nvmet_pci_epf_ports);
1209 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1213 static void nvmet_pci_epf_remove_port(struct nvmet_port *port)
1215 mutex_lock(&nvmet_pci_epf_ports_mutex);
1216 list_del_init(&port->entry);
1217 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1220 static struct nvmet_port *
1221 nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid)
1223 struct nvmet_port *p, *port = NULL;
1225 mutex_lock(&nvmet_pci_epf_ports_mutex);
1226 list_for_each_entry(p, &nvmet_pci_epf_ports, entry) {
1227 if (p->disc_addr.portid == portid) {
1232 mutex_unlock(&nvmet_pci_epf_ports_mutex);
1237 static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
1239 struct nvmet_pci_epf_iod *iod =
1240 container_of(req, struct nvmet_pci_epf_iod, req);
1242 iod->status = le16_to_cpu(req->cqe->status) >> 1;
1244 /* If we have no data to transfer to the host, directly complete the command. */
1245 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
1246 nvmet_pci_epf_complete_iod(iod);
1250 complete(&iod->done);
1253 static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl)
1255 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1256 int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12;
1258 return ilog2(ctrl->mdts) - page_shift;
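/*
 * Example: with mdts = 512 KB and CAP.MPSMIN = 0 (4 KiB pages), this
 * reports ilog2(524288) - 12 = 7, i.e. a maximum of 2^7 = 128
 * minimum-sized pages per data transfer.
 */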
1261 static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
1262 u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector)
1264 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1265 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1268 if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1269 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1271 if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1272 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1274 if (flags & NVME_CQ_IRQ_ENABLED)
1275 set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
1277 cq->pci_addr = pci_addr;
1279 cq->depth = qsize + 1;
1280 cq->vector = vector;
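/*
 * With the 4 B doorbell stride advertised in CAP, SQ doorbells occupy the
 * even 32-bit slots and CQ doorbells the odd slots after NVME_REG_DBS
 * (offset 0x1000). For example, the doorbell of CQ 1 is at
 * 0x1000 + ((1 * 2) + 1) * 4 = 0x100c.
 */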
1284 cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32));
1285 nvmet_pci_epf_bar_write32(ctrl, cq->db, 0);
1288 cq->qes = sizeof(struct nvme_completion);
1290 cq->qes = ctrl->io_cqes;
1291 cq->pci_size = cq->qes * cq->depth;
1293 cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
1295 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1299 status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
1300 if (status != NVME_SC_SUCCESS)
1303 dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
1304 cqid, qsize, cq->qes, cq->vector);
1306 return NVME_SC_SUCCESS;
1309 clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
1310 clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
1314 static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid)
1316 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1317 struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
1319 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
1320 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1322 cancel_delayed_work_sync(&cq->work);
1323 nvmet_pci_epf_drain_queue(cq);
1324 nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
1326 return NVME_SC_SUCCESS;
1329 static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
1330 u16 sqid, u16 flags, u16 qsize, u64 pci_addr)
1332 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1333 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1336 if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1337 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1339 if (!(flags & NVME_QUEUE_PHYS_CONTIG))
1340 return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
1342 sq->pci_addr = pci_addr;
1344 sq->depth = qsize + 1;
1348 sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32));
1349 nvmet_pci_epf_bar_write32(ctrl, sq->db, 0);
1351 sq->qes = 1UL << NVME_ADM_SQES;
1353 sq->qes = ctrl->io_sqes;
1354 sq->pci_size = sq->qes * sq->depth;
1356 status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
1357 if (status != NVME_SC_SUCCESS)
1360 sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
1361 min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
1363 dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid);
1364 status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
1365 goto out_destroy_sq;
1368 dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
1369 sqid, qsize, sq->qes);
1371 return NVME_SC_SUCCESS;
1374 nvmet_sq_destroy(&sq->nvme_sq);
1376 clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
1380 static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid)
1382 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1383 struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
1385 if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1386 return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1388 flush_workqueue(sq->iod_wq);
1389 destroy_workqueue(sq->iod_wq);
1392 nvmet_pci_epf_drain_queue(sq);
1394 if (sq->nvme_sq.ctrl)
1395 nvmet_sq_destroy(&sq->nvme_sq);
1397 return NVME_SC_SUCCESS;
1400 static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl,
1401 u8 feat, void *data)
1403 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1404 struct nvmet_feat_arbitration *arb;
1405 struct nvmet_feat_irq_coalesce *irqc;
1406 struct nvmet_feat_irq_config *irqcfg;
1407 struct nvmet_pci_epf_irq_vector *iv;
1411 case NVME_FEAT_ARBITRATION:
1416 arb->ab = ilog2(ctrl->sq_ab);
1417 return NVME_SC_SUCCESS;
1419 case NVME_FEAT_IRQ_COALESCE:
1421 irqc->thr = ctrl->irq_vector_threshold;
1423 return NVME_SC_SUCCESS;
1425 case NVME_FEAT_IRQ_CONFIG:
1427 mutex_lock(&ctrl->irq_lock);
1428 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1430 irqcfg->cd = iv->cd;
1431 status = NVME_SC_SUCCESS;
1433 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1435 mutex_unlock(&ctrl->irq_lock);
1439 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1443 static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
1444 u8 feat, void *data)
1446 struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata;
1447 struct nvmet_feat_arbitration *arb;
1448 struct nvmet_feat_irq_coalesce *irqc;
1449 struct nvmet_feat_irq_config *irqcfg;
1450 struct nvmet_pci_epf_irq_vector *iv;
1454 case NVME_FEAT_ARBITRATION:
1459 ctrl->sq_ab = 1 << arb->ab;
1460 return NVME_SC_SUCCESS;
1462 case NVME_FEAT_IRQ_COALESCE:
1464 * Since we do not implement precise IRQ coalescing timing,
1465 * ignore the time field.
1468 ctrl->irq_vector_threshold = irqc->thr + 1;
1469 return NVME_SC_SUCCESS;
1471 case NVME_FEAT_IRQ_CONFIG:
1473 mutex_lock(&ctrl->irq_lock);
1474 iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv);
1476 iv->cd = irqcfg->cd;
1477 status = NVME_SC_SUCCESS;
1479 status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1481 mutex_unlock(&ctrl->irq_lock);
1485 return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
1489 static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
1490 .owner = THIS_MODULE,
1491 .type = NVMF_TRTYPE_PCI,
1492 .add_port = nvmet_pci_epf_add_port,
1493 .remove_port = nvmet_pci_epf_remove_port,
1494 .queue_response = nvmet_pci_epf_queue_response,
1495 .get_mdts = nvmet_pci_epf_get_mdts,
1496 .create_cq = nvmet_pci_epf_create_cq,
1497 .delete_cq = nvmet_pci_epf_delete_cq,
1498 .create_sq = nvmet_pci_epf_create_sq,
1499 .delete_sq = nvmet_pci_epf_delete_sq,
1500 .get_feature = nvmet_pci_epf_get_feat,
1501 .set_feature = nvmet_pci_epf_set_feat,
1504 static void nvmet_pci_epf_cq_work(struct work_struct *work);
1506 static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl,
1507 unsigned int qid, bool sq)
1509 struct nvmet_pci_epf_queue *queue;
1512 queue = &ctrl->sq[qid];
1513 set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags);
1515 queue = &ctrl->cq[qid];
1516 INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work);
1520 spin_lock_init(&queue->lock);
1521 INIT_LIST_HEAD(&queue->list);
1524 static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl)
1528 ctrl->sq = kcalloc(ctrl->nr_queues,
1529 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
1533 ctrl->cq = kcalloc(ctrl->nr_queues,
1534 sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL);
1541 for (qid = 0; qid < ctrl->nr_queues; qid++) {
1542 nvmet_pci_epf_init_queue(ctrl, qid, true);
1543 nvmet_pci_epf_init_queue(ctrl, qid, false);
1549 static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl)
1557 static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl,
1558 struct nvmet_pci_epf_queue *queue)
1560 struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf;
1563 ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr,
1564 queue->pci_size, &queue->pci_map);
1566 dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n",
1571 if (queue->pci_map.pci_size < queue->pci_size) {
1572 dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n",
1574 nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map);
1581 static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl,
1582 struct nvmet_pci_epf_queue *queue)
1584 nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map);
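/*
 * Execute one command: initialize the target request, allocate the command
 * data buffer and fetch the data from the host for writes, execute the
 * command through the target core and, for reads, wait for the completion
 * and transfer the data back to the host before posting the CQE.
 */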
1587 static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
1589 struct nvmet_pci_epf_iod *iod =
1590 container_of(work, struct nvmet_pci_epf_iod, work);
1591 struct nvmet_req *req = &iod->req;
1594 if (!iod->ctrl->link_up) {
1595 nvmet_pci_epf_free_iod(iod);
1599 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) {
1600 iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR;
1604 if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq,
1605 &nvmet_pci_epf_fabrics_ops))
1608 iod->data_len = nvmet_req_transfer_len(req);
1609 if (iod->data_len) {
1611 * Get the data DMA transfer direction. Here "device" means the
1612 * PCI root-complex host.
1614 if (nvme_is_write(&iod->cmd))
1615 iod->dma_dir = DMA_FROM_DEVICE;
1617 iod->dma_dir = DMA_TO_DEVICE;
1620 * Set up the command data buffer and get the command data from
1621 * the host if needed.
1623 ret = nvmet_pci_epf_alloc_iod_data_buf(iod);
1624 if (!ret && iod->dma_dir == DMA_FROM_DEVICE)
1625 ret = nvmet_pci_epf_transfer_iod_data(iod);
1627 nvmet_req_uninit(req);
1635 * If we do not have data to transfer after the command execution
1636 * finishes, nvmet_pci_epf_queue_response() will complete the command
1637 * directly. No need to wait for the completion in this case.
1639 if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE)
1642 wait_for_completion(&iod->done);
1644 if (iod->status == NVME_SC_SUCCESS) {
1645 WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
1646 nvmet_pci_epf_transfer_iod_data(iod);
1650 nvmet_pci_epf_complete_iod(iod);
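/*
 * Fetch commands from a submission queue: at most sq_ab (arbitration
 * burst) commands are taken from an SQ per call, with the SQs serviced
 * round-robin by nvmet_pci_epf_poll_sqs_work().
 */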
1653 static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl,
1654 struct nvmet_pci_epf_queue *sq)
1656 struct nvmet_pci_epf_iod *iod;
1659 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1660 while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) {
1661 iod = nvmet_pci_epf_alloc_iod(sq);
1665 /* Get the NVMe command submitted by the host. */
1666 ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd,
1667 sq->pci_addr + sq->head * sq->qes,
1668 sq->qes, DMA_FROM_DEVICE);
1670 /* Not much we can do... */
1671 nvmet_pci_epf_free_iod(iod);
1675 dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n",
1676 sq->qid, sq->head, sq->tail,
1677 nvmet_pci_epf_iod_name(iod));
1680 if (sq->head == sq->depth)
1684 queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work);
1686 sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db);
1692 static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
1694 struct nvmet_pci_epf_ctrl *ctrl =
1695 container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
1696 struct nvmet_pci_epf_queue *sq;
1697 unsigned long last = 0;
1700 while (ctrl->link_up && ctrl->enabled) {
1702 /* Do round-robin arbitration. */
1703 for (i = 0; i < ctrl->nr_queues; i++) {
1705 if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
1707 if (nvmet_pci_epf_process_sq(ctrl, sq))
1717 * If we have not received any command on any queue for more
1718 * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and
1719 * reschedule. This avoids "burning" a CPU when the controller
1720 * is idle for a long time.
1722 if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE))
1728 schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL);
1731 static void nvmet_pci_epf_cq_work(struct work_struct *work)
1733 struct nvmet_pci_epf_queue *cq =
1734 container_of(work, struct nvmet_pci_epf_queue, work.work);
1735 struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl;
1736 struct nvme_completion *cqe;
1737 struct nvmet_pci_epf_iod *iod;
1738 unsigned long flags;
1741 ret = nvmet_pci_epf_map_queue(ctrl, cq);
1745 while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) {
1747 /* Check that the CQ is not full. */
1748 cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db);
1749 if (cq->head == cq->tail + 1) {
1754 spin_lock_irqsave(&cq->lock, flags);
1755 iod = list_first_entry_or_null(&cq->list,
1756 struct nvmet_pci_epf_iod, link);
1758 list_del_init(&iod->link);
1759 spin_unlock_irqrestore(&cq->lock, flags);
1764 /* Post the IOD completion entry. */
1766 cqe->status = cpu_to_le16((iod->status << 1) | cq->phase);
1769 "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n",
1770 cq->qid, nvmet_pci_epf_iod_name(iod), iod->status,
1771 le64_to_cpu(cqe->result.u64), cq->head, cq->tail,
1774 memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes,
1778 if (cq->tail >= cq->depth) {
1783 nvmet_pci_epf_free_iod(iod);
1785 /* Signal the host. */
1786 nvmet_pci_epf_raise_irq(ctrl, cq, false);
1790 nvmet_pci_epf_unmap_queue(ctrl, cq);
1793 * We do not support precise IRQ coalescing time (100ns units as per
1794 * NVMe specifications). So if we have posted completion entries without
1795 * reaching the interrupt coalescing threshold, raise an interrupt.
1798 nvmet_pci_epf_raise_irq(ctrl, cq, true);
1802 queue_delayed_work(system_highpri_wq, &cq->work,
1803 NVMET_PCI_EPF_CQ_RETRY_INTERVAL);
1806 static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
1808 u64 pci_addr, asq, acq;
1815 dev_info(ctrl->dev, "Enabling controller\n");
1817 ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12;
1818 ctrl->mps = 1UL << ctrl->mps_shift;
1819 ctrl->mps_mask = ctrl->mps - 1;
1821 ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc);
1822 if (ctrl->io_sqes < sizeof(struct nvme_command)) {
1823 dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
1824 ctrl->io_sqes, sizeof(struct nvme_command));
1828 ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
1829 if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
1830 dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
1831 ctrl->io_cqes, sizeof(struct nvme_completion));
1835 /* Create the admin queue. */
1836 aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA);
1837 asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ);
1838 acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ);
1840 qsize = (aqa & 0x0fff0000) >> 16;
1841 pci_addr = acq & GENMASK_ULL(63, 12);
1842 status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0,
1843 NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG,
1844 qsize, pci_addr, 0);
1845 if (status != NVME_SC_SUCCESS) {
1846 dev_err(ctrl->dev, "Failed to create admin completion queue\n");
1850 qsize = aqa & 0x00000fff;
1851 pci_addr = asq & GENMASK_ULL(63, 12);
1852 status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG,
1854 if (status != NVME_SC_SUCCESS) {
1855 dev_err(ctrl->dev, "Failed to create admin submission queue\n");
1856 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
1860 ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
1861 ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
1862 ctrl->enabled = true;
1864 /* Start polling the controller SQs. */
1865 schedule_delayed_work(&ctrl->poll_sqs, 0);
1870 static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
1877 dev_info(ctrl->dev, "Disabling controller\n");
1879 ctrl->enabled = false;
1880 cancel_delayed_work_sync(&ctrl->poll_sqs);
1882 /* Delete all I/O queues first. */
1883 for (qid = 1; qid < ctrl->nr_queues; qid++)
1884 nvmet_pci_epf_delete_sq(ctrl->tctrl, qid);
1886 for (qid = 1; qid < ctrl->nr_queues; qid++)
1887 nvmet_pci_epf_delete_cq(ctrl->tctrl, qid);
1889 /* Delete the admin queue last. */
1890 nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
1891 nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
1894 static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
1896 struct nvmet_pci_epf_ctrl *ctrl =
1897 container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work);
1905 new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
1908 if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
1909 ret = nvmet_pci_epf_enable_ctrl(ctrl);
1912 ctrl->csts |= NVME_CSTS_RDY;
1915 if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) {
1916 nvmet_pci_epf_disable_ctrl(ctrl);
1917 ctrl->csts &= ~NVME_CSTS_RDY;
1920 if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
1921 nvmet_pci_epf_disable_ctrl(ctrl);
1922 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1925 if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc))
1926 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1928 nvmet_update_cc(ctrl->tctrl, ctrl->cc);
1929 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1931 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
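/*
 * Note: a PCI endpoint function is not notified when the host writes a BAR
 * register, so the controller configuration (CC) register has to be polled,
 * here every NVMET_PCI_EPF_CC_POLL_INTERVAL.
 */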
1934 static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl)
1936 struct nvmet_ctrl *tctrl = ctrl->tctrl;
1938 ctrl->bar = ctrl->nvme_epf->reg_bar;
1940 /* Copy the target controller capabilities as a base. */
1941 ctrl->cap = tctrl->cap;
1943 /* Contiguous Queues Required (CQR). */
1944 ctrl->cap |= 0x1ULL << 16;
1946 /* Set the doorbell stride to 4 B (CAP.DSTRD, bits 35:32, cleared to 0). */
1947 ctrl->cap &= ~GENMASK_ULL(35, 32);
1949 /* Clear NVM Subsystem Reset Supported (NSSRS). */
1950 ctrl->cap &= ~(0x1ULL << 36);
1952 /* Clear Boot Partition Support (BPS). */
1953 ctrl->cap &= ~(0x1ULL << 45);
1955 /* Clear Persistent Memory Region Supported (PMRS). */
1956 ctrl->cap &= ~(0x1ULL << 56);
1958 /* Clear Controller Memory Buffer Supported (CMBS). */
1959 ctrl->cap &= ~(0x1ULL << 57);
1961 /* Controller configuration. */
1962 ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE);
1964 /* Controller status. */
1965 ctrl->csts = ctrl->tctrl->csts;
1967 nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap);
1968 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver);
1969 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
1970 nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc);
1973 static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf,
1974 unsigned int max_nr_queues)
1976 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
1977 struct nvmet_alloc_ctrl_args args = {};
1978 char hostnqn[NVMF_NQN_SIZE];
1982 memset(ctrl, 0, sizeof(*ctrl));
1983 ctrl->dev = &nvme_epf->epf->dev;
1984 mutex_init(&ctrl->irq_lock);
1985 ctrl->nvme_epf = nvme_epf;
1986 ctrl->mdts = nvme_epf->mdts_kb * SZ_1K;
1987 INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work);
1988 INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work);
1990 ret = mempool_init_kmalloc_pool(&ctrl->iod_pool,
1991 max_nr_queues * NVMET_MAX_QUEUE_SIZE,
1992 sizeof(struct nvmet_pci_epf_iod));
1994 dev_err(ctrl->dev, "Failed to initialize IOD mempool\n");
1998 ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid);
2000 dev_err(ctrl->dev, "Port not found\n");
2002 goto out_mempool_exit;
2005 /* Create the target controller. */
2007 snprintf(hostnqn, NVMF_NQN_SIZE,
2008 "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id);
2009 args.port = ctrl->port;
2010 args.subsysnqn = nvme_epf->subsysnqn;
2011 memset(&id, 0, sizeof(uuid_t));
2013 args.hostnqn = hostnqn;
2014 args.ops = &nvmet_pci_epf_fabrics_ops;
2016 ctrl->tctrl = nvmet_alloc_ctrl(&args);
2018 dev_err(ctrl->dev, "Failed to create target controller\n");
2020 goto out_mempool_exit;
2022 ctrl->tctrl->drvdata = ctrl;
2024 /* We do not support protection information for now. */
2025 if (ctrl->tctrl->pi_support) {
2027 "Protection information (PI) is not supported\n");
2032 /* Allocate our queues, up to the maximum number. */
2033 ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues);
2034 ret = nvmet_pci_epf_alloc_queues(ctrl);
2039 * Allocate the IRQ vector descriptors. We cannot have more IRQ vectors
2040 * than the maximum number of queues.
2042 ret = nvmet_pci_epf_alloc_irq_vectors(ctrl);
2044 goto out_free_queues;
2047 "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n",
2048 ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1,
2051 /* Initialize BAR 0 using the target controller CAP. */
2052 nvmet_pci_epf_init_bar(ctrl);
2057 nvmet_pci_epf_free_queues(ctrl);
2059 nvmet_ctrl_put(ctrl->tctrl);
2062 mempool_exit(&ctrl->iod_pool);
2066 static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2068 schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
2071 static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2073 cancel_delayed_work_sync(&ctrl->poll_cc);
2075 nvmet_pci_epf_disable_ctrl(ctrl);
2078 static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
2083 dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n",
2084 ctrl->tctrl->subsys->subsysnqn);
2086 nvmet_pci_epf_stop_ctrl(ctrl);
2088 nvmet_pci_epf_free_queues(ctrl);
2089 nvmet_pci_epf_free_irq_vectors(ctrl);
2091 nvmet_ctrl_put(ctrl->tctrl);
2094 mempool_exit(&ctrl->iod_pool);
2097 static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf)
2099 struct pci_epf *epf = nvme_epf->epf;
2100 const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2101 size_t reg_size, reg_bar_size;
2102 size_t msix_table_size = 0;
2105 * The first free BAR will be our register BAR and, per the NVMe
2106 * specification, it must be BAR 0.
2108 if (pci_epc_get_first_free_bar(epc_features) != BAR_0) {
2109 dev_err(&epf->dev, "BAR 0 is not free\n");
2113 if (epc_features->bar[BAR_0].only_64bit)
2114 epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
2117 * Calculate the size of the register BAR: the NVMe registers come first,
2118 * with enough space for the doorbells, followed by the MSI-X table and PBA, if supported.
2121 reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32));
2122 reg_size = ALIGN(reg_size, 8);
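/*
 * For example, with NVMET_NR_QUEUES == 128 (the nvmet core maximum), the
 * doorbell area needs 128 * 2 * 4 = 1024 B starting at NVME_REG_DBS
 * (offset 0x1000), so reg_size is at least 5120 B before adding the
 * MSI-X table and PBA.
 */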
2124 if (epc_features->msix_capable) {
2127 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
2128 nvme_epf->msix_table_offset = reg_size;
2129 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
2131 reg_size += msix_table_size + pba_size;
2134 if (epc_features->bar[BAR_0].type == BAR_FIXED) {
2135 if (reg_size > epc_features->bar[BAR_0].fixed_size) {
2137 "BAR 0 size %llu B too small, need %zu B\n",
2138 epc_features->bar[BAR_0].fixed_size,
2142 reg_bar_size = epc_features->bar[BAR_0].fixed_size;
2144 reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096));
2147 nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0,
2148 epc_features, PRIMARY_INTERFACE);
2149 if (!nvme_epf->reg_bar) {
2150 dev_err(&epf->dev, "Failed to allocate BAR 0\n");
2153 memset(nvme_epf->reg_bar, 0, reg_bar_size);
2158 static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf)
2160 struct pci_epf *epf = nvme_epf->epf;
2162 if (!nvme_epf->reg_bar)
2165 pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE);
2166 nvme_epf->reg_bar = NULL;
2169 static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf)
2171 struct pci_epf *epf = nvme_epf->epf;
2173 pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no,
2177 static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf)
2179 const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2180 struct pci_epf *epf = nvme_epf->epf;
2183 /* Enable MSI-X if supported, otherwise use MSI. */
2184 if (epc_features->msix_capable && epf->msix_interrupts) {
2185 ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no,
2186 epf->msix_interrupts, BAR_0,
2187 nvme_epf->msix_table_offset);
2189 dev_err(&epf->dev, "Failed to configure MSI-X\n");
2193 nvme_epf->nr_vectors = epf->msix_interrupts;
2194 nvme_epf->irq_type = PCI_IRQ_MSIX;
2199 if (epc_features->msi_capable && epf->msi_interrupts) {
2200 ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no,
2201 epf->msi_interrupts);
2203 dev_err(&epf->dev, "Failed to configure MSI\n");
2207 nvme_epf->nr_vectors = epf->msi_interrupts;
2208 nvme_epf->irq_type = PCI_IRQ_MSI;
2213 /* MSI and MSI-X are not supported: fall back to INTx. */
2214 nvme_epf->nr_vectors = 1;
2215 nvme_epf->irq_type = PCI_IRQ_INTX;
2220 static int nvmet_pci_epf_epc_init(struct pci_epf *epf)
2222 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2223 const struct pci_epc_features *epc_features = nvme_epf->epc_features;
2224 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2225 unsigned int max_nr_queues = NVMET_NR_QUEUES;
2228 /* For now, do not support virtual functions. */
2229 if (epf->vfunc_no > 0) {
2230 dev_err(&epf->dev, "Virtual functions are not supported\n");
2235 * Cap the maximum number of queues we can support on the controller
2236 * with the number of IRQs we can use.
2238 if (epc_features->msix_capable && epf->msix_interrupts) {
2240 "PCI endpoint controller supports MSI-X, %u vectors\n",
2241 epf->msix_interrupts);
2242 max_nr_queues = min(max_nr_queues, epf->msix_interrupts);
2243 } else if (epc_features->msi_capable && epf->msi_interrupts) {
2245 "PCI endpoint controller supports MSI, %u vectors\n",
2246 epf->msi_interrupts);
2247 max_nr_queues = min(max_nr_queues, epf->msi_interrupts);
2250 if (max_nr_queues < 2) {
2251 dev_err(&epf->dev, "Invalid maximum number of queues %u\n",
2256 /* Create the target controller. */
2257 ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues);
2260 "Failed to create NVMe PCI target controller (err=%d)\n",
2265 /* Set device ID, class, etc. */
2266 epf->header->vendorid = ctrl->tctrl->subsys->vendor_id;
2267 epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id;
2268 ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no,
2272 "Failed to write configuration header (err=%d)\n", ret);
2273 goto out_destroy_ctrl;
2276 ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no,
2279 dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret);
2280 goto out_destroy_ctrl;
2284 * Enable interrupts and start polling the controller BAR if we do not
2285 * have a link up notifier.
2287 ret = nvmet_pci_epf_init_irq(nvme_epf);
2291 if (!epc_features->linkup_notifier) {
2292 ctrl->link_up = true;
2293 nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl);
2299 nvmet_pci_epf_clear_bar(nvme_epf);
2301 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
2305 static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf)
2307 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2308 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2310 ctrl->link_up = false;
2311 nvmet_pci_epf_destroy_ctrl(ctrl);
2313 nvmet_pci_epf_deinit_dma(nvme_epf);
2314 nvmet_pci_epf_clear_bar(nvme_epf);
2317 static int nvmet_pci_epf_link_up(struct pci_epf *epf)
2319 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2320 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2322 ctrl->link_up = true;
2323 nvmet_pci_epf_start_ctrl(ctrl);
2328 static int nvmet_pci_epf_link_down(struct pci_epf *epf)
2330 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2331 struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl;
2333 ctrl->link_up = false;
2334 nvmet_pci_epf_stop_ctrl(ctrl);
2339 static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = {
2340 .epc_init = nvmet_pci_epf_epc_init,
2341 .epc_deinit = nvmet_pci_epf_epc_deinit,
2342 .link_up = nvmet_pci_epf_link_up,
2343 .link_down = nvmet_pci_epf_link_down,
2346 static int nvmet_pci_epf_bind(struct pci_epf *epf)
2348 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2349 const struct pci_epc_features *epc_features;
2350 struct pci_epc *epc = epf->epc;
2353 if (WARN_ON_ONCE(!epc))
2356 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
2357 if (!epc_features) {
2358 dev_err(&epf->dev, "epc_features not implemented\n");
2361 nvme_epf->epc_features = epc_features;
2363 ret = nvmet_pci_epf_configure_bar(nvme_epf);
2367 nvmet_pci_epf_init_dma(nvme_epf);
2372 static void nvmet_pci_epf_unbind(struct pci_epf *epf)
2374 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2375 struct pci_epc *epc = epf->epc;
2377 nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl);
2379 if (epc->init_complete) {
2380 nvmet_pci_epf_deinit_dma(nvme_epf);
2381 nvmet_pci_epf_clear_bar(nvme_epf);
2384 nvmet_pci_epf_free_bar(nvme_epf);
2387 static struct pci_epf_header nvme_epf_pci_header = {
2388 .vendorid = PCI_ANY_ID,
2389 .deviceid = PCI_ANY_ID,
2390 .progif_code = 0x02, /* NVM Express */
2391 .baseclass_code = PCI_BASE_CLASS_STORAGE,
2392 .subclass_code = 0x08, /* Non-Volatile Memory controller */
2393 .interrupt_pin = PCI_INTERRUPT_INTA,
2396 static int nvmet_pci_epf_probe(struct pci_epf *epf,
2397 const struct pci_epf_device_id *id)
2399 struct nvmet_pci_epf *nvme_epf;
2402 nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL);
2406 ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock);
2410 nvme_epf->epf = epf;
2411 nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB;
2413 epf->event_ops = &nvmet_pci_epf_event_ops;
2414 epf->header = &nvme_epf_pci_header;
2415 epf_set_drvdata(epf, nvme_epf);
2420 #define to_nvme_epf(epf_group) \
2421 container_of(epf_group, struct nvmet_pci_epf, group)
2423 static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page)
2425 struct config_group *group = to_config_group(item);
2426 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2428 return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid));
2431 static ssize_t nvmet_pci_epf_portid_store(struct config_item *item,
2432 const char *page, size_t len)
2434 struct config_group *group = to_config_group(item);
2435 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2438 /* Do not allow setting this when the function is already started. */
2439 if (nvme_epf->ctrl.tctrl)
2445 if (kstrtou16(page, 0, &portid))
2448 nvme_epf->portid = cpu_to_le16(portid);
2453 CONFIGFS_ATTR(nvmet_pci_epf_, portid);
2455 static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item,
2458 struct config_group *group = to_config_group(item);
2459 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2461 return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn);
2464 static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item,
2465 const char *page, size_t len)
2467 struct config_group *group = to_config_group(item);
2468 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2470 /* Do not allow setting this when the function is already started. */
2471 if (nvme_epf->ctrl.tctrl)
2477 strscpy(nvme_epf->subsysnqn, page, len);
2482 CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn);
2484 static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page)
2486 struct config_group *group = to_config_group(item);
2487 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2489 return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb);
2492 static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item,
2493 const char *page, size_t len)
2495 struct config_group *group = to_config_group(item);
2496 struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group);
2497 unsigned long mdts_kb;
2500 if (nvme_epf->ctrl.tctrl)
2503 ret = kstrtoul(page, 0, &mdts_kb);
2507 mdts_kb = NVMET_PCI_EPF_MDTS_KB;
2508 else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB)
2509 mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB;
2511 if (!is_power_of_2(mdts_kb))
2514 nvme_epf->mdts_kb = mdts_kb;
2519 CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb);
2521 static struct configfs_attribute *nvmet_pci_epf_attrs[] = {
2522 &nvmet_pci_epf_attr_portid,
2523 &nvmet_pci_epf_attr_subsysnqn,
2524 &nvmet_pci_epf_attr_mdts_kb,
2528 static const struct config_item_type nvmet_pci_epf_group_type = {
2529 .ct_attrs = nvmet_pci_epf_attrs,
2530 .ct_owner = THIS_MODULE,
2533 static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf,
2534 struct config_group *group)
2536 struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf);
2538 config_group_init_type_name(&nvme_epf->group, "nvme",
2539 &nvmet_pci_epf_group_type);
2541 return &nvme_epf->group;
2544 static const struct pci_epf_device_id nvmet_pci_epf_ids[] = {
2545 { .name = "nvmet_pci_epf" },
2549 static struct pci_epf_ops nvmet_pci_epf_ops = {
2550 .bind = nvmet_pci_epf_bind,
2551 .unbind = nvmet_pci_epf_unbind,
2552 .add_cfs = nvmet_pci_epf_add_cfs,
2555 static struct pci_epf_driver nvmet_pci_epf_driver = {
2556 .driver.name = "nvmet_pci_epf",
2557 .probe = nvmet_pci_epf_probe,
2558 .id_table = nvmet_pci_epf_ids,
2559 .ops = &nvmet_pci_epf_ops,
2560 .owner = THIS_MODULE,
2563 static int __init nvmet_pci_epf_init_module(void)
2567 ret = pci_epf_register_driver(&nvmet_pci_epf_driver);
2571 ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops);
2573 pci_epf_unregister_driver(&nvmet_pci_epf_driver);
2580 static void __exit nvmet_pci_epf_cleanup_module(void)
2582 nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
2583 pci_epf_unregister_driver(&nvmet_pci_epf_driver);
2586 module_init(nvmet_pci_epf_init_module);
2587 module_exit(nvmet_pci_epf_cleanup_module);
2589 MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
2591 MODULE_LICENSE("GPL");