Git Repo - linux.git/commitdiff
Merge tag 'block-6.0-2022-08-12' of git://git.kernel.dk/linux-block
author Linus Torvalds <[email protected]>
Sat, 13 Aug 2022 20:37:36 +0000 (13:37 -0700)
committer Linus Torvalds <[email protected]>
Sat, 13 Aug 2022 20:37:36 +0000 (13:37 -0700)
Pull block fixes from Jens Axboe:

 - NVMe pull request
     - print nvme connect Linux error codes properly (Amit Engel)
     - fix the fc_appid_store return value (Christoph Hellwig)
     - fix a typo in an error message (Christophe JAILLET)
     - add another non-unique identifier quirk (Dennis P. Kliem)
     - check if the queue is allocated before stopping it in nvme-tcp
       (Maurizio Lombardi)
     - restart admin queue if the caller needs to restart queue in
       nvme-fc (Ming Lei)
     - use kmemdup instead of kmalloc + memcpy in nvme-auth (Zhang
       Xiaoxu)

 - __alloc_disk_node() error handling fix (Rafael)

* tag 'block-6.0-2022-08-12' of git://git.kernel.dk/linux-block:
  block: Do not call blk_put_queue() if gendisk allocation fails
  nvme-pci: add NVME_QUIRK_BOGUS_NID for ADATA XPG GAMMIX S70
  nvme-tcp: check if the queue is allocated before stopping it
  nvme-fabrics: Fix a typo in an error message
  nvme-fabrics: parse nvme connect Linux error codes
  nvmet-auth: use kmemdup instead of kmalloc + memcpy
  nvme-fc: fix the fc_appid_store return value
  nvme-fc: restart admin queue if the caller needs to restart queue

1  2 
drivers/nvme/host/pci.c

diff --combined drivers/nvme/host/pci.c
index de1b4463142db7909a0daff782b722b3983c6d43,a222caa1ab002c9725558373d9d58e5f07e01d65..3a1c37f32f30d93992556937e7ed356bd617cbfa
@@@ -230,10 -230,11 +230,10 @@@ struct nvme_iod 
        bool use_sgl;
        int aborted;
        int npages;             /* In the PRP list. 0 means small pool in use */
 -      int nents;              /* Used in scatterlist */
        dma_addr_t first_dma;
        unsigned int dma_len;   /* length of single DMA segment mapping */
        dma_addr_t meta_dma;
 -      struct scatterlist *sg;
 +      struct sg_table sgt;
  };
  
  static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@@ -523,7 -524,7 +523,7 @@@ static void nvme_commit_rqs(struct blk_
  static void **nvme_pci_iod_list(struct request *req)
  {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 -      return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
 +      return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
  }
  
  static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@@ -575,6 -576,17 +575,6 @@@ static void nvme_free_sgls(struct nvme_
        }
  }
  
 -static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
 -{
 -      struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 -
 -      if (is_pci_p2pdma_page(sg_page(iod->sg)))
 -              pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
 -                                  rq_dma_dir(req));
 -      else
 -              dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
 -}
 -
  static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
  {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
                return;
        }
  
 -      WARN_ON_ONCE(!iod->nents);
 +      WARN_ON_ONCE(!iod->sgt.nents);
 +
 +      dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
  
 -      nvme_unmap_sg(dev, req);
        if (iod->npages == 0)
                dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
                              iod->first_dma);
                nvme_free_sgls(dev, req);
        else
                nvme_free_prps(dev, req);
 -      mempool_free(iod->sg, dev->iod_mempool);
 +      mempool_free(iod->sgt.sgl, dev->iod_mempool);
  }
  
  static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@@ -619,7 -630,7 +619,7 @@@ static blk_status_t nvme_pci_setup_prps
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
        int length = blk_rq_payload_bytes(req);
 -      struct scatterlist *sg = iod->sg;
 +      struct scatterlist *sg = iod->sgt.sgl;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
        int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
                dma_len = sg_dma_len(sg);
        }
  done:
 -      cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
 +      cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
        return BLK_STS_OK;
  free_prps:
        nvme_free_prps(dev, req);
        return BLK_STS_RESOURCE;
  bad_sgl:
 -      WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
 +      WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
                        "Invalid SGL for payload:%d nents:%d\n",
 -                      blk_rq_payload_bytes(req), iod->nents);
 +                      blk_rq_payload_bytes(req), iod->sgt.nents);
        return BLK_STS_IOERR;
  }
  
@@@ -726,13 -737,12 +726,13 @@@ static void nvme_pci_sgl_set_seg(struc
  }
  
  static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 -              struct request *req, struct nvme_rw_command *cmd, int entries)
 +              struct request *req, struct nvme_rw_command *cmd)
  {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
        struct nvme_sgl_desc *sg_list;
 -      struct scatterlist *sg = iod->sg;
 +      struct scatterlist *sg = iod->sgt.sgl;
 +      unsigned int entries = iod->sgt.nents;
        dma_addr_t sgl_dma;
        int i = 0;
  
@@@ -830,7 -840,7 +830,7 @@@ static blk_status_t nvme_map_data(struc
  {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret = BLK_STS_RESOURCE;
 -      int nr_mapped;
 +      int rc;
  
        if (blk_rq_nr_phys_segments(req) == 1) {
                struct bio_vec bv = req_bvec(req);
        }
  
        iod->dma_len = 0;
 -      iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
 -      if (!iod->sg)
 +      iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
 +      if (!iod->sgt.sgl)
                return BLK_STS_RESOURCE;
 -      sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
 -      iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
 -      if (!iod->nents)
 +      sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
 +      iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
 +      if (!iod->sgt.orig_nents)
                goto out_free_sg;
  
 -      if (is_pci_p2pdma_page(sg_page(iod->sg)))
 -              nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
 -                              iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
 -      else
 -              nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
 -                                           rq_dma_dir(req), DMA_ATTR_NO_WARN);
 -      if (!nr_mapped)
 +      rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
 +                           DMA_ATTR_NO_WARN);
 +      if (rc) {
 +              if (rc == -EREMOTEIO)
 +                      ret = BLK_STS_TARGET;
                goto out_free_sg;
 +      }
  
        iod->use_sgl = nvme_pci_use_sgls(dev, req);
        if (iod->use_sgl)
 -              ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
 +              ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
        else
                ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
        if (ret != BLK_STS_OK)
        return BLK_STS_OK;
  
  out_unmap_sg:
 -      nvme_unmap_sg(dev, req);
 +      dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
  out_free_sg:
 -      mempool_free(iod->sg, dev->iod_mempool);
 +      mempool_free(iod->sgt.sgl, dev->iod_mempool);
        return ret;
  }
  
@@@ -900,7 -911,7 +900,7 @@@ static blk_status_t nvme_prep_rq(struc
  
        iod->aborted = 0;
        iod->npages = -1;
 -      iod->nents = 0;
 +      iod->sgt.nents = 0;
  
        ret = nvme_setup_cmd(req->q->queuedata, req);
        if (ret)
@@@ -2981,6 -2992,7 +2981,6 @@@ static int nvme_pci_get_address(struct 
        return snprintf(buf, size, "%s\n", dev_name(&pdev->dev));
  }
  
 -
  static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl)
  {
        struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev);
                subsys->firmware_rev);
  }
  
 +static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl)
 +{
 +      struct nvme_dev *dev = to_nvme_dev(ctrl);
 +
 +      return dma_pci_p2pdma_supported(dev->dev);
 +}
 +
  static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
        .name                   = "pcie",
        .module                 = THIS_MODULE,
 -      .flags                  = NVME_F_METADATA_SUPPORTED |
 -                                NVME_F_PCI_P2PDMA,
 +      .flags                  = NVME_F_METADATA_SUPPORTED,
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
        .submit_async_event     = nvme_pci_submit_async_event,
        .get_address            = nvme_pci_get_address,
        .print_device_info      = nvme_pci_print_device_info,
 +      .supports_pci_p2pdma    = nvme_pci_supports_pci_p2pdma,
  };
  
  static int nvme_dev_map(struct nvme_dev *dev)
@@@ -3511,6 -3516,8 +3511,8 @@@ static const struct pci_device_id nvme_
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e49, 0x0041),   /* ZHITAI TiPro7000 NVMe SSD */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0xc0a9, 0x540a),   /* Crucial P2 */
This page took 0.072769 seconds and 4 git commands to generate.