dma-mapping: don't return errors from dma_set_max_seg_size
author    Christoph Hellwig <[email protected]>
          Fri, 19 Jul 2024 04:07:38 +0000 (06:07 +0200)
committer Christoph Hellwig <[email protected]>
          Thu, 29 Aug 2024 04:22:49 +0000 (07:22 +0300)
A NULL dev->dma_parms indicates either a bus that is not DMA capable or a
grave bug in the implementation of the bus code.

There isn't much the driver can do in terms of error handling for either
case, so just warn and continue, as DMA operations will fail anyway.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Robin Murphy <[email protected]>
Reviewed-by: Martin K. Petersen <[email protected]>
Acked-by: Ulf Hansson <[email protected]> # For MMC
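
With the helper returning void, callers simply set the limit and move on; the
WARN_ON_ONCE inside the helper is what flags a missing dma_parms. A minimal
sketch of the new calling convention in a driver probe path (example_probe and
the 64-bit mask are illustrative, not taken from any of the drivers below):

	/* Assumes <linux/pci.h> and <linux/dma-mapping.h>. */
	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (ret)
			return ret;

		/* No return value to check; a missing dma_parms only warns. */
		dma_set_max_seg_size(&pdev->dev, UINT_MAX);

		return 0;
	}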
13 files changed:
drivers/accel/qaic/qaic_drv.c
drivers/dma/idma64.c
drivers/dma/pl330.c
drivers/dma/qcom/bam_dma.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/ste_dma40.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/pci/intel/ipu6/ipu6.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/scsi/lpfc/lpfc_init.c
include/linux/dma-mapping.h

diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 580b29ed190217654230d2c4c4b7385a585a6a35..bf10156c334e7144e7dcb7277a4bd26fb75d3dc3 100644
@@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                return ret;
-       ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-       if (ret)
-               return ret;
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
        qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
        if (IS_ERR(qdev->bar_0))
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index e3505e56784b1a4c4d7dc20c60753fb0844a7e46..1398814d8fbb63c3dd750a603deca0a050e488a1 100644
@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
 
        idma64->dma.dev = chip->sysdev;
 
-       ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
-       if (ret)
-               return ret;
+       dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
 
        ret = dma_async_device_register(&idma64->dma);
        if (ret)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 60c4de8dac1d2abd067ff1065d21d960b1dac844..82a9fe88ad54c995616ceb9b4be63e3e884fa49f 100644
@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
         * This is the limit for transfers with a buswidth of 1, larger
         * buswidths will have larger limits.
         */
-       ret = dma_set_max_seg_size(&adev->dev, 1900800);
-       if (ret)
-               dev_err(&adev->dev, "unable to set the seg size\n");
-
+       dma_set_max_seg_size(&adev->dev, 1900800);
 
        init_pl330_debugfs(pl330);
        dev_info(&adev->dev,
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5e7d332731e0c111792b51b23f28e35d7463ff5f..368ffaa400378985aaa317ce88f26be909047ea9 100644
@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 
        /* set max dma segment size */
        bdev->common.dev = bdev->dev;
-       ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
-       if (ret) {
-               dev_err(bdev->dev, "cannot set maximum segment size\n");
-               goto err_bam_channel_exit;
-       }
+       dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
 
        platform_set_drvdata(pdev, bdev);
 
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 40482cb73d798ac15d24fa1aa1a94ea0c2ee2723..1094a2f821649ca5c422079ae9321eeea7830da4 100644
@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 
        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);
-       ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-       if (ret)
-               return ret;
+       dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
 
        ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
        if (ret)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2c489299148eeea268e83e1a74824c434d86cdee..d52e1685aed53ffa49c7c7f6d6a611a358e7f360 100644
@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
        if (ret)
                goto destroy_cache;
 
-       ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
-       if (ret) {
-               d40_err(dev, "Failed to set dma max seg size\n");
-               goto destroy_cache;
-       }
+       dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
 
        d40_hw_init(base);
 
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 77b50c56c124ce48831aa2839c9fc45548a86e37..3e807195a0d03ab25552139a3ed09aa16dc226ee 100644
@@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
         * Configure the DMA segment size to make sure we get contiguous IOVA
         * when importing PRIME buffers.
         */
-       ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
-       if (ret) {
-               dev_err(dma_dev, "Failed to set DMA segment size\n");
-               goto err_component_unbind;
-       }
+       dma_set_max_seg_size(dma_dev, UINT_MAX);
 
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret < 0)
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 3d4fd4ef53107c6519442d24d11e7c449b0fa8ba..bb0b7fa67b539aa73ad5ccf3c3bc318e26f8a4cb 100644
@@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
                return -ENODEV;
        }
        if (dma_get_max_seg_size(dev) < size)
-               return dma_set_max_seg_size(dev, size);
-
+               dma_set_max_seg_size(dev, size);
        return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
diff --git a/drivers/media/pci/intel/ipu6/ipu6.c b/drivers/media/pci/intel/ipu6/ipu6.c
index bbd646378ab3ed93b0fdc3a5db4b01dd515d920b..83e70c692d957f5d0b3d479c84fd99f2a16d2069 100644
@@ -576,9 +576,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (ret)
                return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
 
-       ret = dma_set_max_seg_size(dev, UINT_MAX);
-       if (ret)
-               return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+       dma_set_max_seg_size(dev, UINT_MAX);
 
        ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
        if (ret)
diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c
index f5da7f9baa52d4b29cd396f0aa88e1ff7891666c..9dc51859c2e51e8507a42861191d47300f6760e3 100644
@@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
                host->mmc->max_seg_size = host->mmc->max_req_size;
        }
 
-       return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+       dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+       return 0;
 }
 
 static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index ddb8f68d80a2062c33d4028aef35e8a2fa56ccbf..ca4ed58f1206dd6906905ab506211c4a0b5d02a9 100644
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto release_region;
 
-       err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to set dma device segment size\n");
-               goto release_region;
-       }
+       dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
        err = -ENOMEM;
        gc = vzalloc(sizeof(*gc));
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e1dfa96c2a553a51b123c7a53814d1288922106d..50620918becd594f50e9ec8feeb2bc5a7a886807 100644
@@ -13861,12 +13861,7 @@ fcponly:
        if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
                sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
 
-       rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
-       if (unlikely(rc)) {
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "6400 Can't set dma maximum segment size\n");
-               return rc;
-       }
+       dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
 
        /*
         * Check whether the adapter supports an embedded copy of the
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 6bd1333dbacb9b75e834a236b481ed5554f40659..1524da363734af676054d68698c7bb10373c8f4f 100644
@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
        return SZ_64K;
 }
 
-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
 {
-       if (dev->dma_parms) {
-               dev->dma_parms->max_segment_size = size;
-               return 0;
-       }
-       return -EIO;
+       if (WARN_ON_ONCE(!dev->dma_parms))
+               return;
+       dev->dma_parms->max_segment_size = size;
 }
 
 static inline unsigned long dma_get_seg_boundary(struct device *dev)
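
The WARN_ON_ONCE only fires when the bus code never wired up dev->dma_parms.
A DMA-capable bus is expected to point dma_parms at storage it owns before
drivers probe; a rough sketch of that responsibility, with hypothetical names
(the pattern mirrors what buses such as PCI do by embedding
struct device_dma_parameters in their device structure):

	/* Assumes <linux/device.h> and <linux/dma-mapping.h>. */
	struct example_bus_dev {
		struct device dev;
		struct device_dma_parameters dma_parms;
	};

	static void example_bus_dev_init(struct example_bus_dev *bdev)
	{
		/*
		 * Wire up dma_parms before device_add() so that a later
		 * dma_set_max_seg_size() from a driver never warns.
		 */
		bdev->dev.dma_parms = &bdev->dma_parms;
	}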