Merge tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul...
author Linus Torvalds <[email protected]>
Sun, 3 Sep 2023 17:49:42 +0000 (10:49 -0700)
committer Linus Torvalds <[email protected]>
Sun, 3 Sep 2023 17:49:42 +0000 (10:49 -0700)
Pull dmaengine updates from Vinod Koul:
 "New controller support and updates to drivers.

  New support:
   - Qualcomm SM6115 and QCM2290 dmaengine support
   - at_xdma support for microchip,sam9x7 controller

  Updates:
   - idxd updates for wq simplification and ats knob updates
   - fsl edma updates for v3 support
   - Xilinx AXI4-Stream control support
   - Yaml conversion for bcm dma binding"

* tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (53 commits)
  dmaengine: fsl-edma: integrate v3 support
  dt-bindings: fsl-dma: fsl-edma: add edma3 compatible string
  dmaengine: fsl-edma: move tcd into struct fsl_dma_chan
  dmaengine: fsl-edma: refactor chan_name setup and safety
  dmaengine: fsl-edma: move clearing of register interrupt into setup_irq function
  dmaengine: fsl-edma: refactor using devm_clk_get_enabled
  dmaengine: fsl-edma: simply ATTR_DSIZE and ATTR_SSIZE by using ffs()
  dmaengine: fsl-edma: move common IRQ handler to common.c
  dmaengine: fsl-edma: Remove enum edma_version
  dmaengine: fsl-edma: transition from bool fields to bitmask flags in drvdata
  dmaengine: fsl-edma: clean up EXPORT_SYMBOL_GPL in fsl-edma-common.c
  dmaengine: fsl-edma: fix build error when arch is s390
  dmaengine: idxd: Fix issues with PRS disable sysfs knob
  dmaengine: idxd: Allow ATS disable update only for configurable devices
  dmaengine: xilinx_dma: Program interrupt delay timeout
  dmaengine: xilinx_dma: Use tasklet_hi_schedule for timing critical usecase
  dmaengine: xilinx_dma: Freeup active list based on descriptor completion bit
  dmaengine: xilinx_dma: Increase AXI DMA transaction segment count
  dmaengine: xilinx_dma: Pass AXI4-Stream control words to dma client
  dt-bindings: dmaengine: xilinx_dma: Add xlnx,irq-delay property
  ...

Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
drivers/dma/Kconfig
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/sysfs.c
drivers/dma/mcf-edma-main.c
drivers/dma/owl-dma.c

index 4f5510b6fa021112a81fa07f5c442f992918eb1d,20fc8c16ded82afd7dbc5f1ec658e4fc805878b8..3ad0d9b1fbc5e4f83dd316d1ad79773c288748ba
@@@ -15,13 -15,19 +15,19 @@@ allOf
  
  properties:
    compatible:
-     enum:
-         # APQ8064, IPQ8064 and MSM8960
-       - qcom,bam-v1.3.0
-         # MSM8974, APQ8074 and APQ8084
-       - qcom,bam-v1.4.0
-         # MSM8916 and SDM845
-       - qcom,bam-v1.7.0
+     oneOf:
+       - enum:
+           # APQ8064, IPQ8064 and MSM8960
+           - qcom,bam-v1.3.0
+           # MSM8974, APQ8074 and APQ8084
+           - qcom,bam-v1.4.0
+           # MSM8916, SDM630
+           - qcom,bam-v1.7.0
+       - items:
+           - enum:
+               # SDM845, SM6115, SM8150, SM8250 and QCM2290
+               - qcom,bam-v1.7.4
+           - const: qcom,bam-v1.7.0
  
    clocks:
      maxItems: 1
@@@ -38,7 -44,7 +44,7 @@@
  
    iommus:
      minItems: 1
-     maxItems: 4
+     maxItems: 6
  
    num-channels:
      $ref: /schemas/types.yaml#/definitions/uint32
@@@ -48,7 -54,7 +54,7 @@@
    qcom,controlled-remotely:
      type: boolean
      description:
 -      Indicates that the bam is controlled by remote proccessor i.e. execution
 +      Indicates that the bam is controlled by remote processor i.e. execution
        environment.
  
    qcom,ee:
@@@ -81,6 -87,15 +87,15 @@@ required
    - qcom,ee
    - reg
  
+ anyOf:
+   - required:
+       - qcom,powered-remotely
+   - required:
+       - qcom,controlled-remotely
+   - required:
+       - clocks
+       - clock-names
  additionalProperties: false
  
  examples:
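
The new qcom,bam-v1.7.4 entry is an items list with qcom,bam-v1.7.0 as its fallback, so a node describing itself as "qcom,bam-v1.7.4", "qcom,bam-v1.7.0" still binds against a driver that only knows the older string. A minimal C sketch of that fallback matching, assuming a simplified match table (illustrative only, not the actual bam_dma driver code):

#include <linux/mod_devicetable.h>
#include <linux/of.h>

/* Illustrative match table; the real one lives in drivers/dma/qcom/bam_dma.c */
static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0" },
	{ .compatible = "qcom,bam-v1.4.0" },
	{ .compatible = "qcom,bam-v1.7.0" },
	/*
	 * No "qcom,bam-v1.7.4" entry is required: a node with
	 * compatible = "qcom,bam-v1.7.4", "qcom,bam-v1.7.0";
	 * matches the v1.7.0 entry through the fallback string.
	 */
	{ /* sentinel */ }
};
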
diff --combined drivers/dma/Kconfig
index 08fdd0e2ed1bf01a4be5baf462fabb32dd5b88d3,f52d36e713f3c02da8693092d032e9f7e3408c6d..4ccae1a3b8842787fd144a79eede2396b00a7b83
@@@ -281,7 -281,6 +281,7 @@@ config IMX_SDM
  
  config INTEL_IDMA64
        tristate "Intel integrated DMA 64-bit support"
 +      depends on HAS_IOMEM
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
@@@ -474,25 -473,6 +474,6 @@@ config MXS_DM
          Support the MXS DMA engine. This engine including APBH-DMA
          and APBX-DMA is integrated into some Freescale chips.
  
- config MX3_IPU
-       bool "MX3x Image Processing Unit support"
-       depends on ARCH_MXC
-       select DMA_ENGINE
-       default y
-       help
-         If you plan to use the Image Processing unit in the i.MX3x, say
-         Y here. If unsure, select Y.
- config MX3_IPU_IRQS
-       int "Number of dynamically mapped interrupts for IPU"
-       depends on MX3_IPU
-       range 2 137
-       default 4
-       help
-         Out of 137 interrupt sources on i.MX31 IPU only very few are used.
-         To avoid bloating the irq_desc[] array we allocate a sufficient
-         number of IRQ slots and map them dynamically to specific sources.
  config NBPFAXI_DMA
        tristate "Renesas Type-AXI NBPF DMA support"
        select DMA_ENGINE
@@@ -699,7 -679,7 +680,7 @@@ config XGENE_DM
  
  config XILINX_DMA
        tristate "Xilinx AXI DMAS Engine"
-       depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+       depends on HAS_IOMEM
        select DMA_ENGINE
        help
          Enable support for Xilinx AXI VDMA Soft IP.
index 6453b5b35bfece6c139e49409788a21b53597725,7c74bc60f582830ce5d8466dcf16a8c4d55f2535..22d6f4e455b7973cae80208ccfcd638f3e0e0dca
@@@ -299,6 -299,21 +299,6 @@@ void idxd_wqs_unmap_portal(struct idxd_
        }
  }
  
 -static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
 -{
 -      struct idxd_device *idxd = wq->idxd;
 -      union wqcfg wqcfg;
 -      unsigned int offset;
 -
 -      offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
 -      spin_lock(&idxd->dev_lock);
 -      wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
 -      wqcfg.priv = priv;
 -      wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
 -      iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
 -      spin_unlock(&idxd->dev_lock);
 -}
 -
  static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
  {
        struct idxd_device *idxd = wq->idxd;
@@@ -369,7 -384,9 +369,7 @@@ static void idxd_wq_disable_cleanup(str
        wq->threshold = 0;
        wq->priority = 0;
        wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 -      clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
 -      clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
 -      clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 +      wq->flags = 0;
        memset(wq->name, 0, WQ_NAME_SIZE);
        wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
@@@ -769,8 -786,6 +769,6 @@@ static int idxd_device_evl_setup(struc
                goto err_alloc;
        }
  
-       memset(addr, 0, size);
        spin_lock(&evl->lock);
        evl->log = addr;
        evl->dma = dma_addr;
@@@ -1406,14 -1421,15 +1404,14 @@@ int drv_enable_wq(struct idxd_wq *wq
        }
  
        /*
 -       * In the event that the WQ is configurable for pasid and priv bits.
 -       * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
 -       * However, for non-kernel wq, the driver should only set the pasid_en bit for
 -       * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
 +       * In the event that the WQ is configurable for pasid, the driver
 +       * should setup the pasid, pasid_en bit. This is true for both kernel
 +       * and user shared workqueues. There is no need to setup priv bit in
 +       * that in-kernel DMA will also do user privileged requests.
 +       * A dedicated wq that is not 'kernel' type will configure pasid and
         * pasid_en later on so there is no need to setup.
         */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
 -              int priv = 0;
 -
                if (wq_pasid_enabled(wq)) {
                        if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
                                u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
                                __idxd_wq_set_pasid_locked(wq, pasid);
                        }
                }
 -
 -              if (is_idxd_wq_kernel(wq))
 -                      priv = 1;
 -              __idxd_wq_set_priv_locked(wq, priv);
        }
  
        rc = 0;
@@@ -1528,15 -1548,6 +1526,15 @@@ int idxd_device_drv_probe(struct idxd_d
        if (rc < 0)
                return -ENXIO;
  
 +      /*
 +       * System PASID is preserved across device disable/enable cycle, but
 +       * genconfig register content gets cleared during device reset. We
 +       * need to re-enable user interrupts for kernel work queue completion
 +       * IRQ to function.
 +       */
 +      if (idxd->pasid != IOMMU_PASID_INVALID)
 +              idxd_set_user_intr(idxd, 1);
 +
        rc = idxd_device_evl_setup(idxd);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
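
The memset() removed from idxd_device_evl_setup() was redundant: dma_alloc_coherent() is documented to return zero-filled memory. A minimal sketch of the allocation pattern with hypothetical names (not the actual idxd code):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* dma_alloc_coherent() zeroes the buffer, so no memset() is needed. */
static void *alloc_event_log(struct device *dev, size_t size,
			     dma_addr_t *dma_addr)
{
	void *addr = dma_alloc_coherent(dev, size, dma_addr, GFP_KERNEL);

	if (!addr)
		return NULL;
	/* addr already contains zeroes here */
	return addr;
}
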
diff --combined drivers/dma/idxd/idxd.h
index 502be9db63f403bb5acd122b3690241ff22aa51e,05a83359def9af25491874760fd26ac9b06cc640..e269ca1f48625513fb03df0f678919db5ccc3c48
@@@ -473,15 -473,6 +473,15 @@@ static inline struct idxd_device *ie_to
        return container_of(ie, struct idxd_device, ie);
  }
  
 +static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
 +{
 +      union gencfg_reg reg;
 +
 +      reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
 +      reg.user_int_en = enable;
 +      iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 +}
 +
  extern struct bus_type dsa_bus_type;
  
  extern bool support_enqcmd;
@@@ -660,8 -651,6 +660,6 @@@ int idxd_register_bus_type(void)
  void idxd_unregister_bus_type(void);
  int idxd_register_devices(struct idxd_device *idxd);
  void idxd_unregister_devices(struct idxd_device *idxd);
- int idxd_register_driver(void);
- void idxd_unregister_driver(void);
  void idxd_wqs_quiesce(struct idxd_device *idxd);
  bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
  void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
@@@ -673,8 -662,6 +671,6 @@@ void idxd_mask_error_interrupts(struct 
  void idxd_unmask_error_interrupts(struct idxd_device *idxd);
  
  /* device control */
- int idxd_register_idxd_drv(void);
- void idxd_unregister_idxd_drv(void);
  int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
  void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
  int drv_enable_wq(struct idxd_wq *wq);
@@@ -719,7 -706,6 +715,6 @@@ int idxd_enqcmds(struct idxd_wq *wq, vo
  /* dmaengine */
  int idxd_register_dma_device(struct idxd_device *idxd);
  void idxd_unregister_dma_device(struct idxd_device *idxd);
- void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
  void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type, bool free_desc);
  
diff --combined drivers/dma/idxd/sysfs.c
index 63f6966c51aa1726dea4bf465729a1b07befbb05,a5c3eb434832517d74ea23943365f6f67c695fba..7caba90d85b31e2d2519e7ff564f2c106e51ccc2
@@@ -948,6 -948,13 +948,6 @@@ static ssize_t wq_name_store(struct dev
        if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
                return -EINVAL;
  
 -      /*
 -       * This is temporarily placed here until we have SVM support for
 -       * dmaengine.
 -       */
 -      if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
 -              return -EOPNOTSUPP;
 -
        input = kstrndup(buf, count, GFP_KERNEL);
        if (!input)
                return -ENOMEM;
@@@ -1088,8 -1095,8 +1088,8 @@@ static ssize_t wq_ats_disable_store(str
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
  
-       if (!idxd->hw.wq_cap.wq_ats_support)
-               return -EOPNOTSUPP;
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
  
        rc = kstrtobool(buf, &ats_dis);
        if (rc < 0)
@@@ -1124,8 -1131,8 +1124,8 @@@ static ssize_t wq_prs_disable_store(str
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
  
-       if (!idxd->hw.wq_cap.wq_prs_support)
-               return -EOPNOTSUPP;
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
  
        rc = kstrtobool(buf, &prs_dis);
        if (rc < 0)
@@@ -1281,12 -1288,9 +1281,9 @@@ static struct attribute *idxd_wq_attrib
        NULL,
  };
  
- static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
-                                            struct idxd_device *idxd)
- {
-       return attr == &dev_attr_wq_op_config.attr &&
-              !idxd->hw.wq_cap.op_config;
- }
+ /*  A WQ attr is invisible if the feature is not supported in WQCAP. */
+ #define idxd_wq_attr_invisible(name, cap_field, a, idxd)              \
+       ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
  
  static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
                                                  struct idxd_device *idxd)
  {
        return attr == &dev_attr_wq_max_batch_size.attr &&
               idxd->data->type == IDXD_TYPE_IAX;
  }
  
- static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
-                                                 struct idxd_device *idxd)
- {
-       return attr == &dev_attr_wq_prs_disable.attr &&
-              !idxd->hw.wq_cap.wq_prs_support;
- }
  static umode_t idxd_wq_attr_visible(struct kobject *kobj,
                                    struct attribute *attr, int n)
  {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct idxd_wq *wq = confdev_to_wq(dev);
        struct idxd_device *idxd = wq->idxd;
  
-       if (idxd_wq_attr_op_config_invisible(attr, idxd))
+       if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
                return 0;
  
        if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
                return 0;
  
-       if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
+       if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
+               return 0;
+       if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
                return 0;
  
        return attr->mode;
@@@ -1473,7 -1473,7 +1466,7 @@@ static ssize_t pasid_enabled_show(struc
  {
        struct idxd_device *idxd = confdev_to_idxd(dev);
  
-       return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
+       return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
  }
  static DEVICE_ATTR_RO(pasid_enabled);
  
index 0000000000000000000000000000000000000000,a903461da5bd57b3bfab61ec4a91f2a3d98ff2b0..b359421ee9ea52e2a31e786a6c0d59f4551be5b3
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,302 +1,303 @@@
 -      chans = pdata->dma_channels;
+ // SPDX-License-Identifier: GPL-2.0+
+ //
+ // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+ // Copyright (c) 2017 Sysam, Angelo Dureghello  <[email protected]>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/dmaengine.h>
+ #include <linux/platform_device.h>
+ #include <linux/platform_data/dma-mcf-edma.h>
+ #include "fsl-edma-common.h"
+ #define EDMA_CHANNELS         64
+ #define EDMA_MASK_CH(x)               ((x) & GENMASK(5, 0))
+ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
+ {
+       struct fsl_edma_engine *mcf_edma = dev_id;
+       struct edma_regs *regs = &mcf_edma->regs;
+       unsigned int ch;
+       u64 intmap;
+       intmap = ioread32(regs->inth);
+       intmap <<= 32;
+       intmap |= ioread32(regs->intl);
+       if (!intmap)
+               return IRQ_NONE;
+       for (ch = 0; ch < mcf_edma->n_chans; ch++) {
+               if (intmap & BIT(ch)) {
+                       iowrite8(EDMA_MASK_CH(ch), regs->cint);
+                       fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
+               }
+       }
+       return IRQ_HANDLED;
+ }
+ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
+ {
+       struct fsl_edma_engine *mcf_edma = dev_id;
+       struct edma_regs *regs = &mcf_edma->regs;
+       unsigned int err, ch;
+       err = ioread32(regs->errl);
+       if (!err)
+               return IRQ_NONE;
+       for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
+               if (err & BIT(ch)) {
+                       fsl_edma_disable_request(&mcf_edma->chans[ch]);
+                       iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+                       fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
+               }
+       }
+       err = ioread32(regs->errh);
+       if (!err)
+               return IRQ_NONE;
+       for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
+               if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
+                       fsl_edma_disable_request(&mcf_edma->chans[ch]);
+                       iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+                       mcf_edma->chans[ch].status = DMA_ERROR;
+                       mcf_edma->chans[ch].idle = true;
+               }
+       }
+       return IRQ_HANDLED;
+ }
+ static int mcf_edma_irq_init(struct platform_device *pdev,
+                               struct fsl_edma_engine *mcf_edma)
+ {
+       int ret = 0, i;
+       struct resource *res;
+       res = platform_get_resource_byname(pdev,
+                               IORESOURCE_IRQ, "edma-tx-00-15");
+       if (!res)
+               return -1;
+       for (ret = 0, i = res->start; i <= res->end; ++i)
+               ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+       if (ret)
+               return ret;
+       res = platform_get_resource_byname(pdev,
+                       IORESOURCE_IRQ, "edma-tx-16-55");
+       if (!res)
+               return -1;
+       for (ret = 0, i = res->start; i <= res->end; ++i)
+               ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+       if (ret)
+               return ret;
+       ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
+       if (ret != -ENXIO) {
+               ret = request_irq(ret, mcf_edma_tx_handler,
+                                 0, "eDMA", mcf_edma);
+               if (ret)
+                       return ret;
+       }
+       ret = platform_get_irq_byname(pdev, "edma-err");
+       if (ret != -ENXIO) {
+               ret = request_irq(ret, mcf_edma_err_handler,
+                                 0, "eDMA", mcf_edma);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
+ static void mcf_edma_irq_free(struct platform_device *pdev,
+                               struct fsl_edma_engine *mcf_edma)
+ {
+       int irq;
+       struct resource *res;
+       res = platform_get_resource_byname(pdev,
+                       IORESOURCE_IRQ, "edma-tx-00-15");
+       if (res) {
+               for (irq = res->start; irq <= res->end; irq++)
+                       free_irq(irq, mcf_edma);
+       }
+       res = platform_get_resource_byname(pdev,
+                       IORESOURCE_IRQ, "edma-tx-16-55");
+       if (res) {
+               for (irq = res->start; irq <= res->end; irq++)
+                       free_irq(irq, mcf_edma);
+       }
+       irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
+       if (irq != -ENXIO)
+               free_irq(irq, mcf_edma);
+       irq = platform_get_irq_byname(pdev, "edma-err");
+       if (irq != -ENXIO)
+               free_irq(irq, mcf_edma);
+ }
+ static struct fsl_edma_drvdata mcf_data = {
+       .flags = FSL_EDMA_DRV_EDMA64,
+       .setup_irq = mcf_edma_irq_init,
+ };
+ static int mcf_edma_probe(struct platform_device *pdev)
+ {
+       struct mcf_edma_platform_data *pdata;
+       struct fsl_edma_engine *mcf_edma;
+       struct edma_regs *regs;
+       int ret, i, chans;
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               dev_err(&pdev->dev, "no platform data supplied\n");
+               return -EINVAL;
+       }
 -      if (!mcf_edma->n_chans) {
 -              dev_info(&pdev->dev, "setting default channel number to 64");
 -              mcf_edma->n_chans = 64;
 -      }
 -
++      if (!pdata->dma_channels) {
++              dev_info(&pdev->dev, "setting default channel number to 64");
++              chans = 64;
++      } else {
++              chans = pdata->dma_channels;
++      }
++
+       mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
+                               GFP_KERNEL);
+       if (!mcf_edma)
+               return -ENOMEM;
+       mcf_edma->n_chans = chans;
+       /* Set up drvdata for ColdFire edma */
+       mcf_edma->drvdata = &mcf_data;
+       mcf_edma->big_endian = 1;
+       mutex_init(&mcf_edma->fsl_edma_mutex);
+       mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mcf_edma->membase))
+               return PTR_ERR(mcf_edma->membase);
+       fsl_edma_setup_regs(mcf_edma);
+       regs = &mcf_edma->regs;
+       INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
+       for (i = 0; i < mcf_edma->n_chans; i++) {
+               struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
+               mcf_chan->edma = mcf_edma;
+               mcf_chan->slave_id = i;
+               mcf_chan->idle = true;
+               mcf_chan->dma_dir = DMA_NONE;
+               mcf_chan->vchan.desc_free = fsl_edma_free_desc;
+               vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
+               mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+                               + i * sizeof(struct fsl_edma_hw_tcd);
+               iowrite32(0x0, &mcf_chan->tcd->csr);
+       }
+       iowrite32(~0, regs->inth);
+       iowrite32(~0, regs->intl);
+       ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
+       if (ret)
+               return ret;
+       dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
+       dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
+       mcf_edma->dma_dev.dev = &pdev->dev;
+       mcf_edma->dma_dev.device_alloc_chan_resources =
+                       fsl_edma_alloc_chan_resources;
+       mcf_edma->dma_dev.device_free_chan_resources =
+                       fsl_edma_free_chan_resources;
+       mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
+       mcf_edma->dma_dev.device_prep_dma_cyclic =
+                       fsl_edma_prep_dma_cyclic;
+       mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+       mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+       mcf_edma->dma_dev.device_pause = fsl_edma_pause;
+       mcf_edma->dma_dev.device_resume = fsl_edma_resume;
+       mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+       mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+       mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+       mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+       mcf_edma->dma_dev.directions =
+                       BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
+       mcf_edma->dma_dev.filter.map = pdata->slave_map;
+       mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
+       platform_set_drvdata(pdev, mcf_edma);
+       ret = dma_async_device_register(&mcf_edma->dma_dev);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "Can't register Freescale eDMA engine. (%d)\n", ret);
+               return ret;
+       }
+       /* Enable round robin arbitration */
+       iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+       return 0;
+ }
+ static int mcf_edma_remove(struct platform_device *pdev)
+ {
+       struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
+       mcf_edma_irq_free(pdev, mcf_edma);
+       fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
+       dma_async_device_unregister(&mcf_edma->dma_dev);
+       return 0;
+ }
+ static struct platform_driver mcf_edma_driver = {
+       .driver         = {
+               .name   = "mcf-edma",
+       },
+       .probe          = mcf_edma_probe,
+       .remove         = mcf_edma_remove,
+ };
+ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
+ {
+       if (chan->device->dev->driver == &mcf_edma_driver.driver) {
+               struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
+               return (mcf_chan->slave_id == (uintptr_t)param);
+       }
+       return false;
+ }
+ EXPORT_SYMBOL(mcf_edma_filter_fn);
+ static int __init mcf_edma_init(void)
+ {
+       return platform_driver_register(&mcf_edma_driver);
+ }
+ subsys_initcall(mcf_edma_init);
+ static void __exit mcf_edma_exit(void)
+ {
+       platform_driver_unregister(&mcf_edma_driver);
+ }
+ module_exit(mcf_edma_exit);
+ MODULE_ALIAS("platform:mcf-edma");
+ MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
+ MODULE_LICENSE("GPL v2");
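
mcf_edma_probe() sizes its single allocation with struct_size(), which accounts for the flexible chans[] array at the end of struct fsl_edma_engine and saturates to SIZE_MAX on multiplication overflow. A minimal sketch of the idiom with hypothetical types:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_engine {
	unsigned int n_chans;
	struct demo_chan {
		int id;
	} chans[];		/* flexible array member, must be last */
};

static struct demo_engine *demo_engine_alloc(unsigned int n)
{
	/* struct_size(e, chans, n) == sizeof(*e) + n * sizeof(e->chans[0]) */
	struct demo_engine *e = kzalloc(struct_size(e, chans, n), GFP_KERNEL);

	if (e)
		e->n_chans = n;
	return e;
}
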
diff --combined drivers/dma/owl-dma.c
index b6e0ac8314e5cd278a0d3555805146a21bd206b3,3b7ceee0b85b7c4db943c7027371fdb1bd25bd79..384476757c5e3a47502c23dbadc2049b627dcf34
@@@ -20,8 -20,9 +20,9 @@@
  #include <linux/io.h>
  #include <linux/mm.h>
  #include <linux/module.h>
- #include <linux/of_device.h>
+ #include <linux/of.h>
  #include <linux/of_dma.h>
+ #include <linux/platform_device.h>
  #include <linux/slab.h>
  #include "virt-dma.h"
  
@@@ -192,7 -193,7 +193,7 @@@ struct owl_dma_pchan 
  };
  
  /**
 - * struct owl_dma_pchan - Wrapper for DMA ENGINE channel
 + * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
   * @vc: wrapped virtual channel
   * @pchan: the physical channel utilized by this channel
   * @txd: active transaction on this channel
@@@ -1116,7 -1117,7 +1117,7 @@@ static int owl_dma_probe(struct platfor
        dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
                 nr_channels, nr_requests);
  
-       od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+       od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
  
        od->nr_pchans = nr_channels;
        od->nr_vchans = nr_requests;
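
The owl-dma fix casts the match data through uintptr_t instead of casting the pointer straight to the enum, which avoids a pointer-to-smaller-integer truncation warning on 64-bit builds. A sketch of the idiom with hypothetical compatibles and ids:

#include <linux/of_device.h>
#include <linux/platform_device.h>

enum demo_dma_id { DEMO_DMA_S700 = 1, DEMO_DMA_S900 };

static const struct of_device_id demo_of_match[] = {
	/* the integer id is stashed in the pointer-sized .data field */
	{ .compatible = "demo,dma-s700", .data = (void *)DEMO_DMA_S700 },
	{ .compatible = "demo,dma-s900", .data = (void *)DEMO_DMA_S900 },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	/* cast back via uintptr_t: pointer-sized integer, then enum */
	enum demo_dma_id id =
		(uintptr_t)of_device_get_match_data(&pdev->dev);

	return id ? 0 : -ENODEV;
}
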