Git Repo - linux.git/commitdiff
Merge tag 'dmaengine-4.11-rc1' of git://git.infradead.org/users/vkoul/slave-dma
authorLinus Torvalds <[email protected]>
Wed, 22 Feb 2017 01:06:22 +0000 (17:06 -0800)
committerLinus Torvalds <[email protected]>
Wed, 22 Feb 2017 01:06:22 +0000 (17:06 -0800)
Pull dmaengine updates from Vinod Koul:
 "This time it is a fairly boring and a bit small update.

   - Support for Intel iDMA 32-bit hardware
   - deprecate broken support for channel switching in async_tx
   - bunch of updates on stm32-dma
   - Cyclic support for zx dma and making it a generic zx dma driver
   - Small updates to bunch of other drivers"

* tag 'dmaengine-4.11-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (29 commits)
  async_tx: deprecate broken support for channel switching
  dmaengine: rcar-dmac: Widen DMA mask to 40 bits
  dmaengine: sun6i: allow build on ARM64 platforms (sun50i)
  dmaengine: Provide a wrapper for memcpy operations
  dmaengine: zx: fix build warning
  dmaengine: dw: we do support Merrifield SoC in PCI mode
  dmaengine: dw: add support of iDMA 32-bit hardware
  dmaengine: dw: introduce register mappings for iDMA 32-bit
  dmaengine: dw: introduce block2bytes() and bytes2block()
  dmaengine: dw: extract dwc_chan_pause() for future use
  dmaengine: dw: replace convert_burst() with one liner
  dmaengine: dw: register IRQ and DMA pool with instance ID
  dmaengine: dw: Fix data corruption in large device to memory transfers
  dmaengine: ste_dma40: indicate granularity on channels
  dmaengine: ste_dma40: indicate directions on channels
  dmaengine: stm32-dma: Add error messages if xlate fails
  dmaengine: dw: pci: remove LPE Audio DMA ID
  dmaengine: stm32-dma: Add max_burst support
  dmaengine: stm32-dma: Add synchronization support
  dmaengine: stm32-dma: Fix residue computation issue in cyclic mode
  ...

1  2 
drivers/dma/sh/rcar-dmac.c
drivers/dma/stm32-dma.c

index 4c357d47546594c6bd0c9b1ca16e27c658a720cf,93a69b992a51a7aaffbea2458c831e3c8d459945..48b22d5c860260988f052c331f0c3050854b288c
@@@ -986,7 -986,6 +986,7 @@@ static void rcar_dmac_free_chan_resourc
  {
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
 +      struct rcar_dmac_chan_map *map = &rchan->map;
        struct rcar_dmac_desc_page *page, *_page;
        struct rcar_dmac_desc *desc;
        LIST_HEAD(list);
                free_page((unsigned long)page);
        }
  
 +      /* Remove slave mapping if present. */
 +      if (map->slave.xfer_size) {
 +              dma_unmap_resource(chan->device->dev, map->addr,
 +                                 map->slave.xfer_size, map->dir, 0);
 +              map->slave.xfer_size = 0;
 +      }
 +
        pm_runtime_put(chan->device->dev);
  }
  
@@@ -1724,6 -1716,7 +1724,7 @@@ static int rcar_dmac_probe(struct platf
  
        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);
+       dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
  
        ret = rcar_dmac_parse_of(&pdev->dev, dmac);
        if (ret < 0)
diff --combined drivers/dma/stm32-dma.c
index 3056ce7f8c69d01c61fe3ab0eeff6ad299f538f7,4eacd9dd5710b41d2262f04916ec2f6f919dc896..49f86cabcfec1e04b6d63f4dfb77d6f4a74654a4
  #define STM32_DMA_MAX_CHANNELS                0x08
  #define STM32_DMA_MAX_REQUEST_ID      0x08
  #define STM32_DMA_MAX_DATA_PARAM      0x03
+ #define STM32_DMA_MAX_BURST           16
  
  enum stm32_dma_width {
        STM32_DMA_BYTE,
@@@ -403,6 -404,13 +404,13 @@@ static int stm32_dma_terminate_all(stru
        return 0;
  }
  
+ static void stm32_dma_synchronize(struct dma_chan *c)
+ {
+       struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
+       vchan_synchronize(&chan->vchan);
+ }
  static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
  {
        struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
        dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
  }
  
- static int stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
  {
        struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
        struct virt_dma_desc *vdesc;
  
        ret = stm32_dma_disable_chan(chan);
        if (ret < 0)
-               return ret;
+               return;
  
        if (!chan->desc) {
                vdesc = vchan_next_desc(&chan->vchan);
                if (!vdesc)
-                       return -EPERM;
+                       return;
  
                chan->desc = to_stm32_dma_desc(vdesc);
                chan->next_sg = 0;
  
        chan->busy = true;
  
-       return 0;
+       dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan);
  }
  
  static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
                        dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
                                stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
                }
-               chan->next_sg++;
        }
  }
  
@@@ -510,6 -516,7 +516,7 @@@ static void stm32_dma_handle_chan_done(
        if (chan->desc) {
                if (chan->desc->cyclic) {
                        vchan_cyclic_callback(&chan->desc->vdesc);
+                       chan->next_sg++;
                        stm32_dma_configure_next_sg(chan);
                } else {
                        chan->busy = false;
@@@ -552,15 -559,13 +559,13 @@@ static void stm32_dma_issue_pending(str
  {
        struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
        unsigned long flags;
-       int ret;
  
        spin_lock_irqsave(&chan->vchan.lock, flags);
-       if (!chan->busy) {
-               if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
-                       ret = stm32_dma_start_transfer(chan);
-                       if ((!ret) && (chan->desc->cyclic))
-                               stm32_dma_configure_next_sg(chan);
-               }
+       if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
+               dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan);
+               stm32_dma_start_transfer(chan);
+               if (chan->desc->cyclic)
+                       stm32_dma_configure_next_sg(chan);
        }
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
  }
@@@ -848,26 -853,40 +853,40 @@@ static struct dma_async_tx_descriptor *
        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
  }
  
+ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
+ {
+       u32 dma_scr, width, ndtr;
+       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+       dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
+       width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
+       ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
+       return ndtr << width;
+ }
  static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
                                     struct stm32_dma_desc *desc,
                                     u32 next_sg)
  {
-       struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
-       u32 dma_scr, width, residue, count;
+       u32 residue = 0;
        int i;
  
-       residue = 0;
+       /*
+        * In cyclic mode, for the last period, residue = remaining bytes from
+        * NDTR
+        */
+       if (chan->desc->cyclic && next_sg == 0)
+               return stm32_dma_get_remaining_bytes(chan);
  
+       /*
+        * For all other periods in cyclic mode, and in sg mode,
+        * residue = remaining bytes from NDTR + remaining periods/sg to be
+        * transferred
+        */
        for (i = next_sg; i < desc->num_sgs; i++)
                residue += desc->sg_req[i].len;
-       if (next_sg != 0) {
-               dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
-               width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
-               count = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
-               residue += count << width;
-       }
+       residue += stm32_dma_get_remaining_bytes(chan);
  
        return residue;
  }
@@@ -880,7 -899,7 +899,7 @@@ static enum dma_status stm32_dma_tx_sta
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;
 -      u32 residue;
 +      u32 residue = 0;
  
        status = dma_cookie_status(c, cookie, state);
        if ((status == DMA_COMPLETE) || (!state))
  
        spin_lock_irqsave(&chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&chan->vchan, cookie);
 -      if (cookie == chan->desc->vdesc.tx.cookie) {
 +      if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
                residue = stm32_dma_desc_residue(chan, chan->desc,
                                                 chan->next_sg);
 -      } else if (vdesc) {
 +      else if (vdesc)
                residue = stm32_dma_desc_residue(chan,
                                                 to_stm32_dma_desc(vdesc), 0);
 -      } else {
 -              residue = 0;
 -      }
 -
        dma_set_residue(state, residue);
  
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@@ -964,27 -987,36 +983,36 @@@ static struct dma_chan *stm32_dma_of_xl
                                           struct of_dma *ofdma)
  {
        struct stm32_dma_device *dmadev = ofdma->of_dma_data;
+       struct device *dev = dmadev->ddev.dev;
        struct stm32_dma_cfg cfg;
        struct stm32_dma_chan *chan;
        struct dma_chan *c;
  
-       if (dma_spec->args_count < 4)
+       if (dma_spec->args_count < 4) {
+               dev_err(dev, "Bad number of cells\n");
                return NULL;
+       }
  
        cfg.channel_id = dma_spec->args[0];
        cfg.request_line = dma_spec->args[1];
        cfg.stream_config = dma_spec->args[2];
        cfg.threshold = dma_spec->args[3];
  
-       if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
-                               STM32_DMA_MAX_REQUEST_ID))
+       if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) ||
+           (cfg.request_line >= STM32_DMA_MAX_REQUEST_ID)) {
+               dev_err(dev, "Bad channel and/or request id\n");
                return NULL;
+       }
  
        chan = &dmadev->chan[cfg.channel_id];
  
        c = dma_get_slave_channel(&chan->vchan.chan);
-       if (c)
-               stm32_dma_set_config(chan, &cfg);
+       if (!c) {
+               dev_err(dev, "No more channel avalaible\n");
+               return NULL;
+       }
+       stm32_dma_set_config(chan, &cfg);
  
        return c;
  }
@@@ -1048,6 -1080,7 +1076,7 @@@ static int stm32_dma_probe(struct platf
        dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
        dd->device_config = stm32_dma_slave_config;
        dd->device_terminate_all = stm32_dma_terminate_all;
+       dd->device_synchronize = stm32_dma_synchronize;
        dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       dd->max_burst = STM32_DMA_MAX_BURST;
        dd->dev = &pdev->dev;
        INIT_LIST_HEAD(&dd->channels);
  
This page took 0.118345 seconds and 4 git commands to generate.