Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
author	Linus Torvalds <[email protected]>
	Tue, 10 Sep 2013 20:37:36 +0000 (13:37 -0700)
committer	Linus Torvalds <[email protected]>
	Tue, 10 Sep 2013 20:37:36 +0000 (13:37 -0700)
Pull slave-dmaengine updates from Vinod Koul:
 "This pull brings:
   - Andy's DW driver updates
   - Guennadi's sh driver updates
   - Pl08x driver fixes from Tomasz & Alban
   - Improvements to mmp_pdma by Daniel
   - TI EDMA fixes by Joel
   - New drivers:
     - Hisilicon k3dma driver
     - Renesas rcar dma driver
   - New API for publishing slave driver capabilities
   - Various fixes across the subsystem by Andy, Jingoo, Sachin etc..."

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (94 commits)
  dma: edma: Remove limits on number of slots
  dma: edma: Leave linked to Null slot instead of DUMMY slot
  dma: edma: Find missed events and issue them
  ARM: edma: Add function to manually trigger an EDMA channel
  dma: edma: Write out and handle MAX_NR_SG at a given time
  dma: edma: Setup parameters to DMA MAX_NR_SG at a time
  dmaengine: pl330: use dma_set_max_seg_size to set the sg limit
  dmaengine: dma_slave_caps: remove sg entries
  dma: replace devm_request_and_ioremap by devm_ioremap_resource
  dma: ste_dma40: Fix potential null pointer dereference
  dma: ste_dma40: Remove duplicate const
  dma: imx-dma: Remove redundant NULL check
  dma: dmagengine: fix function names in comments
  dma: add driver for R-Car HPB-DMAC
  dma: k3dma: use devm_ioremap_resource() instead of devm_request_and_ioremap()
  dma: imx-sdma: Staticize sdma_driver_data structures
  pch_dma: Add MODULE_DEVICE_TABLE
  dmaengine: PL08x: Add cyclic transfer support
  dmaengine: PL08x: Fix reading the byte count in cctl
  dmaengine: PL08x: Add support for different maximum transfer size
  ...
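
For context on the capabilities item above: the new API lets a dmaengine
client query a slave channel before configuring it. A minimal sketch in C
against this cycle's include/linux/dmaengine.h; the field and helper names
below are recalled assumptions about that API, not quoted from the diff:

#include <linux/bitops.h>
#include <linux/dmaengine.h>

/* Sketch: reject a channel up front unless it supports pausing a
 * device-to-memory transfer.  dma_get_slave_caps() is assumed to
 * return non-zero when the driver does not publish capabilities. */
static bool chan_can_pause_dev_to_mem(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	return caps.cmd_pause && (caps.directions & BIT(DMA_DEV_TO_MEM));
}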

Documentation/driver-model/devres.txt
MAINTAINERS
arch/arm/mach-imx/mm-imx5.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/amba-pl08x.c
drivers/dma/dmaengine.c
drivers/dma/mv_xor.c
drivers/dma/sh/shdmac.c
include/linux/dmaengine.h

diff --combined Documentation/driver-model/devres.txt
index fb57d85e7316027f58c336a359e0b1eb6c320f64,2850110b343aef7dbc4056d9430be79ffb9bb155..fcb34a5697eaa4ec2c44a46791b711d5a78f1c7b
@@@ -237,12 -237,6 +237,12 @@@ ME
    devm_kzalloc()
    devm_kfree()
  
 +IIO
 +  devm_iio_device_alloc()
 +  devm_iio_device_free()
 +  devm_iio_trigger_alloc()
 +  devm_iio_trigger_free()
 +
  IO region
    devm_request_region()
    devm_request_mem_region()
@@@ -299,3 -293,6 +299,6 @@@ PW
  PHY
    devm_usb_get_phy()
    devm_usb_put_phy()
+
+ SLAVE DMA ENGINE
+   devm_acpi_dma_controller_register()
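
Several commits in this series ("dma: replace devm_request_and_ioremap by
devm_ioremap_resource" and its k3dma counterpart) convert drivers to the
managed-ioremap pattern documented above. A hedged sketch of the converted
form, using a made-up foo_probe() for illustration; devm_ioremap_resource()
returns an ERR_PTR on failure and logs its own error message:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* Checks res for NULL, requests the region, then maps it;
	 * everything is unwound automatically on driver detach. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... remainder of probe ... */
	return 0;
}
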
diff --combined MAINTAINERS
index 87efa1f5c7f3e6b62594a006a08128fb93b347a6,6a6554a68959b66ecc32924aab691f64e2c0f2a7..38a37f7e514f8c04f8b549d6327687d49443ffc8
@@@ -580,24 -580,12 +580,24 @@@ L:      [email protected]
  S:    Maintained
  F:    drivers/media/i2c/ad9389b*
  
 +ANALOG DEVICES INC ADV7511 DRIVER
 +M:    Hans Verkuil <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/media/i2c/adv7511*
 +
  ANALOG DEVICES INC ADV7604 DRIVER
  M:    Hans Verkuil <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/media/i2c/adv7604*
  
 +ANALOG DEVICES INC ADV7842 DRIVER
 +M:    Hans Verkuil <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/media/i2c/adv7842*
 +
  ANALOG DEVICES INC ASOC CODEC DRIVERS
  M:    Lars-Peter Clausen <[email protected]>
  L:    [email protected]
@@@ -607,7 -595,6 +607,7 @@@ S: Supporte
  F:    sound/soc/codecs/adau*
  F:    sound/soc/codecs/adav*
  F:    sound/soc/codecs/ad1*
 +F:    sound/soc/codecs/ad7*
  F:    sound/soc/codecs/ssm*
  F:    sound/soc/codecs/sigmadsp.*
  
@@@ -651,12 -638,6 +651,12 @@@ S:       Maintaine
  F:    drivers/net/appletalk/
  F:    net/appletalk/
  
 +APTINA CAMERA SENSOR PLL
 +M:    Laurent Pinchart <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/media/i2c/aptina-pll.*
 +
  ARASAN COMPACT FLASH PATA CONTROLLER
  M:    Viresh Kumar <[email protected]>
  L:    [email protected]
@@@ -832,7 -813,7 +832,7 @@@ F: arch/arm/mach-prima2
  F:    drivers/dma/sirf-dma.c
  F:    drivers/i2c/busses/i2c-sirf.c
  F:    drivers/mmc/host/sdhci-sirf.c
 -F:    drivers/pinctrl/pinctrl-sirf.c
 +F:    drivers/pinctrl/sirf/
  F:    drivers/spi/spi-sirf.c
  
  ARM/EBSA110 MACHINE SUPPORT
@@@ -933,24 -914,24 +933,24 @@@ F:      arch/arm/mach-pxa/colibri-pxa270-inc
  
  ARM/INTEL IOP32X ARM ARCHITECTURE
  M:    Lennert Buytenhek <[email protected]>
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  
  ARM/INTEL IOP33X ARM ARCHITECTURE
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  
  ARM/INTEL IOP13XX ARM ARCHITECTURE
  M:    Lennert Buytenhek <[email protected]>
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  
  ARM/INTEL IQ81342EX MACHINE SUPPORT
  M:    Lennert Buytenhek <[email protected]>
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  
@@@ -975,7 -956,7 +975,7 @@@ F: drivers/pcmcia/pxa2xx_stargate2.
  
  ARM/INTEL XSC3 (MANZANO) ARM CORE
  M:    Lennert Buytenhek <[email protected]>
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  
@@@ -984,12 -965,6 +984,12 @@@ M:       Lennert Buytenhek <kernel@wantstofly
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  
 +ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
 +M:    Santosh Shilimkar <[email protected]>
 +L:    [email protected] (moderated for non-subscribers)
 +S:    Maintained
 +F:    arch/arm/mach-keystone/
 +
  ARM/LOGICPD PXA270 MACHINE SUPPORT
  M:    Lennert Buytenhek <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
@@@ -1284,6 -1259,7 +1284,6 @@@ F:      drivers/rtc/rtc-coh901331.
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
  
  ARM/Ux500 ARM ARCHITECTURE
 -M:    Srinidhi Kasagar <[email protected]>
  M:    Linus Walleij <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
@@@ -1320,6 -1296,7 +1320,6 @@@ L:      [email protected]
  S:    Maintained
  F:    arch/arm/mach-vt8500/
  F:    drivers/clocksource/vt8500_timer.c
 -F:    drivers/gpio/gpio-vt8500.c
  F:    drivers/i2c/busses/i2c-wmt.c
  F:    drivers/mmc/host/wmt-sdmmc.c
  F:    drivers/pwm/pwm-vt8500.c
@@@ -1386,7 -1363,7 +1386,7 @@@ F:      drivers/platform/x86/asus*.
  F:    drivers/platform/x86/eeepc*.c
  
  ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  W:    http://sourceforge.net/projects/xscaleiop
  S:    Maintained
  F:    Documentation/crypto/async-tx-api.txt
@@@ -1565,13 -1542,6 +1565,13 @@@ W:    http://atmelwlandriver.sourceforge.n
  S:    Maintained
  F:    drivers/net/wireless/atmel*
  
 +ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 +M:      Bradley Grove <[email protected]>
 +L:      [email protected]
 +W:      http://www.attotech.com
 +S:      Supported
 +F:      drivers/scsi/esas2r
 +
  AUDIT SUBSYSTEM
  M:    Al Viro <[email protected]>
  M:    Eric Paris <[email protected]>
  S:    Supported
  F:    drivers/scsi/bnx2fc/
  
 +BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
 +M:    Eddie Wai <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/scsi/bnx2i/
 +
  BROADCOM SPECIFIC AMBA DRIVER (BCMA)
  M:    Rafał Miłecki <[email protected]>
  L:    [email protected]
@@@ -2107,8 -2071,7 +2107,8 @@@ F:      drivers/usb/chipidea
  
  CISCO VIC ETHERNET NIC DRIVER
  M:    Christian Benvenuti <[email protected]>
 -M:    Roopa Prabhu <[email protected]>
 +M:    Sujith Sankar <[email protected]>
 +M:    Govindarajulu Varadarajan <[email protected]>
  M:    Neel Patel <[email protected]>
  M:    Nishank Trivedi <[email protected]>
  S:    Supported
@@@ -2144,13 -2107,6 +2144,13 @@@ M:    Russell King <[email protected]
  S:    Maintained
  F:    include/linux/clk.h
  
 +CLOCKSOURCE, CLOCKEVENT DRIVERS
 +M:    Daniel Lezcano <[email protected]>
 +M:    Thomas Gleixner <[email protected]>
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 +S:    Supported
 +F:    drivers/clocksource
 +
  CISCO FCOE HBA DRIVER
  M:    Hiral Patel <[email protected]>
  M:    Suma Ramars <[email protected]>
@@@ -2307,15 -2263,6 +2307,15 @@@ F:    drivers/cpufreq/arm_big_little.
  F:    drivers/cpufreq/arm_big_little.c
  F:    drivers/cpufreq/arm_big_little_dt.c
  
 +CPUIDLE DRIVER - ARM BIG LITTLE
 +M:      Lorenzo Pieralisi <[email protected]>
 +M:      Daniel Lezcano <[email protected]>
 +L:      [email protected]
 +L:      [email protected]
 +T:      git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 +S:      Maintained
 +F:      drivers/cpuidle/cpuidle-big_little.c
 +
  CPUIDLE DRIVERS
  M:    Rafael J. Wysocki <[email protected]>
  M:    Daniel Lezcano <[email protected]>
@@@ -2502,9 -2449,9 +2502,9 @@@ S:      Maintaine
  F:    drivers/media/common/cypress_firmware*
  
  CYTTSP TOUCHSCREEN DRIVER
 -M:    Javier Martinez Canillas <[email protected]>
 +M:    Ferruh Yigit <[email protected]>
  L:    [email protected]
 -S:    Maintained
 +S:    Supported
  F:    drivers/input/touchscreen/cyttsp*
  F:    include/linux/input/cyttsp.h
  
@@@ -2691,7 -2638,7 +2691,7 @@@ T:      git git://git.linaro.org/people/sumi
  
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <[email protected]>
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  S:    Supported
  F:    drivers/dma/
  F:    include/linux/dma*
@@@ -4149,13 -4096,6 +4149,13 @@@ W:    http://launchpad.net/ideapad-lapto
  S:    Maintained
  F:    drivers/platform/x86/ideapad-laptop.c
  
 +IDEAPAD LAPTOP SLIDEBAR DRIVER
 +M:    Andrey Moiseev <[email protected]>
 +L:    [email protected]
 +W:    https://github.com/o2genum/ideapad-slidebar
 +S:    Maintained
 +F:    drivers/input/misc/ideapad_slidebar.c
 +
  IDE/ATAPI DRIVERS
  M:    Borislav Petkov <[email protected]>
  L:    [email protected]
@@@ -4323,7 -4263,7 +4323,7 @@@ F:      arch/x86/kernel/microcode_core.
  F:    arch/x86/kernel/microcode_intel.c
  
  INTEL I/OAT DMA DRIVER
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  S:    Maintained
  F:    drivers/dma/ioat*
  
@@@ -4336,7 -4276,7 +4336,7 @@@ F:      drivers/iommu/intel-iommu.
  F:    include/linux/intel-iommu.h
  
  INTEL IOP-ADMA DMA DRIVER
 -M:    Dan Williams <djbw@fb.com>
 +M:    Dan Williams <dan.j.williams@intel.com>
  S:    Odd fixes
  F:    drivers/dma/iop-adma.c
  
@@@ -4420,7 -4360,7 +4420,7 @@@ F:      drivers/net/wireless/iwlegacy
  
  INTEL WIRELESS WIFI LINK (iwlwifi)
  M:    Johannes Berg <[email protected]>
 -M:    Wey-Yi Guy <wey-yi.w.guy@intel.com>
 +M:    Emmanuel Grumbach <emmanuel.grumbach@intel.com>
  M:    Intel Linux Wireless <[email protected]>
  L:    [email protected]
  W:    http://intellinuxwireless.org
@@@ -5451,7 -5391,6 +5451,7 @@@ F:      drivers/watchdog/mena21_wdt.
  
  METAG ARCHITECTURE
  M:    James Hogan <[email protected]>
 +L:    [email protected]
  S:    Supported
  F:    arch/metag/
  F:    Documentation/metag/
@@@ -5553,7 -5492,7 +5553,7 @@@ L:      [email protected]
  S:    Supported
  F:    drivers/platform/x86/msi-wmi.c
  
 -MT9M032 SENSOR DRIVER
 +MT9M032 APTINA SENSOR DRIVER
  M:    Laurent Pinchart <[email protected]>
  L:    [email protected]
  T:    git git://linuxtv.org/media_tree.git
@@@ -5561,7 -5500,7 +5561,7 @@@ S:      Maintaine
  F:    drivers/media/i2c/mt9m032.c
  F:    include/media/mt9m032.h
  
 -MT9P031 SENSOR DRIVER
 +MT9P031 APTINA CAMERA SENSOR
  M:    Laurent Pinchart <[email protected]>
  L:    [email protected]
  T:    git git://linuxtv.org/media_tree.git
@@@ -5569,7 -5508,7 +5569,7 @@@ S:      Maintaine
  F:    drivers/media/i2c/mt9p031.c
  F:    include/media/mt9p031.h
  
 -MT9T001 SENSOR DRIVER
 +MT9T001 APTINA CAMERA SENSOR
  M:    Laurent Pinchart <[email protected]>
  L:    [email protected]
  T:    git git://linuxtv.org/media_tree.git
@@@ -5577,7 -5516,7 +5577,7 @@@ S:      Maintaine
  F:    drivers/media/i2c/mt9t001.c
  F:    include/media/mt9t001.h
  
 -MT9V032 SENSOR DRIVER
 +MT9V032 APTINA CAMERA SENSOR
  M:    Laurent Pinchart <[email protected]>
  L:    [email protected]
  T:    git git://linuxtv.org/media_tree.git
@@@ -5637,9 -5576,9 +5637,9 @@@ S:      Maintaine
  F:    drivers/media/tuners/mxl5007t.*
  
  MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
 -M:    Andrew Gallatin <gallatin@myri.com>
 +M:    Hyong-Youb Kim <hykim@myri.com>
  L:    [email protected]
 -W:    http://www.myri.com/scs/download-Myri10GE.html
 +W:    https://www.myricom.com/support/downloads/myri10ge.html
  S:    Supported
  F:    drivers/net/ethernet/myricom/myri10ge/
  
@@@ -5848,7 -5787,7 +5848,7 @@@ M:      Aloisio Almeida Jr <aloisio.almeida@
  M:    Samuel Ortiz <[email protected]>
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
 -S:    Maintained
 +S:    Supported
  F:    net/nfc/
  F:    include/net/nfc/
  F:    include/uapi/linux/nfc.h
@@@ -5899,8 -5838,6 +5899,8 @@@ F:      drivers/scsi/nsp32
  NTB DRIVER
  M:    Jon Mason <[email protected]>
  S:    Supported
 +W:    https://github.com/jonmason/ntb/wiki
 +T:    git git://github.com/jonmason/ntb.git
  F:    drivers/ntb/
  F:    drivers/net/ntb_netdev.c
  F:    include/linux/ntb.h
@@@ -5942,7 -5879,7 +5942,7 @@@ F:      drivers/i2c/busses/i2c-omap.
  F:    include/linux/i2c-omap.h
  
  OMAP DEVICE TREE SUPPORT
 -M:    Benoît Cousson <b-cousson@ti.com>
 +M:    Benoît Cousson <bcousson@baylibre.com>
  M:    Tony Lindgren <[email protected]>
  L:    [email protected]
  L:    [email protected]
@@@ -6022,14 -5959,14 +6022,14 @@@ S:   Maintaine
  F:    drivers/char/hw_random/omap-rng.c
  
  OMAP HWMOD SUPPORT
 -M:    Benoît Cousson <b-cousson@ti.com>
 +M:    Benoît Cousson <bcousson@baylibre.com>
  M:    Paul Walmsley <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    arch/arm/mach-omap2/omap_hwmod.*
  
  OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
 -M:    Benoît Cousson <b-cousson@ti.com>
 +M:    Benoît Cousson <bcousson@baylibre.com>
  L:    [email protected]
  S:    Maintained
  F:    arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@@ -6124,7 -6061,7 +6124,7 @@@ M:      Rob Herring <[email protected]
  M:    Pawel Moll <[email protected]>
  M:    Mark Rutland <[email protected]>
  M:    Stephen Warren <[email protected]>
 -M:    Ian Campbell <i[email protected]>
 +M:    Ian Campbell <i[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/
@@@ -6333,13 -6270,6 +6333,13 @@@ F:    Documentation/PCI
  F:    drivers/pci/
  F:    include/linux/pci*
  
 +PCI DRIVER FOR NVIDIA TEGRA
 +M:    Thierry Reding <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 +F:    drivers/pci/host/pci-tegra.c
 +
  PCMCIA SUBSYSTEM
  P:    Linux PCMCIA Team
  L:    [email protected]
@@@ -6741,11 -6671,11 +6741,11 @@@ F:   Documentation/scsi/LICENSE.qla2xx
  F:    drivers/scsi/qla2xxx/
  
  QLOGIC QLA4XXX iSCSI DRIVER
 -M:    Ravi Anand <[email protected]>
  M:    Vikas Chaudhary <[email protected]>
  M:    [email protected]
  L:    [email protected]
  S:    Supported
 +F:    Documentation/scsi/LICENSE.qla4xxx
  F:    drivers/scsi/qla4xxx/
  
  QLOGIC QLA3XXX NETWORK DRIVER
@@@ -6986,14 -6916,6 +6986,14 @@@ M:    Maxim Levitsky <maximlevitsky@gmail.
  S:    Maintained
  F:    drivers/memstick/host/r592.*
  
 +ROCCAT DRIVERS
 +M:    Stefan Achatz <[email protected]>
 +W:    http://sourceforge.net/projects/roccat/
 +S:    Maintained
 +F:    drivers/hid/hid-roccat*
 +F:    include/linux/hid-roccat*
 +F:    Documentation/ABI/*/sysfs-driver-hid-roccat*
 +
  ROCKETPORT DRIVER
  P:    Comtrol Corp.
  W:    http://www.comtrol.com
@@@ -7204,6 -7126,7 +7204,7 @@@ F:      drivers/tty/seria
  
  SYNOPSYS DESIGNWARE DMAC DRIVER
  M:    Viresh Kumar <[email protected]>
+ M:    Andy Shevchenko <[email protected]>
  S:    Maintained
  F:    include/linux/dw_dmac.h
  F:    drivers/dma/dw/
@@@ -7216,7 -7139,7 +7217,7 @@@ S:      Maintaine
  F:    include/linux/mmc/dw_mmc.h
  F:    drivers/mmc/host/dw_mmc*
  
 -TIMEKEEPING, NTP
 +TIMEKEEPING, CLOCKSOURCE CORE, NTP
  M:    John Stultz <[email protected]>
  M:    Thomas Gleixner <[email protected]>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
@@@ -7229,6 -7152,7 +7230,6 @@@ F:      include/uapi/linux/timex.
  F:    kernel/time/clocksource.c
  F:    kernel/time/time*.c
  F:    kernel/time/ntp.c
 -F:    drivers/clocksource
  
  TLG2300 VIDEO4LINUX-2 DRIVER
  M:    Huang Shijie <[email protected]>
@@@ -7308,7 -7232,6 +7309,7 @@@ W:      http://lksctp.sourceforge.ne
  S:    Maintained
  F:    Documentation/networking/sctp.txt
  F:    include/linux/sctp.h
 +F:    include/uapi/linux/sctp.h
  F:    include/net/sctp/
  F:    net/sctp/
  
@@@ -7439,6 -7362,7 +7440,6 @@@ F:      drivers/net/ethernet/sfc
  
  SGI GRU DRIVER
  M:    Dimitri Sivanich <[email protected]>
 -M:    Robin Holt <[email protected]>
  S:    Maintained
  F:    drivers/misc/sgi-gru/
  
@@@ -7458,8 -7382,7 +7459,8 @@@ S:      Maintained for 2.6
  F:    Documentation/sgi-visws.txt
  
  SGI XP/XPC/XPNET DRIVER
 -M:    Robin Holt <[email protected]>
 +M:    Cliff Whickman <[email protected]>
 +M:    Robin Holt <[email protected]>
  S:    Maintained
  F:    drivers/misc/sgi-xp/
  
@@@ -7748,17 -7671,6 +7749,17 @@@ F:    include/sound
  F:    include/uapi/sound/
  F:    sound/
  
 +SOUND - COMPRESSED AUDIO
 +M:    Vinod Koul <[email protected]>
 +L:    [email protected] (moderated for non-subscribers)
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
 +S:    Supported
 +F:    Documentation/sound/alsa/compress_offload.txt
 +F:    include/sound/compress_driver.h
 +F:    include/uapi/sound/compress_*
 +F:    sound/core/compress_offload.c
 +F:    sound/soc/soc-compress.c
 +
  SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
  M:    Liam Girdwood <[email protected]>
  M:    Mark Brown <[email protected]>
@@@ -7766,7 -7678,6 +7767,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  L:    [email protected] (moderated for non-subscribers)
  W:    http://alsa-project.org/main/index.php/ASoC
  S:    Supported
 +F:    Documentation/sound/alsa/soc/
  F:    sound/soc/
  F:    include/sound/soc*
  
@@@ -7975,11 -7886,11 +7976,11 @@@ S:   Maintaine
  F:    drivers/staging/nvec/
  
  STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
 -M:    Andres Salomon <[email protected]>
 -M:    Chris Ball <cjb@laptop.org>
 +M:    Jens Frederich <[email protected]>
 +M:    Daniel Drake <dsd@laptop.org>
  M:    Jon Nettleton <[email protected]>
  W:    http://wiki.laptop.org/go/DCON
 -S:    Odd Fixes
 +S:    Maintained
  F:    drivers/staging/olpc_dcon/
  
  STAGING - OZMO DEVICES USB OVER WIFI DRIVER
@@@ -8057,12 -7968,6 +8058,12 @@@ F:    arch/m68k/sun3*
  F:    arch/m68k/include/asm/sun3*
  F:    drivers/net/ethernet/i825xx/sun3*
  
 +SUNDANCE NETWORK DRIVER
 +M:    Denis Kirjanov <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/net/ethernet/dlink/sundance.c
 +
  SUPERH
  M:    Paul Mundt <[email protected]>
  L:    [email protected]
@@@ -8398,14 -8303,9 +8399,14 @@@ M:    Chris Metcalf <[email protected]
  W:    http://www.tilera.com/scm/
  S:    Supported
  F:    arch/tile/
 -F:    drivers/tty/hvc/hvc_tile.c
 -F:    drivers/net/ethernet/tile/
 +F:    drivers/char/tile-srom.c
  F:    drivers/edac/tile_edac.c
 +F:    drivers/net/ethernet/tile/
 +F:    drivers/rtc/rtc-tile.c
 +F:    drivers/tty/hvc/hvc_tile.c
 +F:    drivers/tty/serial/tilegx.c
 +F:    drivers/usb/host/*-tilegx.c
 +F:    include/linux/usb/tilegx.h
  
  TLAN NETWORK DRIVER
  M:    Samuel Chessman <[email protected]>
@@@ -8765,11 -8665,6 +8766,11 @@@ T:    git git://git.alsa-project.org/alsa-
  S:    Maintained
  F:    sound/usb/midi.*
  
 +USB NETWORKING DRIVERS
 +L:    [email protected]
 +S:    Odd Fixes
 +F:    drivers/net/usb/
 +
  USB OHCI DRIVER
  M:    Alan Stern <[email protected]>
  L:    [email protected]
@@@ -8899,6 -8794,7 +8900,6 @@@ W:      http://www.linux-usb.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
  S:    Supported
  F:    Documentation/usb/
 -F:    drivers/net/usb/
  F:    drivers/usb/
  F:    include/linux/usb.h
  F:    include/linux/usb/
@@@ -9121,12 -9017,6 +9122,12 @@@ F:    drivers/staging/vme
  F:    drivers/vme/
  F:    include/linux/vme*
  
 +VMWARE HYPERVISOR INTERFACE
 +M:    Alok Kataria <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    arch/x86/kernel/cpu/vmware.c
 +
  VMWARE VMXNET3 ETHERNET DRIVER
  M:    Shreyas Bhatewara <[email protected]>
  M:    "VMware, Inc." <[email protected]>
@@@ -9348,9 -9238,9 +9349,9 @@@ F:      drivers/media/tuners/tuner-xc2028.
  
  XEN HYPERVISOR INTERFACE
  M:    Konrad Rzeszutek Wilk <[email protected]>
 -M:    Jeremy Fitzhardinge <[email protected]>
 -L:    [email protected] (moderated for non-subscribers)
 -L:    [email protected]
 +M:    Boris Ostrovsky <[email protected]>
 +M:    David Vrabel <[email protected]>
 +L:    [email protected] (moderated for non-subscribers)
  S:    Supported
  F:    arch/x86/xen/
  F:    drivers/*/xen-*front.c
@@@ -9361,35 -9251,35 +9362,35 @@@ F:   include/uapi/xen
  
  XEN HYPERVISOR ARM
  M:    Stefano Stabellini <[email protected]>
 -L:    [email protected]source.com (moderated for non-subscribers)
 +L:    [email protected]project.org (moderated for non-subscribers)
  S:    Supported
  F:    arch/arm/xen/
  F:    arch/arm/include/asm/xen/
  
  XEN HYPERVISOR ARM64
  M:    Stefano Stabellini <[email protected]>
 -L:    [email protected]source.com (moderated for non-subscribers)
 +L:    [email protected]project.org (moderated for non-subscribers)
  S:    Supported
  F:    arch/arm64/xen/
  F:    arch/arm64/include/asm/xen/
  
  XEN NETWORK BACKEND DRIVER
  M:    Ian Campbell <[email protected]>
 -L:    [email protected]source.com (moderated for non-subscribers)
 +L:    [email protected]project.org (moderated for non-subscribers)
  L:    [email protected]
  S:    Supported
  F:    drivers/net/xen-netback/*
  
  XEN PCI SUBSYSTEM
  M:    Konrad Rzeszutek Wilk <[email protected]>
 -L:    [email protected]source.com (moderated for non-subscribers)
 +L:    [email protected]project.org (moderated for non-subscribers)
  S:    Supported
  F:    arch/x86/pci/*xen*
  F:    drivers/pci/*xen*
  
  XEN SWIOTLB SUBSYSTEM
  M:    Konrad Rzeszutek Wilk <[email protected]>
 -L:    [email protected]source.com (moderated for non-subscribers)
 +L:    [email protected]project.org (moderated for non-subscribers)
  S:    Supported
  F:    arch/x86/xen/*swiotlb*
  F:    drivers/xen/*swiotlb*
diff --combined arch/arm/mach-imx/mm-imx5.c
index a8229b7f10bf0bf2380e747d8194b1c35f5bed18,051add9cc471ba5055d20a32737fada3555c5629..eb3cce38c70d3f09eb29d9b7d51343bca5327989
@@@ -103,22 -103,8 +103,8 @@@ void __init mx53_init_irq(void
        tzic_init_irq(MX53_IO_ADDRESS(MX53_TZIC_BASE_ADDR));
  }
  
- static struct sdma_script_start_addrs imx51_sdma_script __initdata = {
-       .ap_2_ap_addr = 642,
-       .uart_2_mcu_addr = 817,
-       .mcu_2_app_addr = 747,
-       .mcu_2_shp_addr = 961,
-       .ata_2_mcu_addr = 1473,
-       .mcu_2_ata_addr = 1392,
-       .app_2_per_addr = 1033,
-       .app_2_mcu_addr = 683,
-       .shp_2_per_addr = 1251,
-       .shp_2_mcu_addr = 892,
- };
-
  static struct sdma_platform_data imx51_sdma_pdata __initdata = {
        .fw_name = "sdma-imx51.bin",
-       .script_addrs = &imx51_sdma_script,
  };
  
  static const struct resource imx51_audmux_res[] __initconst = {
@@@ -153,10 -139,10 +139,10 @@@ void __init imx51_soc_init(void
  void __init imx51_init_late(void)
  {
        mx51_neon_fixup();
 -      imx51_pm_init();
 +      imx5_pm_init();
  }
  
  void __init imx53_init_late(void)
  {
 -      imx53_pm_init();
 +      imx5_pm_init();
  }
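
The mm-imx5.c hunk above drops the hard-coded SDMA script-address table
from the platform data, leaving only the firmware name: the addresses now
travel inside the "sdma-imx51.bin" image itself. A rough sketch of the
loading pattern this relies on, with the parsing step left abstract since
the firmware layout is not part of this diff:

#include <linux/device.h>
#include <linux/firmware.h>

static int load_sdma_scripts(struct device *dev, const char *fw_name)
{
	const struct firmware *fw;
	int ret;

	/* Fetch the named blob via the firmware loader. */
	ret = request_firmware(&fw, fw_name, dev);
	if (ret)
		return ret;

	/* ... parse the script addresses out of fw->data ... */

	release_firmware(fw);
	return 0;
}
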
diff --combined drivers/dma/Kconfig
index daa4da281e5ebedf83e791b2e94af1ed6f45d990,2945ff084a048d849d3da75fc36008425ddcd361..526ec77c7ba032b9af1c6772484028051a4950ef
@@@ -194,7 -194,7 +194,7 @@@ config SIRF_DM
          Enable support for the CSR SiRFprimaII DMA engine.
  
  config TI_EDMA
 -      tristate "TI EDMA support"
 +      bool "TI EDMA support"
        depends on ARCH_DAVINCI || ARCH_OMAP
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
@@@ -287,14 -287,6 +287,14 @@@ config DMA_OMA
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
  
 +config TI_CPPI41
 +      tristate "AM33xx CPPI41 DMA support"
 +      depends on ARCH_OMAP
 +      select DMA_ENGINE
 +      help
 +        The Communications Port Programming Interface (CPPI) 4.1 DMA engine
 +        is currently used by the USB driver on AM335x platforms.
 +
  config MMP_PDMA
        bool "MMP PDMA support"
        depends on (ARCH_MMP || ARCH_PXA)
@@@ -308,6 -300,15 +308,15 @@@ config DMA_JZ474
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
  
+ config K3_DMA
+       tristate "Hisilicon K3 DMA support"
+       depends on ARCH_HI3xxx
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Support the DMA engine for Hisilicon K3 platform
+         devices.
+
  config DMA_ENGINE
        bool
  
diff --combined drivers/dma/Makefile
index 6d62ec30c4bc594fcd02bdb1ce79e56a28b9a41f,8c97941df2738f1d1f6973a747507d7dffbbc1db..db89035b362612304a3334ab5c9834770cab1303
@@@ -39,4 -39,4 +39,5 @@@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.
  obj-$(CONFIG_DMA_OMAP) += omap-dma.o
  obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
  obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 +obj-$(CONFIG_TI_CPPI41) += cppi41.o
+ obj-$(CONFIG_K3_DMA) += k3dma.o
diff --combined drivers/dma/amba-pl08x.c
index bff41d4848e53e35891bb0ef34ae5194d3a597e5,cd294340c8516812e24b4642129a9622cc89d2dd..fce46c5bf1c74e3d76accde7570ffa2d423eb9f1
@@@ -24,6 -24,7 +24,7 @@@
   *
   * Documentation: ARM DDI 0196G == PL080
   * Documentation: ARM DDI 0218E == PL081
+  * Documentation: S3C6410 User's Manual == PL080S
   *
   * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
   * channel.
   *
   * The PL080 has a dual bus master, PL081 has a single master.
   *
+  * PL080S is a version modified by Samsung and used in S3C64xx SoCs.
+  * It differs in following aspects:
+  * - CH_CONFIG register at different offset,
+  * - separate CH_CONTROL2 register for transfer size,
+  * - bigger maximum transfer size,
+  * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word,
+  * - no support for peripheral flow control.
+  *
   * Memory to peripheral transfer may be visualized as
   *    Get data from memory to DMAC
   *    Until no data left
   *  - Peripheral flow control: the transfer size is ignored (and should be
   *    zero).  The data is transferred from the current LLI entry, until
   *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
-  *    will then move to the next LLI entry.
-  *
-  * Global TODO:
-  * - Break out common code from arch/arm/mach-s3c64xx and share
+  *    will then move to the next LLI entry. Unsupported by PL080S.
   */
  #include <linux/amba/bus.h>
  #include <linux/amba/pl08x.h>
@@@ -100,24 -106,16 +106,16 @@@ struct pl08x_driver_data
   * @nomadik: whether the channels have Nomadik security extension bits
   *    that need to be checked for permission before use and some registers are
   *    missing
+  * @pl080s: whether this version is a PL080S, which has separate register and
+  *    LLI word for transfer size.
   */
  struct vendor_data {
+       u8 config_offset;
        u8 channels;
        bool dualmaster;
        bool nomadik;
- };
- /*
-  * PL08X private data structures
-  * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
-  * start & end do not - their bus bit info is in cctl.  Also note that these
-  * are fixed 32-bit quantities.
-  */
- struct pl08x_lli {
-       u32 src;
-       u32 dst;
-       u32 lli;
-       u32 cctl;
+       bool pl080s;
+       u32 max_transfer_size;
  };
  
  /**
@@@ -133,8 -131,6 +131,8 @@@ struct pl08x_bus_data 
        u8 buswidth;
  };
  
 +#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth)
 +
  /**
   * struct pl08x_phy_chan - holder for the physical channels
   * @id: physical index to this channel
  struct pl08x_phy_chan {
        unsigned int id;
        void __iomem *base;
+       void __iomem *reg_config;
        spinlock_t lock;
        struct pl08x_dma_chan *serving;
        bool locked;
@@@ -176,12 -173,13 +175,13 @@@ struct pl08x_sg 
   * @ccfg: config reg values for current txd
   * @done: this marks completed descriptors, which should not have their
   *   mux released.
+  * @cyclic: indicate cyclic transfers
   */
  struct pl08x_txd {
        struct virt_dma_desc vd;
        struct list_head dsg_list;
        dma_addr_t llis_bus;
-       struct pl08x_lli *llis_va;
+       u32 *llis_va;
        /* Default cctl value for LLIs */
        u32 cctl;
        /*
         */
        u32 ccfg;
        bool done;
+       bool cyclic;
  };
  
  /**
@@@ -265,17 -264,29 +266,29 @@@ struct pl08x_driver_data 
        struct dma_pool *pool;
        u8 lli_buses;
        u8 mem_buses;
+       u8 lli_words;
  };
  
  /*
   * PL08X specific defines
   */
  
- /* Size (bytes) of each LLI buffer allocated for one transfer */
- # define PL08X_LLI_TSFR_SIZE  0x2000
+ /* The order of words in an LLI. */
+ #define PL080_LLI_SRC         0
+ #define PL080_LLI_DST         1
+ #define PL080_LLI_LLI         2
+ #define PL080_LLI_CCTL                3
+ #define PL080S_LLI_CCTL2      4
+
+ /* Total words in an LLI. */
+ #define PL080_LLI_WORDS               4
+ #define PL080S_LLI_WORDS      8
  
- /* Maximum times we call dma_pool_alloc on this pool without freeing */
- #define MAX_NUM_TSFR_LLIS     (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
+ /*
+  * Number of LLIs in each LLI buffer allocated for one transfer
+  * (maximum times we call dma_pool_alloc on this pool without freeing)
+  */
+ #define MAX_NUM_TSFR_LLIS     512
  #define PL08X_ALIGN           8
  
  static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
@@@ -336,10 -347,39 +349,39 @@@ static int pl08x_phy_channel_busy(struc
  {
        unsigned int val;
  
-       val = readl(ch->base + PL080_CH_CONFIG);
+       val = readl(ch->reg_config);
        return val & PL080_CONFIG_ACTIVE;
  }
  
+ static void pl08x_write_lli(struct pl08x_driver_data *pl08x,
+               struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg)
+ {
+       if (pl08x->vd->pl080s)
+               dev_vdbg(&pl08x->adev->dev,
+                       "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+                       "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n",
+                       phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+                       lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL],
+                       lli[PL080S_LLI_CCTL2], ccfg);
+       else
+               dev_vdbg(&pl08x->adev->dev,
+                       "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
+                       "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
+                       phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST],
+                       lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg);
+       writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR);
+       writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR);
+       writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI);
+       writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL);
+       if (pl08x->vd->pl080s)
+               writel_relaxed(lli[PL080S_LLI_CCTL2],
+                               phychan->base + PL080S_CH_CONTROL2);
+       writel(ccfg, phychan->reg_config);
+ }
+
  /*
   * Set the initial DMA register values i.e. those for the first LLI
   * The next LLI pointer and the configuration interrupt bit have
@@@ -352,7 -392,6 +394,6 @@@ static void pl08x_start_next_txd(struc
        struct pl08x_phy_chan *phychan = plchan->phychan;
        struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
        struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
-       struct pl08x_lli *lli;
        u32 val;
  
        list_del(&txd->vd.node);
        while (pl08x_phy_channel_busy(phychan))
                cpu_relax();
  
-       lli = &txd->llis_va[0];
-       dev_vdbg(&pl08x->adev->dev,
-               "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
-               "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
-               phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
-               txd->ccfg);
-       writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
-       writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
-       writel(lli->lli, phychan->base + PL080_CH_LLI);
-       writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
-       writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);
+       pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg);
  
        /* Enable the DMA channel */
        /* Do not access config register until channel shows as disabled */
                cpu_relax();
  
        /* Do not access config register until channel shows as inactive */
-       val = readl(phychan->base + PL080_CH_CONFIG);
+       val = readl(phychan->reg_config);
        while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
-               val = readl(phychan->base + PL080_CH_CONFIG);
+               val = readl(phychan->reg_config);
  
-       writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
+       writel(val | PL080_CONFIG_ENABLE, phychan->reg_config);
  }
  
  /*
@@@ -406,9 -433,9 +435,9 @@@ static void pl08x_pause_phy_chan(struc
        int timeout;
  
        /* Set the HALT bit and wait for the FIFO to drain */
-       val = readl(ch->base + PL080_CH_CONFIG);
+       val = readl(ch->reg_config);
        val |= PL080_CONFIG_HALT;
-       writel(val, ch->base + PL080_CH_CONFIG);
+       writel(val, ch->reg_config);
  
        /* Wait for channel inactive */
        for (timeout = 1000; timeout; timeout--) {
@@@ -425,9 -452,9 +454,9 @@@ static void pl08x_resume_phy_chan(struc
        u32 val;
  
        /* Clear the HALT bit */
-       val = readl(ch->base + PL080_CH_CONFIG);
+       val = readl(ch->reg_config);
        val &= ~PL080_CONFIG_HALT;
-       writel(val, ch->base + PL080_CH_CONFIG);
+       writel(val, ch->reg_config);
  }
  
  /*
  static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
        struct pl08x_phy_chan *ch)
  {
-       u32 val = readl(ch->base + PL080_CH_CONFIG);
+       u32 val = readl(ch->reg_config);
  
        val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
                 PL080_CONFIG_TC_IRQ_MASK);
  
-       writel(val, ch->base + PL080_CH_CONFIG);
+       writel(val, ch->reg_config);
  
        writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
        writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
@@@ -455,6 -482,28 +484,28 @@@ static inline u32 get_bytes_in_cctl(u3
        /* The source width defines the number of bytes */
        u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;
  
+       cctl &= PL080_CONTROL_SWIDTH_MASK;
+       switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
+       case PL080_WIDTH_8BIT:
+               break;
+       case PL080_WIDTH_16BIT:
+               bytes *= 2;
+               break;
+       case PL080_WIDTH_32BIT:
+               bytes *= 4;
+               break;
+       }
+       return bytes;
+ }
+
+ static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1)
+ {
+       /* The source width defines the number of bytes */
+       u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK;
+       cctl &= PL080_CONTROL_SWIDTH_MASK;
        switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
        case PL080_WIDTH_8BIT:
                break;
  /* The channel should be paused when calling this */
  static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
  {
+       struct pl08x_driver_data *pl08x = plchan->host;
+       const u32 *llis_va, *llis_va_limit;
        struct pl08x_phy_chan *ch;
+       dma_addr_t llis_bus;
        struct pl08x_txd *txd;
-       size_t bytes = 0;
+       u32 llis_max_words;
+       size_t bytes;
+       u32 clli;
  
        ch = plchan->phychan;
        txd = plchan->at;
  
+       if (!ch || !txd)
+               return 0;
        /*
         * Follow the LLIs to get the number of remaining
         * bytes in the currently active transaction.
         */
-       if (ch && txd) {
-               u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
+       clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;
  
-               /* First get the remaining bytes in the active transfer */
+       /* First get the remaining bytes in the active transfer */
+       if (pl08x->vd->pl080s)
+               bytes = get_bytes_in_cctl_pl080s(
+                               readl(ch->base + PL080_CH_CONTROL),
+                               readl(ch->base + PL080S_CH_CONTROL2));
+       else
                bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));
  
-               if (clli) {
-                       struct pl08x_lli *llis_va = txd->llis_va;
-                       dma_addr_t llis_bus = txd->llis_bus;
-                       int index;
+       if (!clli)
+               return bytes;
  
-                       BUG_ON(clli < llis_bus || clli >= llis_bus +
-                               sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);
+       llis_va = txd->llis_va;
+       llis_bus = txd->llis_bus;
  
-                       /*
-                        * Locate the next LLI - as this is an array,
-                        * it's simple maths to find.
-                        */
-                       index = (clli - llis_bus) / sizeof(struct pl08x_lli);
+       llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS;
+       BUG_ON(clli < llis_bus || clli >= llis_bus +
+                                               sizeof(u32) * llis_max_words);
  
-                       for (; index < MAX_NUM_TSFR_LLIS; index++) {
-                               bytes += get_bytes_in_cctl(llis_va[index].cctl);
+       /*
+        * Locate the next LLI - as this is an array,
+        * it's simple maths to find.
+        */
+       llis_va += (clli - llis_bus) / sizeof(u32);
  
-                               /*
-                                * A LLI pointer of 0 terminates the LLI list
-                                */
-                               if (!llis_va[index].lli)
-                                       break;
-                       }
-               }
+       llis_va_limit = llis_va + llis_max_words;
+       for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) {
+               if (pl08x->vd->pl080s)
+                       bytes += get_bytes_in_cctl_pl080s(
+                                               llis_va[PL080_LLI_CCTL],
+                                               llis_va[PL080S_LLI_CCTL2]);
+               else
+                       bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]);
+               /*
+                * A LLI pointer going backward terminates the LLI list
+                */
+               if (llis_va[PL080_LLI_LLI] <= clli)
+                       break;
        }
  
        return bytes;
@@@ -722,6 -790,7 +792,7 @@@ static inline u32 pl08x_cctl_bits(u32 c
                break;
        }
  
+       tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK;
        retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
        return retbits;
  }
@@@ -766,20 -835,26 +837,26 @@@ static void pl08x_choose_master_bus(str
  /*
   * Fills in one LLI for a certain transfer descriptor and advance the counter
   */
- static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
-       int num_llis, int len, u32 cctl)
+ static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x,
+                                   struct pl08x_lli_build_data *bd,
+                                   int num_llis, int len, u32 cctl, u32 cctl2)
  {
-       struct pl08x_lli *llis_va = bd->txd->llis_va;
+       u32 offset = num_llis * pl08x->lli_words;
+       u32 *llis_va = bd->txd->llis_va + offset;
        dma_addr_t llis_bus = bd->txd->llis_bus;
  
        BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);
  
-       llis_va[num_llis].cctl = cctl;
-       llis_va[num_llis].src = bd->srcbus.addr;
-       llis_va[num_llis].dst = bd->dstbus.addr;
-       llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
-               sizeof(struct pl08x_lli);
-       llis_va[num_llis].lli |= bd->lli_bus;
+       /* Advance the offset to next LLI. */
+       offset += pl08x->lli_words;
+       llis_va[PL080_LLI_SRC] = bd->srcbus.addr;
+       llis_va[PL080_LLI_DST] = bd->dstbus.addr;
+       llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset);
+       llis_va[PL080_LLI_LLI] |= bd->lli_bus;
+       llis_va[PL080_LLI_CCTL] = cctl;
+       if (pl08x->vd->pl080s)
+               llis_va[PL080S_LLI_CCTL2] = cctl2;
  
        if (cctl & PL080_CONTROL_SRC_INCR)
                bd->srcbus.addr += len;
        bd->remainder -= len;
  }
  
- static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
-               u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
+ static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x,
+                       struct pl08x_lli_build_data *bd, u32 *cctl, u32 len,
+                       int num_llis, size_t *total_bytes)
  {
        *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
-       pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+       pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len);
        (*total_bytes) += len;
  }
  
+ #ifdef VERBOSE_DEBUG
+ static void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+                          const u32 *llis_va, int num_llis)
+ {
+       int i;
+       if (pl08x->vd->pl080s) {
+               dev_vdbg(&pl08x->adev->dev,
+                       "%-3s %-9s  %-10s %-10s %-10s %-10s %s\n",
+                       "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2");
+               for (i = 0; i < num_llis; i++) {
+                       dev_vdbg(&pl08x->adev->dev,
+                               "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                               i, llis_va, llis_va[PL080_LLI_SRC],
+                               llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+                               llis_va[PL080_LLI_CCTL],
+                               llis_va[PL080S_LLI_CCTL2]);
+                       llis_va += pl08x->lli_words;
+               }
+       } else {
+               dev_vdbg(&pl08x->adev->dev,
+                       "%-3s %-9s  %-10s %-10s %-10s %s\n",
+                       "lli", "", "csrc", "cdst", "clli", "cctl");
+               for (i = 0; i < num_llis; i++) {
+                       dev_vdbg(&pl08x->adev->dev,
+                               "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+                               i, llis_va, llis_va[PL080_LLI_SRC],
+                               llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI],
+                               llis_va[PL080_LLI_CCTL]);
+                       llis_va += pl08x->lli_words;
+               }
+       }
+ }
+ #else
+ static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x,
+                                 const u32 *llis_va, int num_llis) {}
+ #endif
+
  /*
   * This fills in the table of LLIs for the transfer descriptor
   * Note that we assume we never have to change the burst sizes
@@@ -812,7 -926,7 +928,7 @@@ static int pl08x_fill_llis_for_desc(str
        int num_llis = 0;
        u32 cctl, early_bytes = 0;
        size_t max_bytes_per_lli, total_bytes;
-       struct pl08x_lli *llis_va;
+       u32 *llis_va, *last_lli;
        struct pl08x_sg *dsg;
  
        txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
  
                pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
  
 -              dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
 -                      bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
 +              dev_vdbg(&pl08x->adev->dev,
 +                      "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n",
 +                      (u64)bd.srcbus.addr,
 +                      cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
                        bd.srcbus.buswidth,
 -                      bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
 +                      (u64)bd.dstbus.addr,
 +                      cctl & PL080_CONTROL_DST_INCR ? "+" : "",
                        bd.dstbus.buswidth,
                        bd.remainder);
                dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
                                return 0;
                        }
  
 -                      if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
 -                                      (bd.dstbus.addr % bd.dstbus.buswidth)) {
 +                      if (!IS_BUS_ALIGNED(&bd.srcbus) ||
 +                              !IS_BUS_ALIGNED(&bd.dstbus)) {
                                dev_err(&pl08x->adev->dev,
                                        "%s src & dst address must be aligned to src"
                                        " & dst width if peripheral is flow controller",
  
                        cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
                                        bd.dstbus.buswidth, 0);
-                       pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+                       pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+                                       0, cctl, 0);
                        break;
                }
  
                 */
                if (bd.remainder < mbus->buswidth)
                        early_bytes = bd.remainder;
 -              else if ((mbus->addr) % (mbus->buswidth)) {
 -                      early_bytes = mbus->buswidth - (mbus->addr) %
 -                              (mbus->buswidth);
 +              else if (!IS_BUS_ALIGNED(mbus)) {
 +                      early_bytes = mbus->buswidth -
 +                              (mbus->addr & (mbus->buswidth - 1));
                        if ((bd.remainder - early_bytes) < mbus->buswidth)
                                early_bytes = bd.remainder;
                }
                        dev_vdbg(&pl08x->adev->dev,
                                "%s byte width LLIs (remain 0x%08x)\n",
                                __func__, bd.remainder);
-                       prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
-                               &total_bytes);
+                       prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
+                               num_llis++, &total_bytes);
                }
  
                if (bd.remainder) {
                         * Master now aligned
                         * - if slave is not then we must set its width down
                         */
 -                      if (sbus->addr % sbus->buswidth) {
 +                      if (!IS_BUS_ALIGNED(sbus)) {
                                dev_dbg(&pl08x->adev->dev,
                                        "%s set down bus width to one byte\n",
                                        __func__);
                         * MIN(buswidths)
                         */
                        max_bytes_per_lli = bd.srcbus.buswidth *
-                               PL080_CONTROL_TRANSFER_SIZE_MASK;
+                                               pl08x->vd->max_transfer_size;
                        dev_vdbg(&pl08x->adev->dev,
                                "%s max bytes per lli = %zu\n",
                                __func__, max_bytes_per_lli);
  
                                cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
                                        bd.dstbus.buswidth, tsize);
-                               pl08x_fill_lli_for_desc(&bd, num_llis++,
-                                               lli_len, cctl);
+                               pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++,
+                                               lli_len, cctl, tsize);
                                total_bytes += lli_len;
                        }
  
                                dev_vdbg(&pl08x->adev->dev,
                                        "%s align with boundary, send odd bytes (remain %zu)\n",
                                        __func__, bd.remainder);
-                               prep_byte_width_lli(&bd, &cctl, bd.remainder,
-                                               num_llis++, &total_bytes);
+                               prep_byte_width_lli(pl08x, &bd, &cctl,
+                                       bd.remainder, num_llis++, &total_bytes);
                        }
                }
  
                if (num_llis >= MAX_NUM_TSFR_LLIS) {
                        dev_err(&pl08x->adev->dev,
                                "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
-                               __func__, (u32) MAX_NUM_TSFR_LLIS);
+                               __func__, MAX_NUM_TSFR_LLIS);
                        return 0;
                }
        }
  
        llis_va = txd->llis_va;
-       /* The final LLI terminates the LLI. */
-       llis_va[num_llis - 1].lli = 0;
-       /* The final LLI element shall also fire an interrupt. */
-       llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;
- #ifdef VERBOSE_DEBUG
-       {
-               int i;
+       last_lli = llis_va + (num_llis - 1) * pl08x->lli_words;
  
-               dev_vdbg(&pl08x->adev->dev,
-                        "%-3s %-9s  %-10s %-10s %-10s %s\n",
-                        "lli", "", "csrc", "cdst", "clli", "cctl");
-               for (i = 0; i < num_llis; i++) {
-                       dev_vdbg(&pl08x->adev->dev,
-                                "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
-                                i, &llis_va[i], llis_va[i].src,
-                                llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
-                               );
-               }
+       if (txd->cyclic) {
+               /* Link back to the first LLI. */
+               last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus;
+       } else {
+               /* The final LLI terminates the LLI. */
+               last_lli[PL080_LLI_LLI] = 0;
+               /* The final LLI element shall also fire an interrupt. */
+               last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN;
        }
- #endif
+       pl08x_dump_lli(pl08x, llis_va, num_llis);
  
        return num_llis;
  }
@@@ -1310,6 -1414,7 +1419,7 @@@ static int dma_set_runtime_config(struc
                                  struct dma_slave_config *config)
  {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
  
        if (!plchan->slave)
                return -EINVAL;
            config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;
  
+       if (config->device_fc && pl08x->vd->pl080s) {
+               dev_err(&pl08x->adev->dev,
+                       "%s: PL080S does not support peripheral flow control\n",
+                       __func__);
+               return -EINVAL;
+       }
        plchan->cfg = *config;
  
        return 0;
@@@ -1409,25 -1521,19 +1526,19 @@@ static struct dma_async_tx_descriptor *
        return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
  }
  
- static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
-               struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_transfer_direction direction,
-               unsigned long flags, void *context)
+ static struct pl08x_txd *pl08x_init_txd(
+               struct dma_chan *chan,
+               enum dma_transfer_direction direction,
+               dma_addr_t *slave_addr)
  {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
        struct pl08x_driver_data *pl08x = plchan->host;
        struct pl08x_txd *txd;
-       struct pl08x_sg *dsg;
-       struct scatterlist *sg;
        enum dma_slave_buswidth addr_width;
-       dma_addr_t slave_addr;
        int ret, tmp;
        u8 src_buses, dst_buses;
        u32 maxburst, cctl;
  
-       dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
-                       __func__, sg_dma_len(sgl), plchan->name);
        txd = pl08x_get_txd(plchan);
        if (!txd) {
                dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
         */
        if (direction == DMA_MEM_TO_DEV) {
                cctl = PL080_CONTROL_SRC_INCR;
-               slave_addr = plchan->cfg.dst_addr;
+               *slave_addr = plchan->cfg.dst_addr;
                addr_width = plchan->cfg.dst_addr_width;
                maxburst = plchan->cfg.dst_maxburst;
                src_buses = pl08x->mem_buses;
                dst_buses = plchan->cd->periph_buses;
        } else if (direction == DMA_DEV_TO_MEM) {
                cctl = PL080_CONTROL_DST_INCR;
-               slave_addr = plchan->cfg.src_addr;
+               *slave_addr = plchan->cfg.src_addr;
                addr_width = plchan->cfg.src_addr_width;
                maxburst = plchan->cfg.src_maxburst;
                src_buses = plchan->cd->periph_buses;
        else
                txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;
  
+       return txd;
+ }
+
+ static int pl08x_tx_add_sg(struct pl08x_txd *txd,
+                          enum dma_transfer_direction direction,
+                          dma_addr_t slave_addr,
+                          dma_addr_t buf_addr,
+                          unsigned int len)
+ {
+       struct pl08x_sg *dsg;
+       dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+       if (!dsg)
+               return -ENOMEM;
+       list_add_tail(&dsg->node, &txd->dsg_list);
+       dsg->len = len;
+       if (direction == DMA_MEM_TO_DEV) {
+               dsg->src_addr = buf_addr;
+               dsg->dst_addr = slave_addr;
+       } else {
+               dsg->src_addr = slave_addr;
+               dsg->dst_addr = buf_addr;
+       }
+       return 0;
+ }
+ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_transfer_direction direction,
+               unsigned long flags, void *context)
+ {
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_txd *txd;
+       struct scatterlist *sg;
+       int ret, tmp;
+       dma_addr_t slave_addr;
+       dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
+                       __func__, sg_dma_len(sgl), plchan->name);
+       txd = pl08x_init_txd(chan, direction, &slave_addr);
+       if (!txd)
+               return NULL;
        for_each_sg(sgl, sg, sg_len, tmp) {
-               dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
-               if (!dsg) {
+               ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+                                     sg_dma_address(sg),
+                                     sg_dma_len(sg));
+               if (ret) {
                        pl08x_release_mux(plchan);
                        pl08x_free_txd(pl08x, txd);
                        dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
                                        __func__);
                        return NULL;
                }
-               list_add_tail(&dsg->node, &txd->dsg_list);
+       }
  
-               dsg->len = sg_dma_len(sg);
-               if (direction == DMA_MEM_TO_DEV) {
-                       dsg->src_addr = sg_dma_address(sg);
-                       dsg->dst_addr = slave_addr;
-               } else {
-                       dsg->src_addr = slave_addr;
-                       dsg->dst_addr = sg_dma_address(sg);
+       ret = pl08x_fill_llis_for_desc(plchan->host, txd);
+       if (!ret) {
+               pl08x_release_mux(plchan);
+               pl08x_free_txd(pl08x, txd);
+               return NULL;
+       }
+       return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
+ }
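For reference, a client reaches pl08x_prep_slave_sg() through the generic dmaengine wrappers rather than calling it directly. A minimal sketch; the FIFO address, bus width and burst values below are illustrative assumptions, not taken from this commit:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hedged sketch of a slave-DMA client; chan, sgl and sg_len are assumed
 * to have been obtained and DMA-mapped by the caller.
 */
static int start_slave_tx(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,		/* illustrative value */
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EBUSY;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return cookie;

	dma_async_issue_pending(chan);
	return 0;
}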
+ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
+               struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+               size_t period_len, enum dma_transfer_direction direction,
+               unsigned long flags, void *context)
+ {
+       struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+       struct pl08x_driver_data *pl08x = plchan->host;
+       struct pl08x_txd *txd;
+       int ret, tmp;
+       dma_addr_t slave_addr;
+       dev_dbg(&pl08x->adev->dev,
+               "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+               __func__, period_len, buf_len,
+               direction == DMA_MEM_TO_DEV ? "to" : "from",
+               plchan->name);
+       txd = pl08x_init_txd(chan, direction, &slave_addr);
+       if (!txd)
+               return NULL;
+       txd->cyclic = true;
+       txd->cctl |= PL080_CONTROL_TC_IRQ_EN;
+       for (tmp = 0; tmp < buf_len; tmp += period_len) {
+               ret = pl08x_tx_add_sg(txd, direction, slave_addr,
+                                     buf_addr + tmp, period_len);
+               if (ret) {
+                       pl08x_release_mux(plchan);
+                       pl08x_free_txd(pl08x, txd);
+                       return NULL;
                }
        }
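The cyclic descriptors prepared above are consumed through the dmaengine_prep_dma_cyclic() wrapper of this kernel generation; a hedged sketch of a client, where my_period_done() is a hypothetical per-period callback:

/* Sketch of a cyclic (e.g. audio) client; buf, buf_len and period_len
 * are assumed DMA-mapped and sized by the caller.
 */
static void my_period_done(void *param)
{
	/* hypothetical: consume one period of captured data */
}

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = my_period_done;	/* fires once per period */
	desc->callback_param = NULL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}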
  
@@@ -1657,7 -1846,9 +1851,9 @@@ static irqreturn_t pl08x_irq(int irq, v
  
                        spin_lock(&plchan->vc.lock);
                        tx = plchan->at;
-                       if (tx) {
+                       if (tx && tx->cyclic) {
+                               vchan_cyclic_callback(&tx->vd);
+                       } else if (tx) {
                                plchan->at = NULL;
                                /*
                                 * This descriptor is done, release its mux
@@@ -1851,6 -2042,7 +2047,7 @@@ static int pl08x_probe(struct amba_devi
  {
        struct pl08x_driver_data *pl08x;
        const struct vendor_data *vd = id->data;
+       u32 tsfr_size;
        int ret = 0;
        int i;
  
  
        /* Initialize slave engine */
        dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
+       dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
        pl08x->slave.dev = &adev->dev;
        pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
        pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
        pl08x->slave.device_tx_status = pl08x_dma_tx_status;
        pl08x->slave.device_issue_pending = pl08x_issue_pending;
        pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
+       pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
        pl08x->slave.device_control = pl08x_control;
  
        /* Get the platform data */
                pl08x->mem_buses = pl08x->pd->mem_buses;
        }
  
+       if (vd->pl080s)
+               pl08x->lli_words = PL080S_LLI_WORDS;
+       else
+               pl08x->lli_words = PL080_LLI_WORDS;
+       tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32);
        /* A DMA memory pool for LLIs, align on 1-byte boundary */
        pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
-                       PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
+                                               tsfr_size, PL08X_ALIGN, 0);
        if (!pl08x->pool) {
                ret = -ENOMEM;
                goto out_no_lli_pool;
  
                ch->id = i;
                ch->base = pl08x->base + PL080_Cx_BASE(i);
+               ch->reg_config = ch->base + vd->config_offset;
                spin_lock_init(&ch->lock);
  
                /*
                if (vd->nomadik) {
                        u32 val;
  
-                       val = readl(ch->base + PL080_CH_CONFIG);
+                       val = readl(ch->reg_config);
                        if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
                                dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
                                ch->locked = true;
  
        amba_set_drvdata(adev, pl08x);
        init_pl08x_debugfs(pl08x);
-       dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
-                amba_part(adev), amba_rev(adev),
+       dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n",
+                amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev),
                 (unsigned long long)adev->res.start, adev->irq[0]);
  
        return 0;
@@@ -2043,22 -2244,41 +2249,41 @@@ out_no_pl08x
  
  /* PL080 has 8 channels and the PL081 has just 2 */
  static struct vendor_data vendor_pl080 = {
+       .config_offset = PL080_CH_CONFIG,
        .channels = 8,
        .dualmaster = true,
+       .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
  };
  
  static struct vendor_data vendor_nomadik = {
+       .config_offset = PL080_CH_CONFIG,
        .channels = 8,
        .dualmaster = true,
        .nomadik = true,
+       .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
+ };
+ static struct vendor_data vendor_pl080s = {
+       .config_offset = PL080S_CH_CONFIG,
+       .channels = 8,
+       .pl080s = true,
+       .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK,
  };
  
  static struct vendor_data vendor_pl081 = {
+       .config_offset = PL080_CH_CONFIG,
        .channels = 2,
        .dualmaster = false,
+       .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK,
  };
  
  static struct amba_id pl08x_ids[] = {
+       /* Samsung PL080S variant */
+       {
+               .id     = 0x0a141080,
+               .mask   = 0xffffffff,
+               .data   = &vendor_pl080s,
+       },
        /* PL080 */
        {
                .id     = 0x00041080,
diff --combined drivers/dma/dmaengine.c
index eee16b01fa8951a7634408a6e65eaceacf5e9705,d7d94d21a038576581834cbf768e4ee3e82aea74..9162ac80c18f303ac9a509eb97298eba33d4753b
@@@ -87,8 -87,7 +87,8 @@@ static struct dma_chan *dev_to_dma_chan
        return chan_dev->chan;
  }
  
 -static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
 +static ssize_t memcpy_count_show(struct device *dev,
 +                               struct device_attribute *attr, char *buf)
  {
        struct dma_chan *chan;
        unsigned long count = 0;
  
        return err;
  }
 +static DEVICE_ATTR_RO(memcpy_count);
  
 -static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 -                                    char *buf)
 +static ssize_t bytes_transferred_show(struct device *dev,
 +                                    struct device_attribute *attr, char *buf)
  {
        struct dma_chan *chan;
        unsigned long count = 0;
  
        return err;
  }
 +static DEVICE_ATTR_RO(bytes_transferred);
  
 -static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 +static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 +                         char *buf)
  {
        struct dma_chan *chan;
        int err;
  
        return err;
  }
 +static DEVICE_ATTR_RO(in_use);
  
 -static struct device_attribute dma_attrs[] = {
 -      __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
 -      __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
 -      __ATTR(in_use, S_IRUGO, show_in_use, NULL),
 -      __ATTR_NULL
 +static struct attribute *dma_dev_attrs[] = {
 +      &dev_attr_memcpy_count.attr,
 +      &dev_attr_bytes_transferred.attr,
 +      &dev_attr_in_use.attr,
 +      NULL,
  };
 +ATTRIBUTE_GROUPS(dma_dev);
  
  static void chan_dev_release(struct device *dev)
  {
  
  static struct class dma_devclass = {
        .name           = "dma",
 -      .dev_attrs      = dma_attrs,
 +      .dev_groups     = dma_dev_groups,
        .dev_release    = chan_dev_release,
  };
  
@@@ -382,30 -376,20 +382,30 @@@ void dma_issue_pending_all(void
  EXPORT_SYMBOL(dma_issue_pending_all);
  
  /**
 - * nth_chan - returns the nth channel of the given capability
 + * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 + */
 +static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 +{
 +      int node = dev_to_node(chan->device->dev);
 +      return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
 +}
 +
 +/**
 + * min_chan - returns the channel with min count and in the same numa-node as the cpu
   * @cap: capability to match
 - * @n: nth channel desired
 + * @cpu: cpu index which the channel should be close to
   *
 - * Defaults to returning the channel with the desired capability and the
 - * lowest reference count when 'n' cannot be satisfied.  Must be called
 - * under dma_list_mutex.
 + * If some channels are close to the given cpu, the one with the lowest
 + * reference count is returned. Otherwise, cpu is ignored and only the
 + * reference count is taken into account.
 + * Must be called under dma_list_mutex.
   */
 -static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
  {
        struct dma_device *device;
        struct dma_chan *chan;
 -      struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;
 +      struct dma_chan *localmin = NULL;
  
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
 -                      if (!min)
 -                              min = chan;
 -                      else if (chan->table_count < min->table_count)
 +                      if (!min || chan->table_count < min->table_count)
                                min = chan;
  
 -                      if (n-- == 0) {
 -                              ret = chan;
 -                              break; /* done */
 -                      }
 +                      if (dma_chan_is_local(chan, cpu))
 +                              if (!localmin ||
 +                                  chan->table_count < localmin->table_count)
 +                                      localmin = chan;
                }
 -              if (ret)
 -                      break; /* done */
        }
  
 -      if (!ret)
 -              ret = min;
 +      chan = localmin ? localmin : min;
  
 -      if (ret)
 -              ret->table_count++;
 +      if (chan)
 +              chan->table_count++;
  
 -      return ret;
 +      return chan;
  }
  
  /**
@@@ -446,6 -435,7 +446,6 @@@ static void dma_channel_rebalance(void
        struct dma_device *device;
        int cpu;
        int cap;
 -      int n;
  
        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                return;
  
        /* redistribute available channels */
 -      n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
 -                      if (num_possible_cpus() > 1)
 -                              chan = nth_chan(cap, n++);
 -                      else
 -                              chan = nth_chan(cap, -1);
 -
 +                      chan = min_chan(cap, cpu);
                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
  }
@@@ -509,7 -504,33 +509,33 @@@ static struct dma_chan *private_candida
  }
  
  /**
-  * dma_request_channel - try to allocate an exclusive channel
+  * dma_get_slave_channel - try to get specific channel exclusively
+  * @chan: target channel
+  */
+ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+ {
+       int err = -EBUSY;
+       /* lock against __dma_request_channel */
+       mutex_lock(&dma_list_mutex);
+       if (chan->client_count == 0) {
+               err = dma_chan_get(chan);
+               if (err)
+                       pr_debug("%s: failed to get %s: (%d)\n",
+                               __func__, dma_chan_name(chan), err);
+       } else
+               chan = NULL;
+       mutex_unlock(&dma_list_mutex);
+       return chan;
+ }
+ EXPORT_SYMBOL_GPL(dma_get_slave_channel);
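dma_get_slave_channel() is aimed at controller drivers that already know which channel a request maps to, for instance from a devicetree translation. A sketch of such an of_dma xlate callback; struct my_dmadev and its chans[] array are hypothetical driver privates:

#include <linux/of_dma.h>

static struct dma_chan *my_of_xlate(struct of_phandle_args *dma_spec,
				    struct of_dma *ofdma)
{
	struct my_dmadev *d = ofdma->of_dma_data;
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= d->nr_channels)
		return NULL;

	/* Returns NULL if the channel is already claimed */
	return dma_get_slave_channel(&d->chans[request]);
}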
+ /**
+  * __dma_request_channel - try to allocate an exclusive channel
   * @mask: capabilities that the channel must satisfy
   * @fn: optional callback to disposition available channels
   * @fn_param: opaque parameter to pass to dma_filter_fn
diff --combined drivers/dma/mv_xor.c
index 0ec086d2b6a00819d2407108893d68e01a9063c2,d9a26777a1b00ae8e36a898d27db0133bf41f3d5..536dcb8ba5fdfe69ed5f726fc6b5897f00266698
@@@ -64,7 -64,7 +64,7 @@@ static u32 mv_desc_get_src_addr(struct 
                                int src_idx)
  {
        struct mv_xor_desc *hw_desc = desc->hw_desc;
 -      return hw_desc->phy_src_addr[src_idx];
 +      return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
  }
  
  
@@@ -107,32 -107,32 +107,32 @@@ static void mv_desc_set_src_addr(struc
                                 int index, dma_addr_t addr)
  {
        struct mv_xor_desc *hw_desc = desc->hw_desc;
 -      hw_desc->phy_src_addr[index] = addr;
 +      hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
        if (desc->type == DMA_XOR)
                hw_desc->desc_command |= (1 << index);
  }
  
  static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
  {
 -      return __raw_readl(XOR_CURR_DESC(chan));
 +      return readl_relaxed(XOR_CURR_DESC(chan));
  }
  
  static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
                                        u32 next_desc_addr)
  {
 -      __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
 +      writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
  }
  
  static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
  {
 -      u32 val = __raw_readl(XOR_INTR_MASK(chan));
 +      u32 val = readl_relaxed(XOR_INTR_MASK(chan));
        val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
 -      __raw_writel(val, XOR_INTR_MASK(chan));
 +      writel_relaxed(val, XOR_INTR_MASK(chan));
  }
  
  static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
  {
 -      u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
 +      u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
        intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
        return intr_cause;
  }
@@@ -149,13 -149,13 +149,13 @@@ static void mv_xor_device_clear_eoc_cau
  {
        u32 val = ~(1 << (chan->idx * 16));
        dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
 -      __raw_writel(val, XOR_INTR_CAUSE(chan));
 +      writel_relaxed(val, XOR_INTR_CAUSE(chan));
  }
  
  static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
  {
        u32 val = 0xFFFF0000 >> (chan->idx * 16);
 -      __raw_writel(val, XOR_INTR_CAUSE(chan));
 +      writel_relaxed(val, XOR_INTR_CAUSE(chan));
  }
  
  static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@@ -173,7 -173,7 +173,7 @@@ static void mv_set_mode(struct mv_xor_c
                               enum dma_transaction_type type)
  {
        u32 op_mode;
 -      u32 config = __raw_readl(XOR_CONFIG(chan));
 +      u32 config = readl_relaxed(XOR_CONFIG(chan));
  
        switch (type) {
        case DMA_XOR:
  
        config &= ~0x7;
        config |= op_mode;
 -      __raw_writel(config, XOR_CONFIG(chan));
 +
 +#if defined(__BIG_ENDIAN)
 +      config |= XOR_DESCRIPTOR_SWAP;
 +#else
 +      config &= ~XOR_DESCRIPTOR_SWAP;
 +#endif
 +
 +      writel_relaxed(config, XOR_CONFIG(chan));
        chan->current_type = type;
  }
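A note on the accessor switch running through this file: on big-endian ARM kernels, readl_relaxed()/writel_relaxed() still byte-swap to the device's little-endian register layout, only dropping the barrier that plain readl()/writel() adds, while the __raw_* variants do neither. Roughly:

/* Rough semantics on ARM; illustrative, not the real definitions:
 *
 *   readl(a)         == le32_to_cpu(raw 32-bit load) plus a read barrier
 *   readl_relaxed(a) == le32_to_cpu(raw 32-bit load), no barrier
 *   __raw_readl(a)   == raw 32-bit load, no swap and no barrier
 *
 * So only the first two are correct on a big-endian kernel; the switch
 * keeps mv_xor working on BE while avoiding the barrier cost.
 */
u32 cfg = readl_relaxed(XOR_CONFIG(chan));	/* safe on LE and BE */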
  
@@@ -208,14 -201,14 +208,14 @@@ static void mv_chan_activate(struct mv_
        u32 activation;
  
        dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
 -      activation = __raw_readl(XOR_ACTIVATION(chan));
 +      activation = readl_relaxed(XOR_ACTIVATION(chan));
        activation |= 0x1;
 -      __raw_writel(activation, XOR_ACTIVATION(chan));
 +      writel_relaxed(activation, XOR_ACTIVATION(chan));
  }
  
  static char mv_chan_is_busy(struct mv_xor_chan *chan)
  {
 -      u32 state = __raw_readl(XOR_ACTIVATION(chan));
 +      u32 state = readl_relaxed(XOR_ACTIVATION(chan));
  
        state = (state >> 4) & 0x3;
  
@@@ -654,7 -647,7 +654,7 @@@ mv_xor_prep_dma_memcpy(struct dma_chan 
  
        dev_dbg(mv_chan_to_devp(mv_chan),
                "%s sw_desc %p async_tx %p\n",
-               __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+               __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
  
        return sw_desc ? &sw_desc->async_tx : NULL;
  }
@@@ -762,22 -755,22 +762,22 @@@ static void mv_dump_xor_regs(struct mv_
  {
        u32 val;
  
 -      val = __raw_readl(XOR_CONFIG(chan));
 +      val = readl_relaxed(XOR_CONFIG(chan));
        dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
  
 -      val = __raw_readl(XOR_ACTIVATION(chan));
 +      val = readl_relaxed(XOR_ACTIVATION(chan));
        dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
  
 -      val = __raw_readl(XOR_INTR_CAUSE(chan));
 +      val = readl_relaxed(XOR_INTR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
  
 -      val = __raw_readl(XOR_INTR_MASK(chan));
 +      val = readl_relaxed(XOR_INTR_MASK(chan));
        dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
  
 -      val = __raw_readl(XOR_ERROR_CAUSE(chan));
 +      val = readl_relaxed(XOR_ERROR_CAUSE(chan));
        dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
  
 -      val = __raw_readl(XOR_ERROR_ADDR(chan));
 +      val = readl_relaxed(XOR_ERROR_ADDR(chan));
        dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
  }
  
@@@ -1036,8 -1029,10 +1036,8 @@@ mv_xor_channel_add(struct mv_xor_devic
        struct dma_device *dma_dev;
  
        mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
 -      if (!mv_chan) {
 -              ret = -ENOMEM;
 -              goto err_free_dma;
 -      }
 +      if (!mv_chan)
 +              return ERR_PTR(-ENOMEM);
  
        mv_chan->idx = idx;
        mv_chan->irq = irq;
@@@ -1171,7 -1166,7 +1171,7 @@@ static int mv_xor_probe(struct platform
  {
        const struct mbus_dram_target_info *dram;
        struct mv_xor_device *xordev;
-       struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
+       struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct resource *res;
        int i, ret;
  
diff --combined drivers/dma/sh/shdmac.c
index 0000000000000000000000000000000000000000,3d0472b78f5fa000c7184532c96295c742a82f2b..1069e8869f20762928ecbbe509b2ed294f82ae35
mode 000000,100644..100644
--- /dev/null
+++ b/drivers/dma/sh/shdmac.c
@@@ -1,0 -1,954 +1,954 @@@
+ /*
+  * Renesas SuperH DMA Engine support
+  *
+  * based on drivers/dma/fsldma.c
+  *
+  * Copyright (C) 2011-2012 Guennadi Liakhovetski <[email protected]>
+  * Copyright (C) 2009 Nobuhiro Iwamatsu <[email protected]>
+  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+  *
+  * This is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  *
+  * - The SuperH DMAC has no hardware DMA chain mode.
+  * - The maximum DMA transfer size is 16 MB.
+  *
+  */
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
+ #include <linux/dmaengine.h>
+ #include <linux/delay.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/sh_dma.h>
+ #include <linux/notifier.h>
+ #include <linux/kdebug.h>
+ #include <linux/spinlock.h>
+ #include <linux/rculist.h>
+ #include "../dmaengine.h"
+ #include "shdma.h"
+ /* DMA register */
+ #define SAR   0x00
+ #define DAR   0x04
+ #define TCR   0x08
+ #define CHCR  0x0C
+ #define DMAOR 0x40
+ #define TEND  0x18 /* USB-DMAC */
+ #define SH_DMAE_DRV_NAME "sh-dma-engine"
+ /* Default MEMCPY transfer size = 2^2 = 4 bytes */
+ #define LOG2_DEFAULT_XFER_SIZE        2
+ #define SH_DMA_SLAVE_NUMBER 256
+ #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
+ /*
+  * Used for write-side mutual exclusion for the global device list,
+  * read-side synchronization by way of RCU, and per-controller data.
+  */
+ static DEFINE_SPINLOCK(sh_dmae_lock);
+ static LIST_HEAD(sh_dmae_devices);
+ /*
+  * Different DMAC implementations provide different ways to clear DMA channels:
+  * (1) none - no CHCLR registers are available
+  * (2) one CHCLR register per channel - 0 has to be written to it to clear
+  *     channel buffers
+  * (3) one CHCLR per several channels - 1 has to be written to the bit,
+  *     corresponding to the specific channel to reset it
+  */
+ static void channel_clear(struct sh_dmae_chan *sh_dc)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+       const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
+               sh_dc->shdma_chan.id;
+       u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
+       __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
+ }
+ static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+ {
+       __raw_writel(data, sh_dc->base + reg);
+ }
+ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+ {
+       return __raw_readl(sh_dc->base + reg);
+ }
+ static u16 dmaor_read(struct sh_dmae_device *shdev)
+ {
+       void __iomem *addr = shdev->chan_reg + DMAOR;
+       if (shdev->pdata->dmaor_is_32bit)
+               return __raw_readl(addr);
+       else
+               return __raw_readw(addr);
+ }
+ static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+ {
+       void __iomem *addr = shdev->chan_reg + DMAOR;
+       if (shdev->pdata->dmaor_is_32bit)
+               __raw_writel(data, addr);
+       else
+               __raw_writew(data, addr);
+ }
+ static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+       __raw_writel(data, sh_dc->base + shdev->chcr_offset);
+ }
+ static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+       return __raw_readl(sh_dc->base + shdev->chcr_offset);
+ }
+ /*
+  * Reset DMA controller
+  *
+  * SH7780 has two DMAOR registers
+  */
+ static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
+ {
+       unsigned short dmaor;
+       unsigned long flags;
+       spin_lock_irqsave(&sh_dmae_lock, flags);
+       dmaor = dmaor_read(shdev);
+       dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+       spin_unlock_irqrestore(&sh_dmae_lock, flags);
+ }
+ static int sh_dmae_rst(struct sh_dmae_device *shdev)
+ {
+       unsigned short dmaor;
+       unsigned long flags;
+       spin_lock_irqsave(&sh_dmae_lock, flags);
+       dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+       if (shdev->pdata->chclr_present) {
+               int i;
+               for (i = 0; i < shdev->pdata->channel_num; i++) {
+                       struct sh_dmae_chan *sh_chan = shdev->chan[i];
+                       if (sh_chan)
+                               channel_clear(sh_chan);
+               }
+       }
+       dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+       dmaor = dmaor_read(shdev);
+       spin_unlock_irqrestore(&sh_dmae_lock, flags);
+       if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+               dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
+               return -EIO;
+       }
+       if (shdev->pdata->dmaor_init & ~dmaor)
+               dev_warn(shdev->shdma_dev.dma_dev.dev,
+                        "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+                        dmaor, shdev->pdata->dmaor_init);
+       return 0;
+ }
+ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
+ {
+       u32 chcr = chcr_read(sh_chan);
+       if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+               return true; /* working */
+       return false; /* waiting */
+ }
+ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+               ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+       if (cnt >= pdata->ts_shift_num)
+               cnt = 0;
+       return pdata->ts_shift[cnt];
+ }
+ static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       int i;
+       for (i = 0; i < pdata->ts_shift_num; i++)
+               if (pdata->ts_shift[i] == l2size)
+                       break;
+       if (i == pdata->ts_shift_num)
+               i = 0;
+       return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+               ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
+ }
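calc_xmit_shift() and log2size_to_chcr() translate between the CHCR TS bit-field and a log2 transfer size through the per-SoC ts_shift[] table in platform data; a worked example with an assumed table:

/* Hypothetical ts_shift[] table (the real tables live in per-SoC
 * platform data): TS-field index -> log2(bytes per transfer unit).
 */
static const unsigned int ts_shift[] = { 3, 0, 1, 2, 5, 4 };

/* With this table, calc_xmit_shift() seeing TS bits == 3 yields
 * ts_shift[3] == 2, i.e. 4-byte units, while log2size_to_chcr(chan, 2)
 * searches the table, finds index 3 and encodes it back into the
 * ts_low/ts_high fields of CHCR.
 */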
+ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
+ {
+       sh_dmae_writel(sh_chan, hw->sar, SAR);
+       sh_dmae_writel(sh_chan, hw->dar, DAR);
+       sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
+ }
+ static void dmae_start(struct sh_dmae_chan *sh_chan)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       u32 chcr = chcr_read(sh_chan);
+       if (shdev->pdata->needs_tend_set)
+               sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
+       chcr |= CHCR_DE | shdev->chcr_ie_bit;
+       chcr_write(sh_chan, chcr & ~CHCR_TE);
+ }
+ static void dmae_init(struct sh_dmae_chan *sh_chan)
+ {
+       /*
+        * Default configuration for dual address memory-memory transfer.
+        * 0x400 represents auto-request.
+        */
+       u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+                                                  LOG2_DEFAULT_XFER_SIZE);
+       sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+       chcr_write(sh_chan, chcr);
+ }
+ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+ {
+       /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
+       if (dmae_is_busy(sh_chan))
+               return -EBUSY;
+       sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
+       chcr_write(sh_chan, val);
+       return 0;
+ }
+ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
+       void __iomem *addr = shdev->dmars;
+       unsigned int shift = chan_pdata->dmars_bit;
+       if (dmae_is_busy(sh_chan))
+               return -EBUSY;
+       if (pdata->no_dmars)
+               return 0;
+       /* in the case of a missing DMARS resource use first memory window */
+       if (!addr)
+               addr = shdev->chan_reg;
+       addr += chan_pdata->dmars;
+       __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+                    addr);
+       return 0;
+ }
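The read-modify-write above updates only this channel's byte of a DMARS register shared between two channels; as a worked example with assumed values:

/* Worked example with assumed values: shift == 0 (this channel owns the
 * low byte), val == 0x21 (its MID/RID); the other channel's byte is
 * untouched. shift == 8 mirrors this for the high byte.
 */
u16 cur  = 0xab00;			/* current DMARS contents      */
u16 keep = cur & (0xff00 >> 0);		/* 0xab00: preserve high byte  */
u16 next = keep | (0x21 << 0);		/* 0xab21: install our MID/RID */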
+ static void sh_dmae_start_xfer(struct shdma_chan *schan,
+                              struct shdma_desc *sdesc)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+               sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+               sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+       /* Get the ld start address from ld_queue */
+       dmae_set_reg(sh_chan, &sh_desc->hw);
+       dmae_start(sh_chan);
+ }
+ static bool sh_dmae_channel_busy(struct shdma_chan *schan)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       return dmae_is_busy(sh_chan);
+ }
+ static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+                              int slave_id)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       if (slave_id >= 0) {
+               const struct sh_dmae_slave_config *cfg =
+                       sh_chan->config;
+               dmae_set_dmars(sh_chan, cfg->mid_rid);
+               dmae_set_chcr(sh_chan, cfg->chcr);
+       } else {
+               dmae_init(sh_chan);
+       }
+ }
+ /*
+  * Find a slave channel configuration from the controller list by either a slave
+  * ID in the non-DT case, or by a MID/RID value in the DT case
+  */
+ static const struct sh_dmae_slave_config *dmae_find_slave(
+       struct sh_dmae_chan *sh_chan, int match)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       const struct sh_dmae_pdata *pdata = shdev->pdata;
+       const struct sh_dmae_slave_config *cfg;
+       int i;
+       if (!sh_chan->shdma_chan.dev->of_node) {
+               if (match >= SH_DMA_SLAVE_NUMBER)
+                       return NULL;
+               for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+                       if (cfg->slave_id == match)
+                               return cfg;
+       } else {
+               for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+                       if (cfg->mid_rid == match) {
+                               sh_chan->shdma_chan.slave_id = i;
+                               return cfg;
+                       }
+       }
+       return NULL;
+ }
+ static int sh_dmae_set_slave(struct shdma_chan *schan,
+                            int slave_id, dma_addr_t slave_addr, bool try)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
+       if (!cfg)
+               return -ENXIO;
+       if (!try) {
+               sh_chan->config = cfg;
+               sh_chan->slave_addr = slave_addr ? : cfg->addr;
+       }
+       return 0;
+ }
+ static void dmae_halt(struct sh_dmae_chan *sh_chan)
+ {
+       struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+       u32 chcr = chcr_read(sh_chan);
+       chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+       chcr_write(sh_chan, chcr);
+ }
+ static int sh_dmae_desc_setup(struct shdma_chan *schan,
+                             struct shdma_desc *sdesc,
+                             dma_addr_t src, dma_addr_t dst, size_t *len)
+ {
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       if (*len > schan->max_xfer_len)
+               *len = schan->max_xfer_len;
+       sh_desc->hw.sar = src;
+       sh_desc->hw.dar = dst;
+       sh_desc->hw.tcr = *len;
+       return 0;
+ }
+ static void sh_dmae_halt(struct shdma_chan *schan)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       dmae_halt(sh_chan);
+ }
+ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       if (!(chcr_read(sh_chan) & CHCR_TE))
+               return false;
+       /* DMA stop */
+       dmae_halt(sh_chan);
+       return true;
+ }
+ static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+                                 struct shdma_desc *sdesc)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+                                                   shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
 -      return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
 -              sh_chan->xmit_shift;
++      return sh_desc->hw.tcr -
++              (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
+ }
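The residue computation above works because hw.tcr is stored in bytes while the TCR register counts transfer units of (1 << xmit_shift) bytes (see dmae_set_reg(), which writes hw.tcr >> xmit_shift). A worked example with assumed values:

/* Worked example (assumed values): a 4 KiB transfer in 4-byte units
 * (xmit_shift == 2) with 256 units still outstanding in TCR.
 */
u32 total_bytes = 4096;			/* sh_desc->hw.tcr          */
u32 tcr_units   = 256;			/* sh_dmae_readl(chan, TCR) */
u32 residue     = total_bytes - (tcr_units << 2);  /* 4096 - 1024 = 3072 */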
+ /* Called from error IRQ or NMI */
+ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
+ {
+       bool ret;
+       /* halt the dma controller */
+       sh_dmae_ctl_stop(shdev);
+       /* We cannot detect which channel caused the error, so reset them all */
+       ret = shdma_reset(&shdev->shdma_dev);
+       sh_dmae_rst(shdev);
+       return ret;
+ }
+ static irqreturn_t sh_dmae_err(int irq, void *data)
+ {
+       struct sh_dmae_device *shdev = data;
+       if (!(dmaor_read(shdev) & DMAOR_AE))
+               return IRQ_NONE;
+       sh_dmae_reset(shdev);
+       return IRQ_HANDLED;
+ }
+ static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+                                  struct shdma_desc *sdesc)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan,
+                                       struct sh_dmae_chan, shdma_chan);
+       struct sh_dmae_desc *sh_desc = container_of(sdesc,
+                                       struct sh_dmae_desc, shdma_desc);
+       u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+       u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
+       return  (sdesc->direction == DMA_DEV_TO_MEM &&
+                (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+               (sdesc->direction != DMA_DEV_TO_MEM &&
+                (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
+ }
+ static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+ {
+       /* Fast path out if NMIF is not asserted for this controller */
+       if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+               return false;
+       return sh_dmae_reset(shdev);
+ }
+ static int sh_dmae_nmi_handler(struct notifier_block *self,
+                              unsigned long cmd, void *data)
+ {
+       struct sh_dmae_device *shdev;
+       int ret = NOTIFY_DONE;
+       bool triggered;
+       /*
+        * Only concern ourselves with NMI events.
+        *
+        * Normally we would check the die chain value, but as this needs
+        * to be architecture independent, check for NMI context instead.
+        */
+       if (!in_nmi())
+               return NOTIFY_DONE;
+       rcu_read_lock();
+       list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+               /*
+                * Only stop if one of the controllers has NMIF asserted,
+                * we do not want to interfere with regular address error
+                * handling or NMI events that don't concern the DMACs.
+                */
+               triggered = sh_dmae_nmi_notify(shdev);
+               if (triggered)
+                       ret = NOTIFY_OK;
+       }
+       rcu_read_unlock();
+       return ret;
+ }
+ static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+       .notifier_call  = sh_dmae_nmi_handler,
+       /* Run before NMI debug handler and KGDB */
+       .priority       = 1,
+ };
+ static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+                                       int irq, unsigned long flags)
+ {
+       const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+       struct shdma_dev *sdev = &shdev->shdma_dev;
+       struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+       struct sh_dmae_chan *sh_chan;
+       struct shdma_chan *schan;
+       int err;
+       sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
+                              GFP_KERNEL);
+       if (!sh_chan) {
+               dev_err(sdev->dma_dev.dev,
+                       "No free memory for allocating dma channels!\n");
+               return -ENOMEM;
+       }
+       schan = &sh_chan->shdma_chan;
+       schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
+       shdma_chan_probe(sdev, schan, id);
+       sh_chan->base = shdev->chan_reg + chan_pdata->offset;
+       /* set up channel irq */
+       if (pdev->id >= 0)
+               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+                        "sh-dmae%d.%d", pdev->id, id);
+       else
+               snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+                        "sh-dma%d", id);
+       err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
+       if (err) {
+               dev_err(sdev->dma_dev.dev,
+                       "DMA channel %d request_irq error %d\n",
+                       id, err);
+               goto err_no_irq;
+       }
+       shdev->chan[id] = sh_chan;
+       return 0;
+ err_no_irq:
+       /* remove from dmaengine device node */
+       shdma_chan_remove(schan);
+       return err;
+ }
+ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+ {
+       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+       struct shdma_chan *schan;
+       int i;
+       shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+               BUG_ON(!schan);
+               shdma_chan_remove(schan);
+       }
+       dma_dev->chancnt = 0;
+ }
+ static void sh_dmae_shutdown(struct platform_device *pdev)
+ {
+       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+       sh_dmae_ctl_stop(shdev);
+ }
+ static int sh_dmae_runtime_suspend(struct device *dev)
+ {
+       return 0;
+ }
+ static int sh_dmae_runtime_resume(struct device *dev)
+ {
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+       return sh_dmae_rst(shdev);
+ }
+ #ifdef CONFIG_PM
+ static int sh_dmae_suspend(struct device *dev)
+ {
+       return 0;
+ }
+ static int sh_dmae_resume(struct device *dev)
+ {
+       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+       int i, ret;
+       ret = sh_dmae_rst(shdev);
+       if (ret < 0)
+               dev_err(dev, "Failed to reset!\n");
+       for (i = 0; i < shdev->pdata->channel_num; i++) {
+               struct sh_dmae_chan *sh_chan = shdev->chan[i];
+               if (!sh_chan->shdma_chan.desc_num)
+                       continue;
+               if (sh_chan->shdma_chan.slave_id >= 0) {
+                       const struct sh_dmae_slave_config *cfg = sh_chan->config;
+                       dmae_set_dmars(sh_chan, cfg->mid_rid);
+                       dmae_set_chcr(sh_chan, cfg->chcr);
+               } else {
+                       dmae_init(sh_chan);
+               }
+       }
+       return 0;
+ }
+ #else
+ #define sh_dmae_suspend NULL
+ #define sh_dmae_resume NULL
+ #endif
+ const struct dev_pm_ops sh_dmae_pm = {
+       .suspend                = sh_dmae_suspend,
+       .resume                 = sh_dmae_resume,
+       .runtime_suspend        = sh_dmae_runtime_suspend,
+       .runtime_resume         = sh_dmae_runtime_resume,
+ };
+ static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+ {
+       struct sh_dmae_chan *sh_chan = container_of(schan,
+                                       struct sh_dmae_chan, shdma_chan);
+       /*
+        * Implicit BUG_ON(!sh_chan->config)
+        * This is an exclusive slave DMA operation, may only be called after a
+        * successful slave configuration.
+        */
+       return sh_chan->slave_addr;
+ }
+ static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+ {
+       return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+ }
+ static const struct shdma_ops sh_dmae_shdma_ops = {
+       .desc_completed = sh_dmae_desc_completed,
+       .halt_channel = sh_dmae_halt,
+       .channel_busy = sh_dmae_channel_busy,
+       .slave_addr = sh_dmae_slave_addr,
+       .desc_setup = sh_dmae_desc_setup,
+       .set_slave = sh_dmae_set_slave,
+       .setup_xfer = sh_dmae_setup_xfer,
+       .start_xfer = sh_dmae_start_xfer,
+       .embedded_desc = sh_dmae_embedded_desc,
+       .chan_irq = sh_dmae_chan_irq,
+       .get_partial = sh_dmae_get_partial,
+ };
+ static const struct of_device_id sh_dmae_of_match[] = {
+       {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,},
+       {}
+ };
+ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
+ static int sh_dmae_probe(struct platform_device *pdev)
+ {
+       const struct sh_dmae_pdata *pdata;
+       unsigned long irqflags = IRQF_DISABLED,
+               chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+       int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+       int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+       struct sh_dmae_device *shdev;
+       struct dma_device *dma_dev;
+       struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+       if (pdev->dev.of_node)
+               pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data;
+       else
+               pdata = dev_get_platdata(&pdev->dev);
+       /* get platform data */
+       if (!pdata || !pdata->channel_num)
+               return -ENODEV;
+       chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       /* DMARS area is optional */
+       dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       /*
+        * IRQ resources:
+        * 1. there must always be at least one IRQ IO-resource. On SH4 it is
+        *    the error IRQ, in which case it is the only IRQ in this resource:
+        *    start == end. If it is the only IRQ resource, all channels also
+        *    use the same IRQ.
+        * 2. DMA channel IRQ resources can be specified one per resource or in
+        *    ranges (start != end)
+        * 3. iff all events (channels and, optionally, error) on this
+        *    controller use the same IRQ, only one IRQ resource can be
+        *    specified, otherwise there must be one IRQ per channel, even if
+        *    some of them are equal
+        * 4. if all IRQs on this controller are equal or if some specific IRQs
+        *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+        *    requested with the IRQF_SHARED flag
+        */
+       errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (!chan || !errirq_res)
+               return -ENODEV;
+       shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
+                            GFP_KERNEL);
+       if (!shdev) {
+               dev_err(&pdev->dev, "Not enough memory\n");
+               return -ENOMEM;
+       }
+       dma_dev = &shdev->shdma_dev.dma_dev;
+       shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+       if (IS_ERR(shdev->chan_reg))
+               return PTR_ERR(shdev->chan_reg);
+       if (dmars) {
+               shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
+               if (IS_ERR(shdev->dmars))
+                       return PTR_ERR(shdev->dmars);
+       }
+       if (!pdata->slave_only)
+               dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+       if (pdata->slave && pdata->slave_num)
+               dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+       /* Default transfer size of 32 bytes requires 32-byte alignment */
+       dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+       shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+       shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+       err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+                             pdata->channel_num);
+       if (err < 0)
+               goto eshdma;
+       /* platform data */
+       shdev->pdata = pdata;
+       if (pdata->chcr_offset)
+               shdev->chcr_offset = pdata->chcr_offset;
+       else
+               shdev->chcr_offset = CHCR;
+       if (pdata->chcr_ie_bit)
+               shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+       else
+               shdev->chcr_ie_bit = CHCR_IE;
+       platform_set_drvdata(pdev, shdev);
+       pm_runtime_enable(&pdev->dev);
+       err = pm_runtime_get_sync(&pdev->dev);
+       if (err < 0)
+               dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+       spin_lock_irq(&sh_dmae_lock);
+       list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+       spin_unlock_irq(&sh_dmae_lock);
+       /* reset dma controller - only needed as a test */
+       err = sh_dmae_rst(shdev);
+       if (err)
+               goto rst_err;
+ #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+       chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+       if (!chanirq_res)
+               chanirq_res = errirq_res;
+       else
+               irqres++;
+       if (chanirq_res == errirq_res ||
+           (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+               irqflags = IRQF_SHARED;
+       errirq = errirq_res->start;
+       err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags,
+                              "DMAC Address Error", shdev);
+       if (err) {
+               dev_err(&pdev->dev,
+                       "DMA failed requesting irq #%d, error %d\n",
+                       errirq, err);
+               goto eirq_err;
+       }
+ #else
+       chanirq_res = errirq_res;
+ #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
+       if (chanirq_res->start == chanirq_res->end &&
+           !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+               /* Special case - all multiplexed */
+               for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+                       if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
+                               chan_irq[irq_cnt] = chanirq_res->start;
+                               chan_flag[irq_cnt] = IRQF_SHARED;
+                       } else {
+                               irq_cap = 1;
+                               break;
+                       }
+               }
+       } else {
+               do {
+                       for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+                               if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
+                                       irq_cap = 1;
+                                       break;
+                               }
+                               if ((errirq_res->flags & IORESOURCE_BITS) ==
+                                   IORESOURCE_IRQ_SHAREABLE)
+                                       chan_flag[irq_cnt] = IRQF_SHARED;
+                               else
+                                       chan_flag[irq_cnt] = IRQF_DISABLED;
+                               dev_dbg(&pdev->dev,
+                                       "Found IRQ %d for channel %d\n",
+                                       i, irq_cnt);
+                               chan_irq[irq_cnt++] = i;
+                       }
+                       if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
+                               break;
+                       chanirq_res = platform_get_resource(pdev,
+                                               IORESOURCE_IRQ, ++irqres);
+               } while (irq_cnt < pdata->channel_num && chanirq_res);
+       }
+       /* Create DMA Channel */
+       for (i = 0; i < irq_cnt; i++) {
+               err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
+               if (err)
+                       goto chan_probe_err;
+       }
+       if (irq_cap)
+               dev_notice(&pdev->dev, "Attempting to register %d DMA "
+                          "channels when a maximum of %d are supported.\n",
+                          pdata->channel_num, SH_DMAE_MAX_CHANNELS);
+       pm_runtime_put(&pdev->dev);
+       err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+       if (err < 0)
+               goto edmadevreg;
+       return err;
+ edmadevreg:
+       pm_runtime_get(&pdev->dev);
+ chan_probe_err:
+       sh_dmae_chan_remove(shdev);
+ #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ eirq_err:
+ #endif
+ rst_err:
+       spin_lock_irq(&sh_dmae_lock);
+       list_del_rcu(&shdev->node);
+       spin_unlock_irq(&sh_dmae_lock);
+       pm_runtime_put(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+       shdma_cleanup(&shdev->shdma_dev);
+ eshdma:
+       synchronize_rcu();
+       return err;
+ }
+ static int sh_dmae_remove(struct platform_device *pdev)
+ {
+       struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+       struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+       dma_async_device_unregister(dma_dev);
+       spin_lock_irq(&sh_dmae_lock);
+       list_del_rcu(&shdev->node);
+       spin_unlock_irq(&sh_dmae_lock);
+       pm_runtime_disable(&pdev->dev);
+       sh_dmae_chan_remove(shdev);
+       shdma_cleanup(&shdev->shdma_dev);
+       synchronize_rcu();
+       return 0;
+ }
+ static struct platform_driver sh_dmae_driver = {
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .pm     = &sh_dmae_pm,
+               .name   = SH_DMAE_DRV_NAME,
+               .of_match_table = sh_dmae_of_match,
+       },
+       .remove         = sh_dmae_remove,
+       .shutdown       = sh_dmae_shutdown,
+ };
+ static int __init sh_dmae_init(void)
+ {
+       /* Wire up NMI handling */
+       int err = register_die_notifier(&sh_dmae_nmi_notifier);
+       if (err)
+               return err;
+       return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+ }
+ module_init(sh_dmae_init);
+ static void __exit sh_dmae_exit(void)
+ {
+       platform_driver_unregister(&sh_dmae_driver);
+       unregister_die_notifier(&sh_dmae_nmi_notifier);
+ }
+ module_exit(sh_dmae_exit);
+ MODULE_AUTHOR("Nobuhiro Iwamatsu <[email protected]>");
+ MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
diff --combined include/linux/dmaengine.h
index 0c72b89a172caa7d6556f8642daf1bd7f9aaee61,2601186ab18344d8e6533406cd845a6d74b5720f..0bc727534108d5a2d5d527e75eaa8020a3ccd239
@@@ -38,10 -38,7 +38,10 @@@ typedef s32 dma_cookie_t
  #define DMA_MIN_COOKIE        1
  #define DMA_MAX_COOKIE        INT_MAX
  
 -#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
 +static inline int dma_submit_error(dma_cookie_t cookie)
 +{
 +      return cookie < 0 ? cookie : 0;
 +}
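Since dma_submit_error() now returns the negative cookie itself instead of a 0/1 flag, callers may propagate it directly; a short sketch (desc is assumed to be a prepared descriptor):

/* Sketch: a failed submission is propagated as-is. */
dma_cookie_t cookie = dmaengine_submit(desc);
if (dma_submit_error(cookie))
	return cookie;		/* negative error code, not just a flag */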
  
  /**
   * enum dma_status - DMA transaction status
@@@ -373,6 -370,25 +373,25 @@@ struct dma_slave_config 
        unsigned int slave_id;
  };
  
+ /* struct dma_slave_caps - expose capabilities of a slave channel only
+  *
+  * @src_addr_widths: bit mask of src addr widths the channel supports
+  * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+  * @directions: bit mask of slave directions the channel supports;
+  *    since enum dma_transfer_direction is not defined as a bit mask,
+  *    the DMA controller should fill (1 << <TYPE>) and clients should
+  *    test the mask the same way
+  * @cmd_pause: true if pause (and thereby resume) is supported
+  * @cmd_terminate: true if the terminate command is supported
+  */
+ struct dma_slave_caps {
+       u32 src_addr_widths;
+       u32 dstn_addr_widths;
+       u32 directions;
+       bool cmd_pause;
+       bool cmd_terminate;
+ };
  static inline const char *dma_chan_name(struct dma_chan *chan)
  {
        return dev_name(&chan->dev->device);
@@@ -535,6 -551,7 +554,7 @@@ struct dma_tx_state 
   *    struct with auxiliary transfer status information, otherwise the call
   *    will just return a simple status code
   * @device_issue_pending: push pending transactions to hardware
+  * @device_slave_caps: return the slave channel capabilities
   */
  struct dma_device {
  
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
+       int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
  };
  
  static inline int dmaengine_device_control(struct dma_chan *chan,
@@@ -673,6 -691,21 +694,21 @@@ static inline struct dma_async_tx_descr
        return chan->device->device_prep_interleaved_dma(chan, xt, flags);
  }
  
+ static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+ {
+       if (!chan || !caps)
+               return -EINVAL;
+       /* check if the channel supports slave transactions */
+       if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+               return -ENXIO;
+       if (chan->device->device_slave_caps)
+               return chan->device->device_slave_caps(chan, caps);
+       return -ENXIO;
+ }
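A client can use the new query to validate a channel before configuring it; a minimal sketch, assuming the peripheral needs 4-byte writes toward the device and working terminate support:

#include <linux/bitops.h>

/* Sketch: feature-check a channel before committing to it. */
static int check_chan(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret = dma_get_slave_caps(chan, &caps);

	if (ret)
		return ret;	/* e.g. -ENXIO if no caps are exposed */

	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)) ||
	    !(caps.dstn_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) ||
	    !caps.cmd_terminate)
		return -EINVAL;

	return 0;
}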
  static inline int dmaengine_terminate_all(struct dma_chan *chan)
  {
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@@ -961,9 -994,8 +997,9 @@@ dma_set_tx_state(struct dma_tx_state *s
        }
  }
  
 -enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
  #ifdef CONFIG_DMA_ENGINE
 +struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
  enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
  void dma_issue_pending_all(void);
  struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
  struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
  void dma_release_channel(struct dma_chan *chan);
  #else
 +static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 +{
 +      return NULL;
 +}
 +static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 +{
 +      return DMA_SUCCESS;
 +}
  static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
  {
        return DMA_SUCCESS;
@@@ -1006,6 -1030,8 +1042,7 @@@ static inline void dma_release_channel(
  int dma_async_device_register(struct dma_device *device);
  void dma_async_device_unregister(struct dma_device *device);
  void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 -struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
  struct dma_chan *net_dma_find_channel(void);
  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
  #define dma_request_slave_channel_compat(mask, x, y, dev, name) \