From: Linus Torvalds
Date: Tue, 10 Sep 2013 20:37:36 +0000 (-0700)
Subject: Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
X-Git-Tag: v3.12-rc1~68
X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/ec5b103ecfde929004b691f29183255aeeadecd5?hp=-c

Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine updates from Vinod Koul:
 "This pull brings:
  - Andy's DW driver updates
  - Guennadi's sh driver updates
  - Pl08x driver fixes from Tomasz & Alban
  - Improvements to mmp_pdma by Daniel
  - TI EDMA fixes by Joel
  - New drivers:
     - Hisilicon k3dma driver
     - Renesas rcar dma driver
  - New API for publishing slave driver capabilities
  - Various fixes across the subsystem by Andy, Jingoo, Sachin etc..."

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (94 commits)
  dma: edma: Remove limits on number of slots
  dma: edma: Leave linked to Null slot instead of DUMMY slot
  dma: edma: Find missed events and issue them
  ARM: edma: Add function to manually trigger an EDMA channel
  dma: edma: Write out and handle MAX_NR_SG at a given time
  dma: edma: Setup parameters to DMA MAX_NR_SG at a time
  dmaengine: pl330: use dma_set_max_seg_size to set the sg limit
  dmaengine: dma_slave_caps: remove sg entries
  dma: replace devm_request_and_ioremap by devm_ioremap_resource
  dma: ste_dma40: Fix potential null pointer dereference
  dma: ste_dma40: Remove duplicate const
  dma: imx-dma: Remove redundant NULL check
  dma: dmagengine: fix function names in comments
  dma: add driver for R-Car HPB-DMAC
  dma: k3dma: use devm_ioremap_resource() instead of devm_request_and_ioremap()
  dma: imx-sdma: Staticize sdma_driver_data structures
  pch_dma: Add MODULE_DEVICE_TABLE
  dmaengine: PL08x: Add cyclic transfer support
  dmaengine: PL08x: Fix reading the byte count in cctl
  dmaengine: PL08x: Add support for different maximum transfer size
  ...
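The "new API for publishing slave driver capabilities" referred to above is the
dma_get_slave_caps() helper added to include/linux/dmaengine.h in this series.
A minimal sketch of how a client driver might query it before configuring a
channel follows; the field names are those of the 3.12-era struct
dma_slave_caps, and the function name check_chan_caps() is illustrative, not
part of the merged code:

  #include <linux/bitops.h>
  #include <linux/dmaengine.h>
  #include <linux/printk.h>

  /* Sketch: query the capabilities a slave DMA channel advertises.
   * Assumes 'chan' was already obtained, e.g. via dma_request_slave_channel().
   * Returns 0 if the channel can do what we need. */
  static int check_chan_caps(struct dma_chan *chan)
  {
  	struct dma_slave_caps caps;
  	int ret;

  	ret = dma_get_slave_caps(chan, &caps);
  	if (ret)
  		return ret;	/* controller does not publish caps */

  	/* e.g. require device-to-memory transfers */
  	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
  		return -EINVAL;

  	if (!caps.cmd_pause)
  		pr_info("channel cannot be paused mid-transfer\n");

  	return 0;
  }

Note the "dmaengine: dma_slave_caps: remove sg entries" commit in the shortlog:
the scatter-gather limit fields were dropped from this structure before the
merge, with drivers expected to use dma_set_max_seg_size() instead.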
--- ec5b103ecfde929004b691f29183255aeeadecd5 diff --combined Documentation/driver-model/devres.txt index fb57d85e7316,2850110b343a..fcb34a5697ea --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@@ -237,12 -237,6 +237,12 @@@ ME devm_kzalloc() devm_kfree() +IIO + devm_iio_device_alloc() + devm_iio_device_free() + devm_iio_trigger_alloc() + devm_iio_trigger_free() + IO region devm_request_region() devm_request_mem_region() @@@ -299,3 -293,6 +299,6 @@@ PW PHY devm_usb_get_phy() devm_usb_put_phy() + + SLAVE DMA ENGINE + devm_acpi_dma_controller_register() diff --combined MAINTAINERS index 87efa1f5c7f3,6a6554a68959..38a37f7e514f --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -580,24 -580,12 +580,24 @@@ L: linux-media@vger.kernel.or S: Maintained F: drivers/media/i2c/ad9389b* +ANALOG DEVICES INC ADV7511 DRIVER +M: Hans Verkuil +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/adv7511* + ANALOG DEVICES INC ADV7604 DRIVER M: Hans Verkuil L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7604* +ANALOG DEVICES INC ADV7842 DRIVER +M: Hans Verkuil +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/adv7842* + ANALOG DEVICES INC ASOC CODEC DRIVERS M: Lars-Peter Clausen L: device-drivers-devel@blackfin.uclinux.org @@@ -607,7 -595,6 +607,7 @@@ S: Supporte F: sound/soc/codecs/adau* F: sound/soc/codecs/adav* F: sound/soc/codecs/ad1* +F: sound/soc/codecs/ad7* F: sound/soc/codecs/ssm* F: sound/soc/codecs/sigmadsp.* @@@ -651,12 -638,6 +651,12 @@@ S: Maintaine F: drivers/net/appletalk/ F: net/appletalk/ +APTINA CAMERA SENSOR PLL +M: Laurent Pinchart +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/aptina-pll.* + ARASAN COMPACT FLASH PATA CONTROLLER M: Viresh Kumar L: linux-ide@vger.kernel.org @@@ -832,7 -813,7 +832,7 @@@ F: arch/arm/mach-prima2 F: drivers/dma/sirf-dma.c F: drivers/i2c/busses/i2c-sirf.c F: drivers/mmc/host/sdhci-sirf.c -F: drivers/pinctrl/pinctrl-sirf.c +F: drivers/pinctrl/sirf/ F: drivers/spi/spi-sirf.c ARM/EBSA110 MACHINE SUPPORT @@@ -933,24 -914,24 +933,24 @@@ F: arch/arm/mach-pxa/colibri-pxa270-inc ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -975,7 -956,7 +975,7 @@@ F: drivers/pcmcia/pxa2xx_stargate2. ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -984,12 -965,6 +984,12 @@@ M: Lennert Buytenhek +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm/mach-keystone/ + ARM/LOGICPD PXA270 MACHINE SUPPORT M: Lennert Buytenhek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -1284,6 -1259,7 +1284,6 @@@ F: drivers/rtc/rtc-coh901331. 
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/Ux500 ARM ARCHITECTURE -M: Srinidhi Kasagar M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -1320,6 -1296,7 +1320,6 @@@ L: linux-arm-kernel@lists.infradead.or S: Maintained F: arch/arm/mach-vt8500/ F: drivers/clocksource/vt8500_timer.c -F: drivers/gpio/gpio-vt8500.c F: drivers/i2c/busses/i2c-wmt.c F: drivers/mmc/host/wmt-sdmmc.c F: drivers/pwm/pwm-vt8500.c @@@ -1386,7 -1363,7 +1386,7 @@@ F: drivers/platform/x86/asus*. F: drivers/platform/x86/eeepc*.c ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API -M: Dan Williams +M: Dan Williams W: http://sourceforge.net/projects/xscaleiop S: Maintained F: Documentation/crypto/async-tx-api.txt @@@ -1565,13 -1542,6 +1565,13 @@@ W: http://atmelwlandriver.sourceforge.n S: Maintained F: drivers/net/wireless/atmel* +ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER +M: Bradley Grove +L: linux-scsi@vger.kernel.org +W: http://www.attotech.com +S: Supported +F: drivers/scsi/esas2r + AUDIT SUBSYSTEM M: Al Viro M: Eric Paris @@@ -1848,12 -1818,6 +1848,12 @@@ L: linux-scsi@vger.kernel.or S: Supported F: drivers/scsi/bnx2fc/ +BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER +M: Eddie Wai +L: linux-scsi@vger.kernel.org +S: Supported +F: drivers/scsi/bnx2i/ + BROADCOM SPECIFIC AMBA DRIVER (BCMA) M: Rafał Miłecki L: linux-wireless@vger.kernel.org @@@ -2107,8 -2071,7 +2107,8 @@@ F: drivers/usb/chipidea CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti -M: Roopa Prabhu +M: Sujith Sankar +M: Govindarajulu Varadarajan M: Neel Patel M: Nishank Trivedi S: Supported @@@ -2144,13 -2107,6 +2144,13 @@@ M: Russell King +M: Thomas Gleixner +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core +S: Supported +F: drivers/clocksource + CISCO FCOE HBA DRIVER M: Hiral Patel M: Suma Ramars @@@ -2307,15 -2263,6 +2307,15 @@@ F: drivers/cpufreq/arm_big_little. F: drivers/cpufreq/arm_big_little.c F: drivers/cpufreq/arm_big_little_dt.c +CPUIDLE DRIVER - ARM BIG LITTLE +M: Lorenzo Pieralisi +M: Daniel Lezcano +L: linux-pm@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org +T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git +S: Maintained +F: drivers/cpuidle/cpuidle-big_little.c + CPUIDLE DRIVERS M: Rafael J. Wysocki M: Daniel Lezcano @@@ -2502,9 -2449,9 +2502,9 @@@ S: Maintaine F: drivers/media/common/cypress_firmware* CYTTSP TOUCHSCREEN DRIVER -M: Javier Martinez Canillas +M: Ferruh Yigit L: linux-input@vger.kernel.org -S: Maintained +S: Supported F: drivers/input/touchscreen/cyttsp* F: include/linux/input/cyttsp.h @@@ -2691,7 -2638,7 +2691,7 @@@ T: git git://git.linaro.org/people/sumi DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul -M: Dan Williams +M: Dan Williams S: Supported F: drivers/dma/ F: include/linux/dma* @@@ -4149,13 -4096,6 +4149,13 @@@ W: http://launchpad.net/ideapad-lapto S: Maintained F: drivers/platform/x86/ideapad-laptop.c +IDEAPAD LAPTOP SLIDEBAR DRIVER +M: Andrey Moiseev +L: linux-input@vger.kernel.org +W: https://github.com/o2genum/ideapad-slidebar +S: Maintained +F: drivers/input/misc/ideapad_slidebar.c + IDE/ATAPI DRIVERS M: Borislav Petkov L: linux-ide@vger.kernel.org @@@ -4323,7 -4263,7 +4323,7 @@@ F: arch/x86/kernel/microcode_core. F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER -M: Dan Williams +M: Dan Williams S: Maintained F: drivers/dma/ioat* @@@ -4336,7 -4276,7 +4336,7 @@@ F: drivers/iommu/intel-iommu. 
F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER -M: Dan Williams +M: Dan Williams S: Odd fixes F: drivers/dma/iop-adma.c @@@ -4420,7 -4360,7 +4420,7 @@@ F: drivers/net/wireless/iwlegacy INTEL WIRELESS WIFI LINK (iwlwifi) M: Johannes Berg -M: Wey-Yi Guy +M: Emmanuel Grumbach M: Intel Linux Wireless L: linux-wireless@vger.kernel.org W: http://intellinuxwireless.org @@@ -5451,7 -5391,6 +5451,7 @@@ F: drivers/watchdog/mena21_wdt. METAG ARCHITECTURE M: James Hogan +L: linux-metag@vger.kernel.org S: Supported F: arch/metag/ F: Documentation/metag/ @@@ -5553,7 -5492,7 +5553,7 @@@ L: platform-driver-x86@vger.kernel.or S: Supported F: drivers/platform/x86/msi-wmi.c -MT9M032 SENSOR DRIVER +MT9M032 APTINA SENSOR DRIVER M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git @@@ -5561,7 -5500,7 +5561,7 @@@ S: Maintaine F: drivers/media/i2c/mt9m032.c F: include/media/mt9m032.h -MT9P031 SENSOR DRIVER +MT9P031 APTINA CAMERA SENSOR M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git @@@ -5569,7 -5508,7 +5569,7 @@@ S: Maintaine F: drivers/media/i2c/mt9p031.c F: include/media/mt9p031.h -MT9T001 SENSOR DRIVER +MT9T001 APTINA CAMERA SENSOR M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git @@@ -5577,7 -5516,7 +5577,7 @@@ S: Maintaine F: drivers/media/i2c/mt9t001.c F: include/media/mt9t001.h -MT9V032 SENSOR DRIVER +MT9V032 APTINA CAMERA SENSOR M: Laurent Pinchart L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git @@@ -5637,9 -5576,9 +5637,9 @@@ S: Maintaine F: drivers/media/tuners/mxl5007t.* MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE) -M: Andrew Gallatin +M: Hyong-Youb Kim L: netdev@vger.kernel.org -W: http://www.myri.com/scs/download-Myri10GE.html +W: https://www.myricom.com/support/downloads/myri10ge.html S: Supported F: drivers/net/ethernet/myricom/myri10ge/ @@@ -5848,7 -5787,7 +5848,7 @@@ M: Aloisio Almeida Jr L: linux-wireless@vger.kernel.org L: linux-nfc@lists.01.org (moderated for non-subscribers) -S: Maintained +S: Supported F: net/nfc/ F: include/net/nfc/ F: include/uapi/linux/nfc.h @@@ -5899,8 -5838,6 +5899,8 @@@ F: drivers/scsi/nsp32 NTB DRIVER M: Jon Mason S: Supported +W: https://github.com/jonmason/ntb/wiki +T: git git://github.com/jonmason/ntb.git F: drivers/ntb/ F: drivers/net/ntb_netdev.c F: include/linux/ntb.h @@@ -5942,7 -5879,7 +5942,7 @@@ F: drivers/i2c/busses/i2c-omap. 
F: include/linux/i2c-omap.h OMAP DEVICE TREE SUPPORT -M: Benoît Cousson +M: Benoît Cousson M: Tony Lindgren L: linux-omap@vger.kernel.org L: devicetree@vger.kernel.org @@@ -6022,14 -5959,14 +6022,14 @@@ S: Maintaine F: drivers/char/hw_random/omap-rng.c OMAP HWMOD SUPPORT -M: Benoît Cousson +M: Benoît Cousson M: Paul Walmsley L: linux-omap@vger.kernel.org S: Maintained F: arch/arm/mach-omap2/omap_hwmod.* OMAP HWMOD DATA FOR OMAP4-BASED DEVICES -M: Benoît Cousson +M: Benoît Cousson L: linux-omap@vger.kernel.org S: Maintained F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@@ -6124,7 -6061,7 +6124,7 @@@ M: Rob Herring M: Mark Rutland M: Stephen Warren -M: Ian Campbell +M: Ian Campbell L: devicetree@vger.kernel.org S: Maintained F: Documentation/devicetree/ @@@ -6333,13 -6270,6 +6333,13 @@@ F: Documentation/PCI F: drivers/pci/ F: include/linux/pci* +PCI DRIVER FOR NVIDIA TEGRA +M: Thierry Reding +L: linux-tegra@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt +F: drivers/pci/host/pci-tegra.c + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org @@@ -6741,11 -6671,11 +6741,11 @@@ F: Documentation/scsi/LICENSE.qla2xx F: drivers/scsi/qla2xxx/ QLOGIC QLA4XXX iSCSI DRIVER -M: Ravi Anand M: Vikas Chaudhary M: iscsi-driver@qlogic.com L: linux-scsi@vger.kernel.org S: Supported +F: Documentation/scsi/LICENSE.qla4xxx F: drivers/scsi/qla4xxx/ QLOGIC QLA3XXX NETWORK DRIVER @@@ -6986,14 -6916,6 +6986,14 @@@ M: Maxim Levitsky +W: http://sourceforge.net/projects/roccat/ +S: Maintained +F: drivers/hid/hid-roccat* +F: include/linux/hid-roccat* +F: Documentation/ABI/*/sysfs-driver-hid-roccat* + ROCKETPORT DRIVER P: Comtrol Corp. W: http://www.comtrol.com @@@ -7204,6 -7126,7 +7204,7 @@@ F: drivers/tty/seria SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar + M: Andy Shevchenko S: Maintained F: include/linux/dw_dmac.h F: drivers/dma/dw/ @@@ -7216,7 -7139,7 +7217,7 @@@ S: Maintaine F: include/linux/mmc/dw_mmc.h F: drivers/mmc/host/dw_mmc* -TIMEKEEPING, NTP +TIMEKEEPING, CLOCKSOURCE CORE, NTP M: John Stultz M: Thomas Gleixner T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core @@@ -7229,6 -7152,7 +7230,6 @@@ F: include/uapi/linux/timex. 
F: kernel/time/clocksource.c F: kernel/time/time*.c F: kernel/time/ntp.c -F: drivers/clocksource TLG2300 VIDEO4LINUX-2 DRIVER M: Huang Shijie @@@ -7308,7 -7232,6 +7309,7 @@@ W: http://lksctp.sourceforge.ne S: Maintained F: Documentation/networking/sctp.txt F: include/linux/sctp.h +F: include/uapi/linux/sctp.h F: include/net/sctp/ F: net/sctp/ @@@ -7439,6 -7362,7 +7440,6 @@@ F: drivers/net/ethernet/sfc SGI GRU DRIVER M: Dimitri Sivanich -M: Robin Holt S: Maintained F: drivers/misc/sgi-gru/ @@@ -7458,8 -7382,7 +7459,8 @@@ S: Maintained for 2.6 F: Documentation/sgi-visws.txt SGI XP/XPC/XPNET DRIVER -M: Robin Holt +M: Cliff Whickman +M: Robin Holt S: Maintained F: drivers/misc/sgi-xp/ @@@ -7748,17 -7671,6 +7749,17 @@@ F: include/sound F: include/uapi/sound/ F: sound/ +SOUND - COMPRESSED AUDIO +M: Vinod Koul +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git +S: Supported +F: Documentation/sound/alsa/compress_offload.txt +F: include/sound/compress_driver.h +F: include/uapi/sound/compress_* +F: sound/core/compress_offload.c +F: sound/soc/soc-compress.c + SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) M: Liam Girdwood M: Mark Brown @@@ -7766,7 -7678,6 +7767,7 @@@ T: git git://git.kernel.org/pub/scm/lin L: alsa-devel@alsa-project.org (moderated for non-subscribers) W: http://alsa-project.org/main/index.php/ASoC S: Supported +F: Documentation/sound/alsa/soc/ F: sound/soc/ F: include/sound/soc* @@@ -7975,11 -7886,11 +7976,11 @@@ S: Maintaine F: drivers/staging/nvec/ STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON) -M: Andres Salomon -M: Chris Ball +M: Jens Frederich +M: Daniel Drake M: Jon Nettleton W: http://wiki.laptop.org/go/DCON -S: Odd Fixes +S: Maintained F: drivers/staging/olpc_dcon/ STAGING - OZMO DEVICES USB OVER WIFI DRIVER @@@ -8057,12 -7968,6 +8058,12 @@@ F: arch/m68k/sun3* F: arch/m68k/include/asm/sun3* F: drivers/net/ethernet/i825xx/sun3* +SUNDANCE NETWORK DRIVER +M: Denis Kirjanov +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/dlink/sundance.c + SUPERH M: Paul Mundt L: linux-sh@vger.kernel.org @@@ -8398,14 -8303,9 +8399,14 @@@ M: Chris Metcalf @@@ -8765,11 -8665,6 +8766,11 @@@ T: git git://git.alsa-project.org/alsa- S: Maintained F: sound/usb/midi.* +USB NETWORKING DRIVERS +L: linux-usb@vger.kernel.org +S: Odd Fixes +F: drivers/net/usb/ + USB OHCI DRIVER M: Alan Stern L: linux-usb@vger.kernel.org @@@ -8899,6 -8794,7 +8900,6 @@@ W: http://www.linux-usb.or T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git S: Supported F: Documentation/usb/ -F: drivers/net/usb/ F: drivers/usb/ F: include/linux/usb.h F: include/linux/usb/ @@@ -9121,12 -9017,6 +9122,12 @@@ F: drivers/staging/vme F: drivers/vme/ F: include/linux/vme* +VMWARE HYPERVISOR INTERFACE +M: Alok Kataria +L: virtualization@lists.linux-foundation.org +S: Supported +F: arch/x86/kernel/cpu/vmware.c + VMWARE VMXNET3 ETHERNET DRIVER M: Shreyas Bhatewara M: "VMware, Inc." @@@ -9348,9 -9238,9 +9349,9 @@@ F: drivers/media/tuners/tuner-xc2028. 
XEN HYPERVISOR INTERFACE M: Konrad Rzeszutek Wilk -M: Jeremy Fitzhardinge -L: xen-devel@lists.xensource.com (moderated for non-subscribers) -L: virtualization@lists.linux-foundation.org +M: Boris Ostrovsky +M: David Vrabel +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: arch/x86/xen/ F: drivers/*/xen-*front.c @@@ -9361,35 -9251,35 +9362,35 @@@ F: include/uapi/xen XEN HYPERVISOR ARM M: Stefano Stabellini -L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: arch/arm/xen/ F: arch/arm/include/asm/xen/ XEN HYPERVISOR ARM64 M: Stefano Stabellini -L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: arch/arm64/xen/ F: arch/arm64/include/asm/xen/ XEN NETWORK BACKEND DRIVER M: Ian Campbell -L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: netdev@vger.kernel.org S: Supported F: drivers/net/xen-netback/* XEN PCI SUBSYSTEM M: Konrad Rzeszutek Wilk -L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: arch/x86/pci/*xen* F: drivers/pci/*xen* XEN SWIOTLB SUBSYSTEM M: Konrad Rzeszutek Wilk -L: xen-devel@lists.xensource.com (moderated for non-subscribers) +L: xen-devel@lists.xenproject.org (moderated for non-subscribers) S: Supported F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* diff --combined arch/arm/mach-imx/mm-imx5.c index a8229b7f10bf,051add9cc471..eb3cce38c70d --- a/arch/arm/mach-imx/mm-imx5.c +++ b/arch/arm/mach-imx/mm-imx5.c @@@ -103,22 -103,8 +103,8 @@@ void __init mx53_init_irq(void tzic_init_irq(MX53_IO_ADDRESS(MX53_TZIC_BASE_ADDR)); } - static struct sdma_script_start_addrs imx51_sdma_script __initdata = { - .ap_2_ap_addr = 642, - .uart_2_mcu_addr = 817, - .mcu_2_app_addr = 747, - .mcu_2_shp_addr = 961, - .ata_2_mcu_addr = 1473, - .mcu_2_ata_addr = 1392, - .app_2_per_addr = 1033, - .app_2_mcu_addr = 683, - .shp_2_per_addr = 1251, - .shp_2_mcu_addr = 892, - }; - static struct sdma_platform_data imx51_sdma_pdata __initdata = { .fw_name = "sdma-imx51.bin", - .script_addrs = &imx51_sdma_script, }; static const struct resource imx51_audmux_res[] __initconst = { @@@ -153,10 -139,10 +139,10 @@@ void __init imx51_soc_init(void void __init imx51_init_late(void) { mx51_neon_fixup(); - imx51_pm_init(); + imx5_pm_init(); } void __init imx53_init_late(void) { - imx53_pm_init(); + imx5_pm_init(); } diff --combined drivers/dma/Kconfig index daa4da281e5e,2945ff084a04..526ec77c7ba0 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@@ -194,7 -194,7 +194,7 @@@ config SIRF_DM Enable support for the CSR SiRFprimaII DMA engine. config TI_EDMA - tristate "TI EDMA support" + bool "TI EDMA support" depends on ARCH_DAVINCI || ARCH_OMAP select DMA_ENGINE select DMA_VIRTUAL_CHANNELS @@@ -287,14 -287,6 +287,14 @@@ config DMA_OMA select DMA_ENGINE select DMA_VIRTUAL_CHANNELS +config TI_CPPI41 + tristate "AM33xx CPPI41 DMA support" + depends on ARCH_OMAP + select DMA_ENGINE + help + The Communications Port Programming Interface (CPPI) 4.1 DMA engine + is currently used by the USB driver on AM335x platforms. 
+ config MMP_PDMA bool "MMP PDMA support" depends on (ARCH_MMP || ARCH_PXA) @@@ -308,6 -300,15 +308,15 @@@ config DMA_JZ474 select DMA_ENGINE select DMA_VIRTUAL_CHANNELS + config K3_DMA + tristate "Hisilicon K3 DMA support" + depends on ARCH_HI3xxx + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support the DMA engine for Hisilicon K3 platform + devices. + config DMA_ENGINE bool diff --combined drivers/dma/Makefile index 6d62ec30c4bc,8c97941df273..db89035b3626 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@@ -39,4 -39,4 +39,5 @@@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma. obj-$(CONFIG_DMA_OMAP) += omap-dma.o obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o +obj-$(CONFIG_TI_CPPI41) += cppi41.o + obj-$(CONFIG_K3_DMA) += k3dma.o diff --combined drivers/dma/amba-pl08x.c index bff41d4848e5,cd294340c851..fce46c5bf1c7 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@@ -24,6 -24,7 +24,7 @@@ * * Documentation: ARM DDI 0196G == PL080 * Documentation: ARM DDI 0218E == PL081 + * Documentation: S3C6410 User's Manual == PL080S * * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any * channel. @@@ -36,6 -37,14 +37,14 @@@ * * The PL080 has a dual bus master, PL081 has a single master. * + * PL080S is a version modified by Samsung and used in S3C64xx SoCs. + * It differs in following aspects: + * - CH_CONFIG register at different offset, + * - separate CH_CONTROL2 register for transfer size, + * - bigger maximum transfer size, + * - 8-word aligned LLI, instead of 4-word, due to extra CCTL2 word, + * - no support for peripheral flow control. + * * Memory to peripheral transfer may be visualized as * Get data from memory to DMAC * Until no data left @@@ -64,10 -73,7 +73,7 @@@ * - Peripheral flow control: the transfer size is ignored (and should be * zero). The data is transferred from the current LLI entry, until * after the final transfer signalled by LBREQ or LSREQ. The DMAC - * will then move to the next LLI entry. - * - * Global TODO: - * - Break out common code from arch/arm/mach-s3c64xx and share + * will then move to the next LLI entry. Unsupported by PL080S. */ #include #include @@@ -100,24 -106,16 +106,16 @@@ struct pl08x_driver_data * @nomadik: whether the channels have Nomadik security extension bits * that need to be checked for permission before use and some registers are * missing + * @pl080s: whether this version is a PL080S, which has separate register and + * LLI word for transfer size. */ struct vendor_data { + u8 config_offset; u8 channels; bool dualmaster; bool nomadik; - }; - - /* - * PL08X private data structures - * An LLI struct - see PL08x TRM. Note that next uses bit[0] as a bus bit, - * start & end do not - their bus bit info is in cctl. Also note that these - * are fixed 32-bit quantities. - */ - struct pl08x_lli { - u32 src; - u32 dst; - u32 lli; - u32 cctl; + bool pl080s; + u32 max_transfer_size; }; /** @@@ -133,8 -131,6 +131,8 @@@ struct pl08x_bus_data u8 buswidth; }; +#define IS_BUS_ALIGNED(bus) IS_ALIGNED((bus)->addr, (bus)->buswidth) + /** * struct pl08x_phy_chan - holder for the physical channels * @id: physical index to this channel @@@ -147,6 -143,7 +145,7 @@@ struct pl08x_phy_chan { unsigned int id; void __iomem *base; + void __iomem *reg_config; spinlock_t lock; struct pl08x_dma_chan *serving; bool locked; @@@ -176,12 -173,13 +175,13 @@@ struct pl08x_sg * @ccfg: config reg values for current txd * @done: this marks completed descriptors, which should not have their * mux released. 
+ * @cyclic: indicate cyclic transfers */ struct pl08x_txd { struct virt_dma_desc vd; struct list_head dsg_list; dma_addr_t llis_bus; - struct pl08x_lli *llis_va; + u32 *llis_va; /* Default cctl value for LLIs */ u32 cctl; /* @@@ -190,6 -188,7 +190,7 @@@ */ u32 ccfg; bool done; + bool cyclic; }; /** @@@ -265,17 -264,29 +266,29 @@@ struct pl08x_driver_data struct dma_pool *pool; u8 lli_buses; u8 mem_buses; + u8 lli_words; }; /* * PL08X specific defines */ - /* Size (bytes) of each LLI buffer allocated for one transfer */ - # define PL08X_LLI_TSFR_SIZE 0x2000 + /* The order of words in an LLI. */ + #define PL080_LLI_SRC 0 + #define PL080_LLI_DST 1 + #define PL080_LLI_LLI 2 + #define PL080_LLI_CCTL 3 + #define PL080S_LLI_CCTL2 4 + + /* Total words in an LLI. */ + #define PL080_LLI_WORDS 4 + #define PL080S_LLI_WORDS 8 - /* Maximum times we call dma_pool_alloc on this pool without freeing */ - #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli)) + /* + * Number of LLIs in each LLI buffer allocated for one transfer + * (maximum times we call dma_pool_alloc on this pool without freeing) + */ + #define MAX_NUM_TSFR_LLIS 512 #define PL08X_ALIGN 8 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) @@@ -336,10 -347,39 +349,39 @@@ static int pl08x_phy_channel_busy(struc { unsigned int val; - val = readl(ch->base + PL080_CH_CONFIG); + val = readl(ch->reg_config); return val & PL080_CONFIG_ACTIVE; } + static void pl08x_write_lli(struct pl08x_driver_data *pl08x, + struct pl08x_phy_chan *phychan, const u32 *lli, u32 ccfg) + { + if (pl08x->vd->pl080s) + dev_vdbg(&pl08x->adev->dev, + "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " + "clli=0x%08x, cctl=0x%08x, cctl2=0x%08x, ccfg=0x%08x\n", + phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], + lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], + lli[PL080S_LLI_CCTL2], ccfg); + else + dev_vdbg(&pl08x->adev->dev, + "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " + "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", + phychan->id, lli[PL080_LLI_SRC], lli[PL080_LLI_DST], + lli[PL080_LLI_LLI], lli[PL080_LLI_CCTL], ccfg); + + writel_relaxed(lli[PL080_LLI_SRC], phychan->base + PL080_CH_SRC_ADDR); + writel_relaxed(lli[PL080_LLI_DST], phychan->base + PL080_CH_DST_ADDR); + writel_relaxed(lli[PL080_LLI_LLI], phychan->base + PL080_CH_LLI); + writel_relaxed(lli[PL080_LLI_CCTL], phychan->base + PL080_CH_CONTROL); + + if (pl08x->vd->pl080s) + writel_relaxed(lli[PL080S_LLI_CCTL2], + phychan->base + PL080S_CH_CONTROL2); + + writel(ccfg, phychan->reg_config); + } + /* * Set the initial DMA register values i.e. 
those for the first LLI * The next LLI pointer and the configuration interrupt bit have @@@ -352,7 -392,6 +394,6 @@@ static void pl08x_start_next_txd(struc struct pl08x_phy_chan *phychan = plchan->phychan; struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc); struct pl08x_txd *txd = to_pl08x_txd(&vd->tx); - struct pl08x_lli *lli; u32 val; list_del(&txd->vd.node); @@@ -363,19 -402,7 +404,7 @@@ while (pl08x_phy_channel_busy(phychan)) cpu_relax(); - lli = &txd->llis_va[0]; - - dev_vdbg(&pl08x->adev->dev, - "WRITE channel %d: csrc=0x%08x, cdst=0x%08x, " - "clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n", - phychan->id, lli->src, lli->dst, lli->lli, lli->cctl, - txd->ccfg); - - writel(lli->src, phychan->base + PL080_CH_SRC_ADDR); - writel(lli->dst, phychan->base + PL080_CH_DST_ADDR); - writel(lli->lli, phychan->base + PL080_CH_LLI); - writel(lli->cctl, phychan->base + PL080_CH_CONTROL); - writel(txd->ccfg, phychan->base + PL080_CH_CONFIG); + pl08x_write_lli(pl08x, phychan, &txd->llis_va[0], txd->ccfg); /* Enable the DMA channel */ /* Do not access config register until channel shows as disabled */ @@@ -383,11 -410,11 +412,11 @@@ cpu_relax(); /* Do not access config register until channel shows as inactive */ - val = readl(phychan->base + PL080_CH_CONFIG); + val = readl(phychan->reg_config); while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) - val = readl(phychan->base + PL080_CH_CONFIG); + val = readl(phychan->reg_config); - writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG); + writel(val | PL080_CONFIG_ENABLE, phychan->reg_config); } /* @@@ -406,9 -433,9 +435,9 @@@ static void pl08x_pause_phy_chan(struc int timeout; /* Set the HALT bit and wait for the FIFO to drain */ - val = readl(ch->base + PL080_CH_CONFIG); + val = readl(ch->reg_config); val |= PL080_CONFIG_HALT; - writel(val, ch->base + PL080_CH_CONFIG); + writel(val, ch->reg_config); /* Wait for channel inactive */ for (timeout = 1000; timeout; timeout--) { @@@ -425,9 -452,9 +454,9 @@@ static void pl08x_resume_phy_chan(struc u32 val; /* Clear the HALT bit */ - val = readl(ch->base + PL080_CH_CONFIG); + val = readl(ch->reg_config); val &= ~PL080_CONFIG_HALT; - writel(val, ch->base + PL080_CH_CONFIG); + writel(val, ch->reg_config); } /* @@@ -439,12 -466,12 +468,12 @@@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, struct pl08x_phy_chan *ch) { - u32 val = readl(ch->base + PL080_CH_CONFIG); + u32 val = readl(ch->reg_config); val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | PL080_CONFIG_TC_IRQ_MASK); - writel(val, ch->base + PL080_CH_CONFIG); + writel(val, ch->reg_config); writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); @@@ -455,6 -482,28 +484,28 @@@ static inline u32 get_bytes_in_cctl(u3 /* The source width defines the number of bytes */ u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; + cctl &= PL080_CONTROL_SWIDTH_MASK; + + switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { + case PL080_WIDTH_8BIT: + break; + case PL080_WIDTH_16BIT: + bytes *= 2; + break; + case PL080_WIDTH_32BIT: + bytes *= 4; + break; + } + return bytes; + } + + static inline u32 get_bytes_in_cctl_pl080s(u32 cctl, u32 cctl1) + { + /* The source width defines the number of bytes */ + u32 bytes = cctl1 & PL080S_CONTROL_TRANSFER_SIZE_MASK; + + cctl &= PL080_CONTROL_SWIDTH_MASK; + switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { case PL080_WIDTH_8BIT: break; @@@ -471,47 -520,66 +522,66 @@@ /* The channel should be paused when calling this */ static u32 
pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) { + struct pl08x_driver_data *pl08x = plchan->host; + const u32 *llis_va, *llis_va_limit; struct pl08x_phy_chan *ch; + dma_addr_t llis_bus; struct pl08x_txd *txd; - size_t bytes = 0; + u32 llis_max_words; + size_t bytes; + u32 clli; ch = plchan->phychan; txd = plchan->at; + if (!ch || !txd) + return 0; + /* * Follow the LLIs to get the number of remaining * bytes in the currently active transaction. */ - if (ch && txd) { - u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; + clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2; - /* First get the remaining bytes in the active transfer */ + /* First get the remaining bytes in the active transfer */ + if (pl08x->vd->pl080s) + bytes = get_bytes_in_cctl_pl080s( + readl(ch->base + PL080_CH_CONTROL), + readl(ch->base + PL080S_CH_CONTROL2)); + else bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); - if (clli) { - struct pl08x_lli *llis_va = txd->llis_va; - dma_addr_t llis_bus = txd->llis_bus; - int index; + if (!clli) + return bytes; - BUG_ON(clli < llis_bus || clli >= llis_bus + - sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS); + llis_va = txd->llis_va; + llis_bus = txd->llis_bus; - /* - * Locate the next LLI - as this is an array, - * it's simple maths to find. - */ - index = (clli - llis_bus) / sizeof(struct pl08x_lli); + llis_max_words = pl08x->lli_words * MAX_NUM_TSFR_LLIS; + BUG_ON(clli < llis_bus || clli >= llis_bus + + sizeof(u32) * llis_max_words); - for (; index < MAX_NUM_TSFR_LLIS; index++) { - bytes += get_bytes_in_cctl(llis_va[index].cctl); + /* + * Locate the next LLI - as this is an array, + * it's simple maths to find. + */ + llis_va += (clli - llis_bus) / sizeof(u32); - /* - * A LLI pointer of 0 terminates the LLI list - */ - if (!llis_va[index].lli) - break; - } - } + llis_va_limit = llis_va + llis_max_words; + + for (; llis_va < llis_va_limit; llis_va += pl08x->lli_words) { + if (pl08x->vd->pl080s) + bytes += get_bytes_in_cctl_pl080s( + llis_va[PL080_LLI_CCTL], + llis_va[PL080S_LLI_CCTL2]); + else + bytes += get_bytes_in_cctl(llis_va[PL080_LLI_CCTL]); + + /* + * A LLI pointer going backward terminates the LLI list + */ + if (llis_va[PL080_LLI_LLI] <= clli) + break; } return bytes; @@@ -722,6 -790,7 +792,7 @@@ static inline u32 pl08x_cctl_bits(u32 c break; } + tsize &= PL080_CONTROL_TRANSFER_SIZE_MASK; retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; return retbits; } @@@ -766,20 -835,26 +837,26 @@@ static void pl08x_choose_master_bus(str /* * Fills in one LLI for a certain transfer descriptor and advance the counter */ - static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd, - int num_llis, int len, u32 cctl) + static void pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, + struct pl08x_lli_build_data *bd, + int num_llis, int len, u32 cctl, u32 cctl2) { - struct pl08x_lli *llis_va = bd->txd->llis_va; + u32 offset = num_llis * pl08x->lli_words; + u32 *llis_va = bd->txd->llis_va + offset; dma_addr_t llis_bus = bd->txd->llis_bus; BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); - llis_va[num_llis].cctl = cctl; - llis_va[num_llis].src = bd->srcbus.addr; - llis_va[num_llis].dst = bd->dstbus.addr; - llis_va[num_llis].lli = llis_bus + (num_llis + 1) * - sizeof(struct pl08x_lli); - llis_va[num_llis].lli |= bd->lli_bus; + /* Advance the offset to next LLI. 
*/ + offset += pl08x->lli_words; + + llis_va[PL080_LLI_SRC] = bd->srcbus.addr; + llis_va[PL080_LLI_DST] = bd->dstbus.addr; + llis_va[PL080_LLI_LLI] = (llis_bus + sizeof(u32) * offset); + llis_va[PL080_LLI_LLI] |= bd->lli_bus; + llis_va[PL080_LLI_CCTL] = cctl; + if (pl08x->vd->pl080s) + llis_va[PL080S_LLI_CCTL2] = cctl2; if (cctl & PL080_CONTROL_SRC_INCR) bd->srcbus.addr += len; @@@ -791,14 -866,53 +868,53 @@@ bd->remainder -= len; } - static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, - u32 *cctl, u32 len, int num_llis, size_t *total_bytes) + static inline void prep_byte_width_lli(struct pl08x_driver_data *pl08x, + struct pl08x_lli_build_data *bd, u32 *cctl, u32 len, + int num_llis, size_t *total_bytes) { *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); - pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); + pl08x_fill_lli_for_desc(pl08x, bd, num_llis, len, *cctl, len); (*total_bytes) += len; } + #ifdef VERBOSE_DEBUG + static void pl08x_dump_lli(struct pl08x_driver_data *pl08x, + const u32 *llis_va, int num_llis) + { + int i; + + if (pl08x->vd->pl080s) { + dev_vdbg(&pl08x->adev->dev, + "%-3s %-9s %-10s %-10s %-10s %-10s %s\n", + "lli", "", "csrc", "cdst", "clli", "cctl", "cctl2"); + for (i = 0; i < num_llis; i++) { + dev_vdbg(&pl08x->adev->dev, + "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", + i, llis_va, llis_va[PL080_LLI_SRC], + llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], + llis_va[PL080_LLI_CCTL], + llis_va[PL080S_LLI_CCTL2]); + llis_va += pl08x->lli_words; + } + } else { + dev_vdbg(&pl08x->adev->dev, + "%-3s %-9s %-10s %-10s %-10s %s\n", + "lli", "", "csrc", "cdst", "clli", "cctl"); + for (i = 0; i < num_llis; i++) { + dev_vdbg(&pl08x->adev->dev, + "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", + i, llis_va, llis_va[PL080_LLI_SRC], + llis_va[PL080_LLI_DST], llis_va[PL080_LLI_LLI], + llis_va[PL080_LLI_CCTL]); + llis_va += pl08x->lli_words; + } + } + } + #else + static inline void pl08x_dump_lli(struct pl08x_driver_data *pl08x, + const u32 *llis_va, int num_llis) {} + #endif + /* * This fills in the table of LLIs for the transfer descriptor * Note that we assume we never have to change the burst sizes @@@ -812,7 -926,7 +928,7 @@@ static int pl08x_fill_llis_for_desc(str int num_llis = 0; u32 cctl, early_bytes = 0; size_t max_bytes_per_lli, total_bytes; - struct pl08x_lli *llis_va; + u32 *llis_va, *last_lli; struct pl08x_sg *dsg; txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); @@@ -847,13 -961,10 +963,13 @@@ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); - dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", - bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", + dev_vdbg(&pl08x->adev->dev, + "src=0x%08llx%s/%u dst=0x%08llx%s/%u len=%zu\n", + (u64)bd.srcbus.addr, + cctl & PL080_CONTROL_SRC_INCR ? "+" : "", bd.srcbus.buswidth, - bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", + (u64)bd.dstbus.addr, + cctl & PL080_CONTROL_DST_INCR ? 
"+" : "", bd.dstbus.buswidth, bd.remainder); dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", @@@ -891,8 -1002,8 +1007,8 @@@ return 0; } - if ((bd.srcbus.addr % bd.srcbus.buswidth) || - (bd.dstbus.addr % bd.dstbus.buswidth)) { + if (!IS_BUS_ALIGNED(&bd.srcbus) || + !IS_BUS_ALIGNED(&bd.dstbus)) { dev_err(&pl08x->adev->dev, "%s src & dst address must be aligned to src" " & dst width if peripheral is flow controller", @@@ -902,7 -1013,8 +1018,8 @@@ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, bd.dstbus.buswidth, 0); - pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl); + pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, + 0, cctl, 0); break; } @@@ -913,9 -1025,9 +1030,9 @@@ */ if (bd.remainder < mbus->buswidth) early_bytes = bd.remainder; - else if ((mbus->addr) % (mbus->buswidth)) { - early_bytes = mbus->buswidth - (mbus->addr) % - (mbus->buswidth); + else if (!IS_BUS_ALIGNED(mbus)) { + early_bytes = mbus->buswidth - + (mbus->addr & (mbus->buswidth - 1)); if ((bd.remainder - early_bytes) < mbus->buswidth) early_bytes = bd.remainder; } @@@ -924,8 -1036,8 +1041,8 @@@ dev_vdbg(&pl08x->adev->dev, "%s byte width LLIs (remain 0x%08x)\n", __func__, bd.remainder); - prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, - &total_bytes); + prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes, + num_llis++, &total_bytes); } if (bd.remainder) { @@@ -933,7 -1045,7 +1050,7 @@@ * Master now aligned * - if slave is not then we must set its width down */ - if (sbus->addr % sbus->buswidth) { + if (!IS_BUS_ALIGNED(sbus)) { dev_dbg(&pl08x->adev->dev, "%s set down bus width to one byte\n", __func__); @@@ -946,7 -1058,7 +1063,7 @@@ * MIN(buswidths) */ max_bytes_per_lli = bd.srcbus.buswidth * - PL080_CONTROL_TRANSFER_SIZE_MASK; + pl08x->vd->max_transfer_size; dev_vdbg(&pl08x->adev->dev, "%s max bytes per lli = %zu\n", __func__, max_bytes_per_lli); @@@ -981,8 -1093,8 +1098,8 @@@ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, bd.dstbus.buswidth, tsize); - pl08x_fill_lli_for_desc(&bd, num_llis++, - lli_len, cctl); + pl08x_fill_lli_for_desc(pl08x, &bd, num_llis++, + lli_len, cctl, tsize); total_bytes += lli_len; } @@@ -993,8 -1105,8 +1110,8 @@@ dev_vdbg(&pl08x->adev->dev, "%s align with boundary, send odd bytes (remain %zu)\n", __func__, bd.remainder); - prep_byte_width_lli(&bd, &cctl, bd.remainder, - num_llis++, &total_bytes); + prep_byte_width_lli(pl08x, &bd, &cctl, + bd.remainder, num_llis++, &total_bytes); } } @@@ -1008,33 -1120,25 +1125,25 @@@ if (num_llis >= MAX_NUM_TSFR_LLIS) { dev_err(&pl08x->adev->dev, "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", - __func__, (u32) MAX_NUM_TSFR_LLIS); + __func__, MAX_NUM_TSFR_LLIS); return 0; } } llis_va = txd->llis_va; - /* The final LLI terminates the LLI. */ - llis_va[num_llis - 1].lli = 0; - /* The final LLI element shall also fire an interrupt. */ - llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; - - #ifdef VERBOSE_DEBUG - { - int i; + last_lli = llis_va + (num_llis - 1) * pl08x->lli_words; - dev_vdbg(&pl08x->adev->dev, - "%-3s %-9s %-10s %-10s %-10s %s\n", - "lli", "", "csrc", "cdst", "clli", "cctl"); - for (i = 0; i < num_llis; i++) { - dev_vdbg(&pl08x->adev->dev, - "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n", - i, &llis_va[i], llis_va[i].src, - llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl - ); - } + if (txd->cyclic) { + /* Link back to the first LLI. */ + last_lli[PL080_LLI_LLI] = txd->llis_bus | bd.lli_bus; + } else { + /* The final LLI terminates the LLI. */ + last_lli[PL080_LLI_LLI] = 0; + /* The final LLI element shall also fire an interrupt. 
*/ + last_lli[PL080_LLI_CCTL] |= PL080_CONTROL_TC_IRQ_EN; } - #endif + + pl08x_dump_lli(pl08x, llis_va, num_llis); return num_llis; } @@@ -1310,6 -1414,7 +1419,7 @@@ static int dma_set_runtime_config(struc struct dma_slave_config *config) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_driver_data *pl08x = plchan->host; if (!plchan->slave) return -EINVAL; @@@ -1319,6 -1424,13 +1429,13 @@@ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; + if (config->device_fc && pl08x->vd->pl080s) { + dev_err(&pl08x->adev->dev, + "%s: PL080S does not support peripheral flow control\n", + __func__); + return -EINVAL; + } + plchan->cfg = *config; return 0; @@@ -1409,25 -1521,19 +1526,19 @@@ static struct dma_async_tx_descriptor * return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } - static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( - struct dma_chan *chan, struct scatterlist *sgl, - unsigned int sg_len, enum dma_transfer_direction direction, - unsigned long flags, void *context) + static struct pl08x_txd *pl08x_init_txd( + struct dma_chan *chan, + enum dma_transfer_direction direction, + dma_addr_t *slave_addr) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; struct pl08x_txd *txd; - struct pl08x_sg *dsg; - struct scatterlist *sg; enum dma_slave_buswidth addr_width; - dma_addr_t slave_addr; int ret, tmp; u8 src_buses, dst_buses; u32 maxburst, cctl; - dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", - __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); @@@ -1441,14 -1547,14 +1552,14 @@@ */ if (direction == DMA_MEM_TO_DEV) { cctl = PL080_CONTROL_SRC_INCR; - slave_addr = plchan->cfg.dst_addr; + *slave_addr = plchan->cfg.dst_addr; addr_width = plchan->cfg.dst_addr_width; maxburst = plchan->cfg.dst_maxburst; src_buses = pl08x->mem_buses; dst_buses = plchan->cd->periph_buses; } else if (direction == DMA_DEV_TO_MEM) { cctl = PL080_CONTROL_DST_INCR; - slave_addr = plchan->cfg.src_addr; + *slave_addr = plchan->cfg.src_addr; addr_width = plchan->cfg.src_addr_width; maxburst = plchan->cfg.src_maxburst; src_buses = plchan->cd->periph_buses; @@@ -1497,24 -1603,107 +1608,107 @@@ else txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; + return txd; + } + + static int pl08x_tx_add_sg(struct pl08x_txd *txd, + enum dma_transfer_direction direction, + dma_addr_t slave_addr, + dma_addr_t buf_addr, + unsigned int len) + { + struct pl08x_sg *dsg; + + dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); + if (!dsg) + return -ENOMEM; + + list_add_tail(&dsg->node, &txd->dsg_list); + + dsg->len = len; + if (direction == DMA_MEM_TO_DEV) { + dsg->src_addr = buf_addr; + dsg->dst_addr = slave_addr; + } else { + dsg->src_addr = slave_addr; + dsg->dst_addr = buf_addr; + } + + return 0; + } + + static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) + { + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_txd *txd; + struct scatterlist *sg; + int ret, tmp; + dma_addr_t slave_addr; + + dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", + __func__, sg_dma_len(sgl), plchan->name); + + txd = pl08x_init_txd(chan, direction, &slave_addr); + if (!txd) + return NULL; + 
for_each_sg(sgl, sg, sg_len, tmp) { - dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); - if (!dsg) { + ret = pl08x_tx_add_sg(txd, direction, slave_addr, + sg_dma_address(sg), + sg_dma_len(sg)); + if (ret) { pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); return NULL; } - list_add_tail(&dsg->node, &txd->dsg_list); + } - dsg->len = sg_dma_len(sg); - if (direction == DMA_MEM_TO_DEV) { - dsg->src_addr = sg_dma_address(sg); - dsg->dst_addr = slave_addr; - } else { - dsg->src_addr = slave_addr; - dsg->dst_addr = sg_dma_address(sg); + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); + return NULL; + } + + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); + } + + static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) + { + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_driver_data *pl08x = plchan->host; + struct pl08x_txd *txd; + int ret, tmp; + dma_addr_t slave_addr; + + dev_dbg(&pl08x->adev->dev, + "%s prepare cyclic transaction of %d/%d bytes %s %s\n", + __func__, period_len, buf_len, + direction == DMA_MEM_TO_DEV ? "to" : "from", + plchan->name); + + txd = pl08x_init_txd(chan, direction, &slave_addr); + if (!txd) + return NULL; + + txd->cyclic = true; + txd->cctl |= PL080_CONTROL_TC_IRQ_EN; + for (tmp = 0; tmp < buf_len; tmp += period_len) { + ret = pl08x_tx_add_sg(txd, direction, slave_addr, + buf_addr + tmp, period_len); + if (ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); + return NULL; } } @@@ -1657,7 -1846,9 +1851,9 @@@ static irqreturn_t pl08x_irq(int irq, v spin_lock(&plchan->vc.lock); tx = plchan->at; - if (tx) { + if (tx && tx->cyclic) { + vchan_cyclic_callback(&tx->vd); + } else if (tx) { plchan->at = NULL; /* * This descriptor is done, release its mux @@@ -1851,6 -2042,7 +2047,7 @@@ static int pl08x_probe(struct amba_devi { struct pl08x_driver_data *pl08x; const struct vendor_data *vd = id->data; + u32 tsfr_size; int ret = 0; int i; @@@ -1878,6 -2070,7 +2075,7 @@@ /* Initialize slave engine */ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask); pl08x->slave.dev = &adev->dev; pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; @@@ -1885,6 -2078,7 +2083,7 @@@ pl08x->slave.device_tx_status = pl08x_dma_tx_status; pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; + pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; pl08x->slave.device_control = pl08x_control; /* Get the platform data */ @@@ -1907,9 -2101,15 +2106,15 @@@ pl08x->mem_buses = pl08x->pd->mem_buses; } + if (vd->pl080s) + pl08x->lli_words = PL080S_LLI_WORDS; + else + pl08x->lli_words = PL080_LLI_WORDS; + tsfr_size = MAX_NUM_TSFR_LLIS * pl08x->lli_words * sizeof(u32); + /* A DMA memory pool for LLIs, align on 1-byte boundary */ pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, - PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); + tsfr_size, PL08X_ALIGN, 0); if (!pl08x->pool) { ret = -ENOMEM; goto out_no_lli_pool; @@@ -1952,6 -2152,7 +2157,7 @@@ ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); + ch->reg_config = ch->base + vd->config_offset; spin_lock_init(&ch->lock); 
/* @@@ -1962,7 -2163,7 +2168,7 @@@ if (vd->nomadik) { u32 val; - val = readl(ch->base + PL080_CH_CONFIG); + val = readl(ch->reg_config); if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) { dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i); ch->locked = true; @@@ -2013,8 -2214,8 +2219,8 @@@ amba_set_drvdata(adev, pl08x); init_pl08x_debugfs(pl08x); - dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", - amba_part(adev), amba_rev(adev), + dev_info(&pl08x->adev->dev, "DMA: PL%03x%s rev%u at 0x%08llx irq %d\n", + amba_part(adev), pl08x->vd->pl080s ? "s" : "", amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); return 0; @@@ -2043,22 -2244,41 +2249,41 @@@ out_no_pl08x /* PL080 has 8 channels and the PL080 have just 2 */ static struct vendor_data vendor_pl080 = { + .config_offset = PL080_CH_CONFIG, .channels = 8, .dualmaster = true, + .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, }; static struct vendor_data vendor_nomadik = { + .config_offset = PL080_CH_CONFIG, .channels = 8, .dualmaster = true, .nomadik = true, + .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, + }; + + static struct vendor_data vendor_pl080s = { + .config_offset = PL080S_CH_CONFIG, + .channels = 8, + .pl080s = true, + .max_transfer_size = PL080S_CONTROL_TRANSFER_SIZE_MASK, }; static struct vendor_data vendor_pl081 = { + .config_offset = PL080_CH_CONFIG, .channels = 2, .dualmaster = false, + .max_transfer_size = PL080_CONTROL_TRANSFER_SIZE_MASK, }; static struct amba_id pl08x_ids[] = { + /* Samsung PL080S variant */ + { + .id = 0x0a141080, + .mask = 0xffffffff, + .data = &vendor_pl080s, + }, /* PL080 */ { .id = 0x00041080, diff --combined drivers/dma/dmaengine.c index eee16b01fa89,d7d94d21a038..9162ac80c18f --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@@ -87,8 -87,7 +87,8 @@@ static struct dma_chan *dev_to_dma_chan return chan_dev->chan; } -static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t memcpy_count_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct dma_chan *chan; unsigned long count = 0; @@@ -107,10 -106,9 +107,10 @@@ return err; } +static DEVICE_ATTR_RO(memcpy_count); -static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t bytes_transferred_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct dma_chan *chan; unsigned long count = 0; @@@ -129,10 -127,8 +129,10 @@@ return err; } +static DEVICE_ATTR_RO(bytes_transferred); -static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t in_use_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct dma_chan *chan; int err; @@@ -147,15 -143,13 +147,15 @@@ return err; } +static DEVICE_ATTR_RO(in_use); -static struct device_attribute dma_attrs[] = { - __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL), - __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL), - __ATTR(in_use, S_IRUGO, show_in_use, NULL), - __ATTR_NULL +static struct attribute *dma_dev_attrs[] = { + &dev_attr_memcpy_count.attr, + &dev_attr_bytes_transferred.attr, + &dev_attr_in_use.attr, + NULL, }; +ATTRIBUTE_GROUPS(dma_dev); static void chan_dev_release(struct device *dev) { @@@ -173,7 -167,7 +173,7 @@@ static struct class dma_devclass = { .name = "dma", - .dev_attrs = dma_attrs, + .dev_groups = dma_dev_groups, .dev_release = chan_dev_release, }; @@@ -382,30 
-376,20 +382,30 @@@ void dma_issue_pending_all(void EXPORT_SYMBOL(dma_issue_pending_all); /** - * nth_chan - returns the nth channel of the given capability + * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu + */ +static bool dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * min_chan - returns the channel with min count and in the same numa-node as the cpu * @cap: capability to match - * @n: nth channel desired + * @cpu: cpu index which the channel should be close to * - * Defaults to returning the channel with the desired capability and the - * lowest reference count when 'n' cannot be satisfied. Must be called - * under dma_list_mutex. + * If some channels are close to the given cpu, the one with the lowest + * reference count is returned. Otherwise, cpu is ignored and only the + * reference count is taken into account. + * Must be called under dma_list_mutex. */ -static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n) +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) { struct dma_device *device; struct dma_chan *chan; - struct dma_chan *ret = NULL; struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; list_for_each_entry(device, &dma_device_list, global_node) { if (!dma_has_cap(cap, device->cap_mask) || @@@ -414,22 -398,27 +414,22 @@@ list_for_each_entry(chan, &device->channels, device_node) { if (!chan->client_count) continue; - if (!min) - min = chan; - else if (chan->table_count < min->table_count) + if (!min || chan->table_count < min->table_count) min = chan; - if (n-- == 0) { - ret = chan; - break; /* done */ - } + if (dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; } - if (ret) - break; /* done */ } - if (!ret) - ret = min; + chan = localmin ? 
localmin : min; - if (ret) - ret->table_count++; + if (chan) + chan->table_count++; - return ret; + return chan; } /** @@@ -446,6 -435,7 +446,6 @@@ static void dma_channel_rebalance(void struct dma_device *device; int cpu; int cap; - int n; /* undo the last distribution */ for_each_dma_cap_mask(cap, dma_cap_mask_all) @@@ -464,9 -454,14 +464,9 @@@ return; /* redistribute available channels */ - n = 0; for_each_dma_cap_mask(cap, dma_cap_mask_all) for_each_online_cpu(cpu) { - if (num_possible_cpus() > 1) - chan = nth_chan(cap, n++); - else - chan = nth_chan(cap, -1); - + chan = min_chan(cap, cpu); per_cpu_ptr(channel_table[cap], cpu)->chan = chan; } } @@@ -509,7 -504,33 +509,33 @@@ static struct dma_chan *private_candida } /** - * dma_request_channel - try to allocate an exclusive channel + * dma_request_slave_channel - try to get specific channel exclusively + * @chan: target channel + */ + struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) + { + int err = -EBUSY; + + /* lock against __dma_request_channel */ + mutex_lock(&dma_list_mutex); + + if (chan->client_count == 0) { + err = dma_chan_get(chan); + if (err) + pr_debug("%s: failed to get %s: (%d)\n", + __func__, dma_chan_name(chan), err); + } else + chan = NULL; + + mutex_unlock(&dma_list_mutex); + + + return chan; + } + EXPORT_SYMBOL_GPL(dma_get_slave_channel); + + /** + * __dma_request_channel - try to allocate an exclusive channel * @mask: capabilities that the channel must satisfy * @fn: optional callback to disposition available channels * @fn_param: opaque parameter to pass to dma_filter_fn diff --combined drivers/dma/mv_xor.c index 0ec086d2b6a0,d9a26777a1b0..536dcb8ba5fd --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@@ -64,7 -64,7 +64,7 @@@ static u32 mv_desc_get_src_addr(struct int src_idx) { struct mv_xor_desc *hw_desc = desc->hw_desc; - return hw_desc->phy_src_addr[src_idx]; + return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)]; } @@@ -107,32 -107,32 +107,32 @@@ static void mv_desc_set_src_addr(struc int index, dma_addr_t addr) { struct mv_xor_desc *hw_desc = desc->hw_desc; - hw_desc->phy_src_addr[index] = addr; + hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr; if (desc->type == DMA_XOR) hw_desc->desc_command |= (1 << index); } static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) { - return __raw_readl(XOR_CURR_DESC(chan)); + return readl_relaxed(XOR_CURR_DESC(chan)); } static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, u32 next_desc_addr) { - __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); + writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan)); } static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) { - u32 val = __raw_readl(XOR_INTR_MASK(chan)); + u32 val = readl_relaxed(XOR_INTR_MASK(chan)); val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); - __raw_writel(val, XOR_INTR_MASK(chan)); + writel_relaxed(val, XOR_INTR_MASK(chan)); } static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) { - u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); + u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan)); intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; return intr_cause; } @@@ -149,13 -149,13 +149,13 @@@ static void mv_xor_device_clear_eoc_cau { u32 val = ~(1 << (chan->idx * 16)); dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); - __raw_writel(val, XOR_INTR_CAUSE(chan)); + writel_relaxed(val, XOR_INTR_CAUSE(chan)); } static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) { u32 val = 0xFFFF0000 >> (chan->idx * 16); - __raw_writel(val, 
XOR_INTR_CAUSE(chan)); + writel_relaxed(val, XOR_INTR_CAUSE(chan)); } static int mv_can_chain(struct mv_xor_desc_slot *desc) @@@ -173,7 -173,7 +173,7 @@@ static void mv_set_mode(struct mv_xor_c enum dma_transaction_type type) { u32 op_mode; - u32 config = __raw_readl(XOR_CONFIG(chan)); + u32 config = readl_relaxed(XOR_CONFIG(chan)); switch (type) { case DMA_XOR: @@@ -192,14 -192,7 +192,14 @@@ config &= ~0x7; config |= op_mode; - __raw_writel(config, XOR_CONFIG(chan)); + +#if defined(__BIG_ENDIAN) + config |= XOR_DESCRIPTOR_SWAP; +#else + config &= ~XOR_DESCRIPTOR_SWAP; +#endif + + writel_relaxed(config, XOR_CONFIG(chan)); chan->current_type = type; } @@@ -208,14 -201,14 +208,14 @@@ static void mv_chan_activate(struct mv_ u32 activation; dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); - activation = __raw_readl(XOR_ACTIVATION(chan)); + activation = readl_relaxed(XOR_ACTIVATION(chan)); activation |= 0x1; - __raw_writel(activation, XOR_ACTIVATION(chan)); + writel_relaxed(activation, XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) { - u32 state = __raw_readl(XOR_ACTIVATION(chan)); + u32 state = readl_relaxed(XOR_ACTIVATION(chan)); state = (state >> 4) & 0x3; @@@ -654,7 -647,7 +654,7 @@@ mv_xor_prep_dma_memcpy(struct dma_chan dev_dbg(mv_chan_to_devp(mv_chan), "%s sw_desc %p async_tx %p\n", - __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); + __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL); return sw_desc ? &sw_desc->async_tx : NULL; } @@@ -762,22 -755,22 +762,22 @@@ static void mv_dump_xor_regs(struct mv_ { u32 val; - val = __raw_readl(XOR_CONFIG(chan)); + val = readl_relaxed(XOR_CONFIG(chan)); dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val); - val = __raw_readl(XOR_ACTIVATION(chan)); + val = readl_relaxed(XOR_ACTIVATION(chan)); dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val); - val = __raw_readl(XOR_INTR_CAUSE(chan)); + val = readl_relaxed(XOR_INTR_CAUSE(chan)); dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val); - val = __raw_readl(XOR_INTR_MASK(chan)); + val = readl_relaxed(XOR_INTR_MASK(chan)); dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val); - val = __raw_readl(XOR_ERROR_CAUSE(chan)); + val = readl_relaxed(XOR_ERROR_CAUSE(chan)); dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val); - val = __raw_readl(XOR_ERROR_ADDR(chan)); + val = readl_relaxed(XOR_ERROR_ADDR(chan)); dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val); } @@@ -1036,8 -1029,10 +1036,8 @@@ mv_xor_channel_add(struct mv_xor_devic struct dma_device *dma_dev; mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); - if (!mv_chan) { - ret = -ENOMEM; - goto err_free_dma; - } + if (!mv_chan) + return ERR_PTR(-ENOMEM); mv_chan->idx = idx; mv_chan->irq = irq; @@@ -1171,7 -1166,7 +1171,7 @@@ static int mv_xor_probe(struct platform { const struct mbus_dram_target_info *dram; struct mv_xor_device *xordev; - struct mv_xor_platform_data *pdata = pdev->dev.platform_data; + struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev); struct resource *res; int i, ret; diff --combined drivers/dma/sh/shdmac.c index 000000000000,3d0472b78f5f..1069e8869f20 mode 000000,100644..100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@@ -1,0 -1,954 +1,954 @@@ + /* + * Renesas SuperH DMA Engine support + * + * base is drivers/dma/flsdma.c + * + * Copyright (C) 2011-2012 Guennadi Liakhovetski + * Copyright (C) 2009 Nobuhiro Iwamatsu + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * - DMA of SuperH does not have Hardware DMA chain mode. + * - MAX DMA size is 16MB. + * + */ + + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + + #include "../dmaengine.h" + #include "shdma.h" + + /* DMA register */ + #define SAR 0x00 + #define DAR 0x04 + #define TCR 0x08 + #define CHCR 0x0C + #define DMAOR 0x40 + + #define TEND 0x18 /* USB-DMAC */ + + #define SH_DMAE_DRV_NAME "sh-dma-engine" + + /* Default MEMCPY transfer size = 2^2 = 4 bytes */ + #define LOG2_DEFAULT_XFER_SIZE 2 + #define SH_DMA_SLAVE_NUMBER 256 + #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) + + /* + * Used for write-side mutual exclusion for the global device list, + * read-side synchronization by way of RCU, and per-controller data. + */ + static DEFINE_SPINLOCK(sh_dmae_lock); + static LIST_HEAD(sh_dmae_devices); + + /* + * Different DMAC implementations provide different ways to clear DMA channels: + * (1) none - no CHCLR registers are available + * (2) one CHCLR register per channel - 0 has to be written to it to clear + * channel buffers + * (3) one CHCLR per several channels - 1 has to be written to the bit, + * corresponding to the specific channel to reset it + */ + static void channel_clear(struct sh_dmae_chan *sh_dc) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel + + sh_dc->shdma_chan.id; + u32 val = shdev->pdata->chclr_bitwise ? 
1 << chan_pdata->chclr_bit : 0; + + __raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset); + } + + static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) + { + __raw_writel(data, sh_dc->base + reg); + } + + static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) + { + return __raw_readl(sh_dc->base + reg); + } + + static u16 dmaor_read(struct sh_dmae_device *shdev) + { + void __iomem *addr = shdev->chan_reg + DMAOR; + + if (shdev->pdata->dmaor_is_32bit) + return __raw_readl(addr); + else + return __raw_readw(addr); + } + + static void dmaor_write(struct sh_dmae_device *shdev, u16 data) + { + void __iomem *addr = shdev->chan_reg + DMAOR; + + if (shdev->pdata->dmaor_is_32bit) + __raw_writel(data, addr); + else + __raw_writew(data, addr); + } + + static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + + __raw_writel(data, sh_dc->base + shdev->chcr_offset); + } + + static u32 chcr_read(struct sh_dmae_chan *sh_dc) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); + + return __raw_readl(sh_dc->base + shdev->chcr_offset); + } + + /* + * Reset DMA controller + * + * SH7780 has two DMAOR registers + */ + static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) + { + unsigned short dmaor; + unsigned long flags; + + spin_lock_irqsave(&sh_dmae_lock, flags); + + dmaor = dmaor_read(shdev); + dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); + + spin_unlock_irqrestore(&sh_dmae_lock, flags); + } + + static int sh_dmae_rst(struct sh_dmae_device *shdev) + { + unsigned short dmaor; + unsigned long flags; + + spin_lock_irqsave(&sh_dmae_lock, flags); + + dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); + + if (shdev->pdata->chclr_present) { + int i; + for (i = 0; i < shdev->pdata->channel_num; i++) { + struct sh_dmae_chan *sh_chan = shdev->chan[i]; + if (sh_chan) + channel_clear(sh_chan); + } + } + + dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); + + dmaor = dmaor_read(shdev); + + spin_unlock_irqrestore(&sh_dmae_lock, flags); + + if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { + dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); + return -EIO; + } + if (shdev->pdata->dmaor_init & ~dmaor) + dev_warn(shdev->shdma_dev.dma_dev.dev, + "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", + dmaor, shdev->pdata->dmaor_init); + return 0; + } + + static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) + { + u32 chcr = chcr_read(sh_chan); + + if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) + return true; /* working */ + + return false; /* waiting */ + } + + static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + const struct sh_dmae_pdata *pdata = shdev->pdata; + int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | + ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); + + if (cnt >= pdata->ts_shift_num) + cnt = 0; + + return pdata->ts_shift[cnt]; + } + + static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + const struct sh_dmae_pdata *pdata = shdev->pdata; + int i; + + for (i = 0; i < pdata->ts_shift_num; i++) + if (pdata->ts_shift[i] == l2size) + break; + + if (i == pdata->ts_shift_num) + i = 0; + + return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | + ((i << pdata->ts_high_shift) & pdata->ts_high_mask); + } + + static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) + { +
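/* SAR/DAR take byte addresses; TCR counts transfer-size units, hence the xmit_shift scaling below */ +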
sh_dmae_writel(sh_chan, hw->sar, SAR); + sh_dmae_writel(sh_chan, hw->dar, DAR); + sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); + } + + static void dmae_start(struct sh_dmae_chan *sh_chan) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + u32 chcr = chcr_read(sh_chan); + + if (shdev->pdata->needs_tend_set) + sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); + + chcr |= CHCR_DE | shdev->chcr_ie_bit; + chcr_write(sh_chan, chcr & ~CHCR_TE); + } + + static void dmae_init(struct sh_dmae_chan *sh_chan) + { + /* + * Default configuration for dual address memory-memory transfer. + * 0x400 represents auto-request. + */ + u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, + LOG2_DEFAULT_XFER_SIZE); + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); + chcr_write(sh_chan, chcr); + } + + static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) + { + /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ + if (dmae_is_busy(sh_chan)) + return -EBUSY; + + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); + chcr_write(sh_chan, val); + + return 0; + } + + static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + const struct sh_dmae_pdata *pdata = shdev->pdata; + const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; + void __iomem *addr = shdev->dmars; + unsigned int shift = chan_pdata->dmars_bit; + + if (dmae_is_busy(sh_chan)) + return -EBUSY; + + if (pdata->no_dmars) + return 0; + + /* in the case of a missing DMARS resource use first memory window */ + if (!addr) + addr = shdev->chan_reg; + addr += chan_pdata->dmars; + + __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), + addr); + + return 0; + } + + static void sh_dmae_start_xfer(struct shdma_chan *schan, + struct shdma_desc *sdesc) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + struct sh_dmae_desc *sh_desc = container_of(sdesc, + struct sh_dmae_desc, shdma_desc); + dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", + sdesc->async_tx.cookie, sh_chan->shdma_chan.id, + sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); + /* Get the ld start address from ld_queue */ + dmae_set_reg(sh_chan, &sh_desc->hw); + dmae_start(sh_chan); + } + + static bool sh_dmae_channel_busy(struct shdma_chan *schan) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + return dmae_is_busy(sh_chan); + } + + static void sh_dmae_setup_xfer(struct shdma_chan *schan, + int slave_id) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + + if (slave_id >= 0) { + const struct sh_dmae_slave_config *cfg = + sh_chan->config; + + dmae_set_dmars(sh_chan, cfg->mid_rid); + dmae_set_chcr(sh_chan, cfg->chcr); + } else { + dmae_init(sh_chan); + } + } + + /* + * Find a slave channel configuration from the controller list by either a slave + * ID in the non-DT case, or by a MID/RID value in the DT case + */ + static const struct sh_dmae_slave_config *dmae_find_slave( + struct sh_dmae_chan *sh_chan, int match) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + const struct sh_dmae_pdata *pdata = shdev->pdata; + const struct sh_dmae_slave_config *cfg; + int i; + + if (!sh_chan->shdma_chan.dev->of_node) { + if (match >= SH_DMA_SLAVE_NUMBER) + return NULL; + + for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) + if (cfg->slave_id == match) + return cfg; + }
else { + for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) + if (cfg->mid_rid == match) { + sh_chan->shdma_chan.slave_id = i; + return cfg; + } + } + + return NULL; + } + + static int sh_dmae_set_slave(struct shdma_chan *schan, + int slave_id, dma_addr_t slave_addr, bool try) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); + if (!cfg) + return -ENXIO; + + if (!try) { + sh_chan->config = cfg; + sh_chan->slave_addr = slave_addr ? : cfg->addr; + } + + return 0; + } + + static void dmae_halt(struct sh_dmae_chan *sh_chan) + { + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); + u32 chcr = chcr_read(sh_chan); + + chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); + chcr_write(sh_chan, chcr); + } + + static int sh_dmae_desc_setup(struct shdma_chan *schan, + struct shdma_desc *sdesc, + dma_addr_t src, dma_addr_t dst, size_t *len) + { + struct sh_dmae_desc *sh_desc = container_of(sdesc, + struct sh_dmae_desc, shdma_desc); + + if (*len > schan->max_xfer_len) + *len = schan->max_xfer_len; + + sh_desc->hw.sar = src; + sh_desc->hw.dar = dst; + sh_desc->hw.tcr = *len; + + return 0; + } + + static void sh_dmae_halt(struct shdma_chan *schan) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + dmae_halt(sh_chan); + } + + static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + + if (!(chcr_read(sh_chan) & CHCR_TE)) + return false; + + /* DMA stop */ + dmae_halt(sh_chan); + + return true; + } + + static size_t sh_dmae_get_partial(struct shdma_chan *schan, + struct shdma_desc *sdesc) + { + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, + shdma_chan); + struct sh_dmae_desc *sh_desc = container_of(sdesc, + struct sh_dmae_desc, shdma_desc); - return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << - sh_chan->xmit_shift; ++ return sh_desc->hw.tcr - ++ (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); + } + + /* Called from error IRQ or NMI */ + static bool sh_dmae_reset(struct sh_dmae_device *shdev) + { + bool ret; + + /* halt the dma controller */ + sh_dmae_ctl_stop(shdev); + + /* We cannot detect, which channel caused the error, have to reset all */ + ret = shdma_reset(&shdev->shdma_dev); + + sh_dmae_rst(shdev); + + return ret; + } + + static irqreturn_t sh_dmae_err(int irq, void *data) + { + struct sh_dmae_device *shdev = data; + + if (!(dmaor_read(shdev) & DMAOR_AE)) + return IRQ_NONE; + + sh_dmae_reset(shdev); + return IRQ_HANDLED; + } + + static bool sh_dmae_desc_completed(struct shdma_chan *schan, + struct shdma_desc *sdesc) + { + struct sh_dmae_chan *sh_chan = container_of(schan, + struct sh_dmae_chan, shdma_chan); + struct sh_dmae_desc *sh_desc = container_of(sdesc, + struct sh_dmae_desc, shdma_desc); + u32 sar_buf = sh_dmae_readl(sh_chan, SAR); + u32 dar_buf = sh_dmae_readl(sh_chan, DAR); + + return (sdesc->direction == DMA_DEV_TO_MEM && + (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || + (sdesc->direction != DMA_DEV_TO_MEM && + (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); + } + + static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) + { + /* Fast path out if NMIF is not asserted for this controller */ + if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) + return false; + + return sh_dmae_reset(shdev); + } + + static int sh_dmae_nmi_handler(struct notifier_block *self, + unsigned long 
cmd, void *data) + { + struct sh_dmae_device *shdev; + int ret = NOTIFY_DONE; + bool triggered; + + /* + * Only concern ourselves with NMI events. + * + * Normally we would check the die chain value, but as this needs + * to be architecture independent, check for NMI context instead. + */ + if (!in_nmi()) + return NOTIFY_DONE; + + rcu_read_lock(); + list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { + /* + * Only stop if one of the controllers has NMIF asserted, + * we do not want to interfere with regular address error + * handling or NMI events that don't concern the DMACs. + */ + triggered = sh_dmae_nmi_notify(shdev); + if (triggered == true) + ret = NOTIFY_OK; + } + rcu_read_unlock(); + + return ret; + } + + static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { + .notifier_call = sh_dmae_nmi_handler, + + /* Run before NMI debug handler and KGDB */ + .priority = 1, + }; + + static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, + int irq, unsigned long flags) + { + const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; + struct shdma_dev *sdev = &shdev->shdma_dev; + struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); + struct sh_dmae_chan *sh_chan; + struct shdma_chan *schan; + int err; + + sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan), + GFP_KERNEL); + if (!sh_chan) { + dev_err(sdev->dma_dev.dev, + "No free memory for allocating dma channels!\n"); + return -ENOMEM; + } + + schan = &sh_chan->shdma_chan; + schan->max_xfer_len = SH_DMA_TCR_MAX + 1; + + shdma_chan_probe(sdev, schan, id); + + sh_chan->base = shdev->chan_reg + chan_pdata->offset; + + /* set up channel irq */ + if (pdev->id >= 0) + snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), + "sh-dmae%d.%d", pdev->id, id); + else + snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), + "sh-dma%d", id); + + err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); + if (err) { + dev_err(sdev->dma_dev.dev, + "DMA channel %d request_irq error %d\n", + id, err); + goto err_no_irq; + } + + shdev->chan[id] = sh_chan; + return 0; + + err_no_irq: + /* remove from dmaengine device node */ + shdma_chan_remove(schan); + return err; + } + + static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) + { + struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; + struct shdma_chan *schan; + int i; + + shdma_for_each_chan(schan, &shdev->shdma_dev, i) { + BUG_ON(!schan); + + shdma_chan_remove(schan); + } + dma_dev->chancnt = 0; + } + + static void sh_dmae_shutdown(struct platform_device *pdev) + { + struct sh_dmae_device *shdev = platform_get_drvdata(pdev); + sh_dmae_ctl_stop(shdev); + } + + static int sh_dmae_runtime_suspend(struct device *dev) + { + return 0; + } + + static int sh_dmae_runtime_resume(struct device *dev) + { + struct sh_dmae_device *shdev = dev_get_drvdata(dev); + + return sh_dmae_rst(shdev); + } + + #ifdef CONFIG_PM + static int sh_dmae_suspend(struct device *dev) + { + return 0; + } + + static int sh_dmae_resume(struct device *dev) + { + struct sh_dmae_device *shdev = dev_get_drvdata(dev); + int i, ret; + + ret = sh_dmae_rst(shdev); + if (ret < 0) + dev_err(dev, "Failed to reset!\n"); + + for (i = 0; i < shdev->pdata->channel_num; i++) { + struct sh_dmae_chan *sh_chan = shdev->chan[i]; + + if (!sh_chan->shdma_chan.desc_num) + continue; + + if (sh_chan->shdma_chan.slave_id >= 0) { + const struct sh_dmae_slave_config *cfg = sh_chan->config; + dmae_set_dmars(sh_chan, cfg->mid_rid); + dmae_set_chcr(sh_chan, cfg->chcr); + } else { + 
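/* no slave config: fall back to the default memory-to-memory setup */ +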
dmae_init(sh_chan); + } + } + + return 0; + } + #else + #define sh_dmae_suspend NULL + #define sh_dmae_resume NULL + #endif + + const struct dev_pm_ops sh_dmae_pm = { + .suspend = sh_dmae_suspend, + .resume = sh_dmae_resume, + .runtime_suspend = sh_dmae_runtime_suspend, + .runtime_resume = sh_dmae_runtime_resume, + }; + + static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) + { + struct sh_dmae_chan *sh_chan = container_of(schan, + struct sh_dmae_chan, shdma_chan); + + /* + * Implicit BUG_ON(!sh_chan->config) + * This is an exclusive slave DMA operation, may only be called after a + * successful slave configuration. + */ + return sh_chan->slave_addr; + } + + static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) + { + return &((struct sh_dmae_desc *)buf)[i].shdma_desc; + } + + static const struct shdma_ops sh_dmae_shdma_ops = { + .desc_completed = sh_dmae_desc_completed, + .halt_channel = sh_dmae_halt, + .channel_busy = sh_dmae_channel_busy, + .slave_addr = sh_dmae_slave_addr, + .desc_setup = sh_dmae_desc_setup, + .set_slave = sh_dmae_set_slave, + .setup_xfer = sh_dmae_setup_xfer, + .start_xfer = sh_dmae_start_xfer, + .embedded_desc = sh_dmae_embedded_desc, + .chan_irq = sh_dmae_chan_irq, + .get_partial = sh_dmae_get_partial, + }; + + static const struct of_device_id sh_dmae_of_match[] = { + {.compatible = "renesas,shdma-r8a73a4", .data = r8a73a4_shdma_devid,}, + {} + }; + MODULE_DEVICE_TABLE(of, sh_dmae_of_match); + + static int sh_dmae_probe(struct platform_device *pdev) + { + const struct sh_dmae_pdata *pdata; + unsigned long irqflags = IRQF_DISABLED, + chan_flag[SH_DMAE_MAX_CHANNELS] = {}; + int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; + int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; + struct sh_dmae_device *shdev; + struct dma_device *dma_dev; + struct resource *chan, *dmars, *errirq_res, *chanirq_res; + + if (pdev->dev.of_node) + pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data; + else + pdata = dev_get_platdata(&pdev->dev); + + /* get platform data */ + if (!pdata || !pdata->channel_num) + return -ENODEV; + + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* DMARS area is optional */ + dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); + /* + * IRQ resources: + * 1. there always must be at least one IRQ IO-resource. On SH4 it is + * the error IRQ, in which case it is the only IRQ in this resource: + * start == end. If it is the only IRQ resource, all channels also + * use the same IRQ. + * 2. DMA channel IRQ resources can be specified one per resource or in + * ranges (start != end) + * 3. iff all events (channels and, optionally, error) on this + * controller use the same IRQ, only one IRQ resource can be + * specified, otherwise there must be one IRQ per channel, even if + * some of them are equal + * 4. 
if all IRQs on this controller are equal or if some specific IRQs + * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be + * requested with the IRQF_SHARED flag + */ + errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!chan || !errirq_res) + return -ENODEV; + + shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device), + GFP_KERNEL); + if (!shdev) { + dev_err(&pdev->dev, "Not enough memory\n"); + return -ENOMEM; + } + + dma_dev = &shdev->shdma_dev.dma_dev; + + shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan); + if (IS_ERR(shdev->chan_reg)) + return PTR_ERR(shdev->chan_reg); + if (dmars) { + shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars); + if (IS_ERR(shdev->dmars)) + return PTR_ERR(shdev->dmars); + } + + if (!pdata->slave_only) + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + if (pdata->slave && pdata->slave_num) + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + + /* Default transfer size of 32 bytes requires 32-byte alignment */ + dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; + + shdev->shdma_dev.ops = &sh_dmae_shdma_ops; + shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); + err = shdma_init(&pdev->dev, &shdev->shdma_dev, + pdata->channel_num); + if (err < 0) + goto eshdma; + + /* platform data */ + shdev->pdata = pdata; + + if (pdata->chcr_offset) + shdev->chcr_offset = pdata->chcr_offset; + else + shdev->chcr_offset = CHCR; + + if (pdata->chcr_ie_bit) + shdev->chcr_ie_bit = pdata->chcr_ie_bit; + else + shdev->chcr_ie_bit = CHCR_IE; + + platform_set_drvdata(pdev, shdev); + + pm_runtime_enable(&pdev->dev); + err = pm_runtime_get_sync(&pdev->dev); + if (err < 0) + dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); + + spin_lock_irq(&sh_dmae_lock); + list_add_tail_rcu(&shdev->node, &sh_dmae_devices); + spin_unlock_irq(&sh_dmae_lock); + + /* reset dma controller - only needed as a test */ + err = sh_dmae_rst(shdev); + if (err) + goto rst_err; + + #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) + chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + + if (!chanirq_res) + chanirq_res = errirq_res; + else + irqres++; + + if (chanirq_res == errirq_res || + (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) + irqflags = IRQF_SHARED; + + errirq = errirq_res->start; + + err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err, irqflags, + "DMAC Address Error", shdev); + if (err) { + dev_err(&pdev->dev, + "DMA failed requesting irq #%d, error %d\n", + errirq, err); + goto eirq_err; + } + + #else + chanirq_res = errirq_res; + #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ + + if (chanirq_res->start == chanirq_res->end && + !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { + /* Special case - all multiplexed */ + for (; irq_cnt < pdata->channel_num; irq_cnt++) { + if (irq_cnt < SH_DMAE_MAX_CHANNELS) { + chan_irq[irq_cnt] = chanirq_res->start; + chan_flag[irq_cnt] = IRQF_SHARED; + } else { + irq_cap = 1; + break; + } + } + } else { + do { + for (i = chanirq_res->start; i <= chanirq_res->end; i++) { + if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { + irq_cap = 1; + break; + } + + if ((errirq_res->flags & IORESOURCE_BITS) == + IORESOURCE_IRQ_SHAREABLE) + chan_flag[irq_cnt] = IRQF_SHARED; + else + chan_flag[irq_cnt] = IRQF_DISABLED; + dev_dbg(&pdev->dev, + "Found IRQ %d for channel %d\n", + i, irq_cnt); + chan_irq[irq_cnt++] = i; + } + + if (irq_cnt >= SH_DMAE_MAX_CHANNELS) + break; + + chanirq_res = platform_get_resource(pdev, + IORESOURCE_IRQ, ++irqres); + } while (irq_cnt < pdata->channel_num && 
chanirq_res); + } + + /* Create DMA Channel */ + for (i = 0; i < irq_cnt; i++) { + err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); + if (err) + goto chan_probe_err; + } + + if (irq_cap) + dev_notice(&pdev->dev, "Attempting to register %d DMA " + "channels when a maximum of %d are supported.\n", + pdata->channel_num, SH_DMAE_MAX_CHANNELS); + + pm_runtime_put(&pdev->dev); + + err = dma_async_device_register(&shdev->shdma_dev.dma_dev); + if (err < 0) + goto edmadevreg; + + return err; + + edmadevreg: + pm_runtime_get(&pdev->dev); + + chan_probe_err: + sh_dmae_chan_remove(shdev); + + #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) + eirq_err: + #endif + rst_err: + spin_lock_irq(&sh_dmae_lock); + list_del_rcu(&shdev->node); + spin_unlock_irq(&sh_dmae_lock); + + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + shdma_cleanup(&shdev->shdma_dev); + eshdma: + synchronize_rcu(); + + return err; + } + + static int sh_dmae_remove(struct platform_device *pdev) + { + struct sh_dmae_device *shdev = platform_get_drvdata(pdev); + struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; + + dma_async_device_unregister(dma_dev); + + spin_lock_irq(&sh_dmae_lock); + list_del_rcu(&shdev->node); + spin_unlock_irq(&sh_dmae_lock); + + pm_runtime_disable(&pdev->dev); + + sh_dmae_chan_remove(shdev); + shdma_cleanup(&shdev->shdma_dev); + + synchronize_rcu(); + + return 0; + } + + static struct platform_driver sh_dmae_driver = { + .driver = { + .owner = THIS_MODULE, + .pm = &sh_dmae_pm, + .name = SH_DMAE_DRV_NAME, + .of_match_table = sh_dmae_of_match, + }, + .remove = sh_dmae_remove, + .shutdown = sh_dmae_shutdown, + }; + + static int __init sh_dmae_init(void) + { + /* Wire up NMI handling */ + int err = register_die_notifier(&sh_dmae_nmi_notifier); + if (err) + return err; + + return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); + } + module_init(sh_dmae_init); + + static void __exit sh_dmae_exit(void) + { + platform_driver_unregister(&sh_dmae_driver); + + unregister_die_notifier(&sh_dmae_nmi_notifier); + } + module_exit(sh_dmae_exit); + + MODULE_AUTHOR("Nobuhiro Iwamatsu "); + MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); + MODULE_LICENSE("GPL"); + MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME); diff --combined include/linux/dmaengine.h index 0c72b89a172c,2601186ab183..0bc727534108 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@@ -38,10 -38,7 +38,10 @@@ typedef s32 dma_cookie_t #define DMA_MIN_COOKIE 1 #define DMA_MAX_COOKIE INT_MAX -#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) +static inline int dma_submit_error(dma_cookie_t cookie) +{ + return cookie < 0 ? 
cookie : 0; +} /** * enum dma_status - DMA transaction status @@@ -373,6 -370,25 +373,25 @@@ struct dma_slave_config unsigned int slave_id; }; + /* struct dma_slave_caps - expose capabilities of a slave channel only + * + * @src_addr_widths: bit mask of src addr widths the channel supports + * @dstn_addr_widths: bit mask of dstn addr widths the channel supports + * @directions: bit mask of slave directions the channel supports + * since the enum dma_transfer_direction is not defined as bits for each + * type of direction, the dma controller should fill (1 << <TYPE>) and the same + * should be checked by the controller as well + * @cmd_pause: true, if pause and thereby resume is supported + * @cmd_terminate: true, if terminate cmd is supported + */ + struct dma_slave_caps { + u32 src_addr_widths; + u32 dstn_addr_widths; + u32 directions; + bool cmd_pause; + bool cmd_terminate; + }; + static inline const char *dma_chan_name(struct dma_chan *chan) { return dev_name(&chan->dev->device); @@@ -535,6 -551,7 +554,7 @@@ struct dma_tx_state * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code + * @device_issue_pending: push pending transactions to hardware + * @device_slave_caps: return the slave channel capabilities */ struct dma_device { @@@ -600,6 -617,7 +620,7 @@@ dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); + int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps); }; static inline int dmaengine_device_control(struct dma_chan *chan, @@@ -673,6 -691,21 +694,21 @@@ static inline struct dma_async_tx_descr return chan->device->device_prep_interleaved_dma(chan, xt, flags); } + static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) + { + if (!chan || !caps) + return -EINVAL; + + /* check if the channel supports slave transactions */ + if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits)) + return -ENXIO; + + if (chan->device->device_slave_caps) + return chan->device->device_slave_caps(chan, caps); + + return -ENXIO; + } + static inline int dmaengine_terminate_all(struct dma_chan *chan) { return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); @@@ -961,9 -994,8 +997,9 @@@ dma_set_tx_state(struct dma_tx_state *s } } -enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); #ifdef CONFIG_DMA_ENGINE +struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); +enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); void dma_issue_pending_all(void); struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, @@@ -971,14 -1003,6 +1007,14 @@@ struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); void dma_release_channel(struct dma_chan *chan); #else +static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) +{ + return NULL; +} +static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) +{ + return DMA_SUCCESS; +} static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { return DMA_SUCCESS; @@@ -1006,6 -1030,8 +1042,7 @@@ static inline void dma_release_channel( int dma_async_device_register(struct dma_device *device); void dma_async_device_unregister(struct dma_device *device); void dma_run_dependencies(struct dma_async_tx_descriptor *tx); -struct dma_chan *dma_find_channel(enum dma_transaction_type
tx_type); + struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); struct dma_chan *net_dma_find_channel(void); #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) #define dma_request_slave_channel_compat(mask, x, y, dev, name) \