Git Repo - linux.git/commitdiff
Merge branches 'apple/dart', 'arm/exynos', 'arm/renesas', 'arm/smmu', 'x86/vt-d'...
author Joerg Roedel <[email protected]>
Sat, 18 Feb 2023 14:43:04 +0000 (15:43 +0100)
committer Joerg Roedel <[email protected]>
Sat, 18 Feb 2023 14:43:04 +0000 (15:43 +0100)
MAINTAINERS
drivers/iommu/amd/iommu.c
drivers/iommu/apple-dart.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/intel/pasid.c
drivers/iommu/iommu.c
drivers/iommu/mtk_iommu_v1.c
drivers/of/address.c
drivers/vfio/vfio_iommu_type1.c

diff --combined MAINTAINERS
index 39ff1a71762527001e86bda7ff290564c3002389,a36df9ed283d394e70c83a5acba9a12527bf4bde,a36df9ed283d394e70c83a5acba9a12527bf4bde,a36df9ed283d394e70c83a5acba9a12527bf4bde,a36df9ed283d394e70c83a5acba9a12527bf4bde,43fd97f0e1dfd28f92a214ff52a9318f1a552645,a36df9ed283d394e70c83a5acba9a12527bf4bde,a36df9ed283d394e70c83a5acba9a12527bf4bde..a265f634e41f596cfc6827fdc50fbef098c2fb49
@@@@@@@@@ -383,7 -383,7 -383,7 -383,7 -383,7 -383,7 -383,7 -383,7 +383,7 @@@@@@@@@ ACPI COMPONENT ARCHITECTURE (ACPICA
        M:      Robert Moore <[email protected]>
        M:      "Rafael J. Wysocki" <[email protected]>
        L:      [email protected]
 -------L:      devel@acpica.org
 +++++++L:      [email protected].org
        S:      Supported
        W:      https://acpica.org/
        W:      https://github.com/acpica/acpica/
        F:      drivers/dma/ptdma/
        
        AMD SEATTLE DEVICE TREE SUPPORT
 -------M:      Brijesh Singh <[email protected]>
        M:      Suravee Suthikulpanit <[email protected]>
        M:      Tom Lendacky <[email protected]>
        S:      Supported
        F:      arch/arm64/boot/dts/amd/
        
        AMD XGBE DRIVER
 -------M:      Tom Lendacky <[email protected]>
        M:      "Shyam Sundar S K" <[email protected]>
        L:      [email protected]
        S:      Supported
        S:      Maintained
        T:      git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
        X:      drivers/media/i2c/
 +++++++F:      arch/arm64/boot/dts/freescale/
 +++++++X:      arch/arm64/boot/dts/freescale/fsl-*
 +++++++X:      arch/arm64/boot/dts/freescale/qoriq-*
        N:      imx
        N:      mxs
        
@@@@@@@@@ -2452,14 -2451,11 -2451,11 -2451,11 -2451,11 -2451,11 -2451,11 -2451,11 +2452,14 @@@@@@@@@ F: drivers/rtc/rtc-mt7622.
        
        ARM/Mediatek SoC support
        M:      Matthias Brugger <[email protected]>
 +++++++R:      AngeloGioacchino Del Regno <[email protected]>
 +++++++L:      [email protected]
        L:      [email protected] (moderated for non-subscribers)
        L:      [email protected] (moderated for non-subscribers)
        S:      Maintained
        W:      https://mtk.wiki.kernel.org/
 -------C:      irc://chat.freenode.net/linux-mediatek
 +++++++C:      irc://irc.libera.chat/linux-mediatek
 +++++++F:      arch/arm/boot/dts/mt2*
        F:      arch/arm/boot/dts/mt6*
        F:      arch/arm/boot/dts/mt7*
        F:      arch/arm/boot/dts/mt8*
@@@@@@@@@ -2467,7 -2463,7 -2463,7 -2463,7 -2463,7 -2463,7 -2463,7 -2463,7 +2467,7 @@@@@@@@@ F:  arch/arm/mach-mediatek
        F:      arch/arm64/boot/dts/mediatek/
        F:      drivers/soc/mediatek/
        N:      mtk
 -------N:      mt[678]
 +++++++N:      mt[2678]
        K:      mediatek
        
        ARM/Mediatek USB3 PHY DRIVER
@@@@@@@@@ -3771,6 -3767,7 -3767,7 -3767,7 -3767,7 -3767,7 -3767,7 -3767,7 +3771,6 @@@@@@@@@ F:  net/bluetooth
        
        BONDING DRIVER
        M:      Jay Vosburgh <[email protected]>
 -------M:      Veaceslav Falico <[email protected]>
        M:      Andy Gospodarek <[email protected]>
        L:      [email protected]
        S:      Supported
@@@@@@@@@ -6951,7 -6948,7 -6948,7 -6948,7 -6948,7 -6948,7 -6948,7 -6948,7 +6951,7 @@@@@@@@@ F:  drivers/gpu/drm/atmel-hlcdc
        DRM DRIVERS FOR BRIDGE CHIPS
        M:      Andrzej Hajda <[email protected]>
        M:      Neil Armstrong <[email protected]>
 -------M:      Robert Foss <robert.foss@linaro.org>
 +++++++M:      Robert Foss <rfoss@kernel.org>
        R:      Laurent Pinchart <[email protected]>
        R:      Jonas Karlman <[email protected]>
        R:      Jernej Skrabec <[email protected]>
@@@@@@@@@ -7619,6 -7616,7 -7616,7 -7616,7 -7616,7 -7616,7 -7616,7 -7616,7 +7619,6 @@@@@@@@@ S:  Maintaine
        F:      drivers/firmware/efi/test/
        
        EFI VARIABLE FILESYSTEM
 -------M:      Matthew Garrett <[email protected]>
        M:      Jeremy Kerr <[email protected]>
        M:      Ard Biesheuvel <[email protected]>
        L:      [email protected]
@@@@@@@@@ -7897,11 -7895,7 -7895,7 -7895,7 -7895,7 -7895,7 -7895,7 -7895,7 +7897,11 @@@@@@@@@ F:        include/linux/extcon
        
        EXTRA BOOT CONFIG
        M:      Masami Hiramatsu <[email protected]>
 +++++++L:      [email protected]
 +++++++L:      [email protected]
 +++++++Q:      https://patchwork.kernel.org/project/linux-trace-kernel/list/
        S:      Maintained
 +++++++T:      git git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git
        F:      Documentation/admin-guide/bootconfig.rst
        F:      fs/proc/bootconfig.c
        F:      include/linux/bootconfig.h
        F:      include/linux/fscache*.h
        
        FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
 +++++++M:      Eric Biggers <[email protected]>
        M:      Theodore Y. Ts'o <[email protected]>
        M:      Jaegeuk Kim <[email protected]>
 -------M:      Eric Biggers <[email protected]>
        L:      [email protected]
        S:      Supported
        Q:      https://patchwork.kernel.org/project/linux-fscrypt/list/
 -------T:      git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
 +++++++T:      git https://git.kernel.org/pub/scm/fs/fscrypt/linux.git
        F:      Documentation/filesystems/fscrypt.rst
        F:      fs/crypto/
 -------F:      include/linux/fscrypt*.h
 +++++++F:      include/linux/fscrypt.h
        F:      include/uapi/linux/fscrypt.h
        
        FSI SUBSYSTEM
@@@@@@@@@ -8526,10 -8520,10 -8520,10 -8520,10 -8520,10 -8520,10 -8520,10 -8520,10 +8526,10 @@@@@@@@@ F: include/linux/fsnotify*.
        FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION
        M:      Eric Biggers <[email protected]>
        M:      Theodore Y. Ts'o <[email protected]>
 -------L:      [email protected]
 +++++++L:      [email protected]
        S:      Supported
 -------Q:      https://patchwork.kernel.org/project/linux-fscrypt/list/
 -------T:      git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity
 +++++++Q:      https://patchwork.kernel.org/project/fsverity/list/
 +++++++T:      git https://git.kernel.org/pub/scm/fs/fsverity/linux.git
        F:      Documentation/filesystems/fsverity.rst
        F:      fs/verity/
        F:      include/linux/fsverity.h
@@@@@@@@@ -8577,7 -8571,6 -8571,6 -8571,6 -8571,6 -8571,6 -8571,6 -8571,6 +8577,7 @@@@@@@@@ F:  kernel/trace/fgraph.
        F:      arch/*/*/*/*ftrace*
        F:      arch/*/*/*ftrace*
        F:      include/*/ftrace.h
 +++++++F:      samples/ftrace
        
        FUNGIBLE ETHERNET DRIVERS
        M:      Dimitris Michailidis <[email protected]>
@@@@@@@@@ -9306,7 -9299,7 -9299,7 -9299,7 -9299,7 -9299,7 -9299,7 -9299,7 +9306,7 @@@@@@@@@ F:  net/dsa/tag_hellcreek.
        
        HISILICON DMA DRIVER
        M:      Zhou Wang <[email protected]>
 -------M:      Jie Hai <haijie1@hisilicon.com>
 +++++++M:      Jie Hai <haijie1@huawei.com>
        L:      [email protected]
        S:      Maintained
        F:      drivers/dma/hisi_dma.c
        S:      Supported
        T:      git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
        F:      drivers/iommu/intel/
----- --F:      include/linux/intel-svm.h
        
        INTEL IPU3 CSI-2 CIO2 DRIVER
        M:      Yong Zhi <[email protected]>
        KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
        M:      Marc Zyngier <[email protected]>
        R:      James Morse <[email protected]>
 -------R:      Alexandru Elisei <[email protected]>
        R:      Suzuki K Poulose <[email protected]>
        R:      Oliver Upton <[email protected]>
 +++++++R:      Zenghui Yu <[email protected]>
        L:      [email protected] (moderated for non-subscribers)
        L:      [email protected]
        L:      [email protected] (deprecated, moderated for non-subscribers)
@@@@@@@@@ -13627,7 -13620,7 -13620,7 -13620,7 -13620,7 -13619,7 -13620,7 -13620,7 +13626,7 @@@@@@@@@ F: arch/microblaze
        
        MICROCHIP AT91 DMA DRIVERS
        M:      Ludovic Desroches <[email protected]>
 -------M:      Tudor Ambarus <tudor.ambarus@microchip.com>
 +++++++M:      Tudor Ambarus <tudor.ambarus@linaro.org>
        L:      [email protected] (moderated for non-subscribers)
        L:      [email protected]
        S:      Supported
@@@@@@@@@ -13672,7 -13665,7 -13665,7 -13665,7 -13665,7 -13664,7 -13665,7 -13665,7 +13671,7 @@@@@@@@@ F: Documentation/devicetree/bindings/me
        F:      drivers/media/platform/microchip/microchip-csi2dc.c
        
        MICROCHIP ECC DRIVER
 -------M:      Tudor Ambarus <tudor.ambarus@microchip.com>
 +++++++M:      Tudor Ambarus <tudor.ambarus@linaro.org>
        L:      [email protected]
        S:      Maintained
        F:      drivers/crypto/atmel-ecc.*
        F:      drivers/mmc/host/atmel-mci.c
        
        MICROCHIP NAND DRIVER
 -------M:      Tudor Ambarus <tudor.ambarus@microchip.com>
 +++++++M:      Tudor Ambarus <tudor.ambarus@linaro.org>
        L:      [email protected]
        S:      Supported
        F:      Documentation/devicetree/bindings/mtd/atmel-nand.txt
        F:      drivers/power/reset/at91-sama5d2_shdwc.c
        
        MICROCHIP SPI DRIVER
 -------M:      Tudor Ambarus <tudor.ambarus@microchip.com>
 +++++++M:      Tudor Ambarus <tudor.ambarus@linaro.org>
        S:      Supported
        F:      drivers/spi/spi-atmel.*
        
@@@@@@@@@ -14608,6 -14601,7 -14601,7 -14601,7 -14601,7 -14600,7 -14601,7 -14601,7 +14607,6 @@@@@@@@@ F: tools/testing/selftests/net/ipsec.
        
        NETWORKING [IPv4/IPv6]
        M:      "David S. Miller" <[email protected]>
 -------M:      Hideaki YOSHIFUJI <[email protected]>
        M:      David Ahern <[email protected]>
        L:      [email protected]
        S:      Maintained
@@@@@@@@@ -14640,6 -14634,7 -14634,7 -14634,7 -14634,7 -14633,7 -14634,7 -14634,7 +14639,6 @@@@@@@@@ F: net/netfilter/xt_SECMARK.
        F:      net/netlabel/
        
        NETWORKING [MPTCP]
 -------M:      Mat Martineau <[email protected]>
        M:      Matthieu Baerts <[email protected]>
        L:      [email protected]
        L:      [email protected]
@@@@@@@@@ -14924,8 -14919,7 -14919,7 -14919,7 -14919,7 -14918,7 -14919,7 -14919,7 +14923,8 @@@@@@@@@ T: git://git.infradead.org/nvme.gi
        F:      Documentation/nvme/
        F:      drivers/nvme/host/
        F:      drivers/nvme/common/
 -------F:      include/linux/nvme*
 +++++++F:      include/linux/nvme.h
 +++++++F:      include/linux/nvme-*.h
        F:      include/uapi/linux/nvme_ioctl.h
        
        NVM EXPRESS FABRICS AUTHENTICATION
@@@@@@@@@ -15664,7 -15658,7 -15658,7 -15658,7 -15658,7 -15657,7 -15658,7 -15658,7 +15663,7 @@@@@@@@@ OPENRISC ARCHITECTUR
        M:      Jonas Bonn <[email protected]>
        M:      Stefan Kristiansson <[email protected]>
        M:      Stafford Horne <[email protected]>
 -------L:      [email protected].org
 +++++++L:      [email protected].org
        S:      Maintained
        W:      http://openrisc.io
        T:      git https://github.com/openrisc/linux.git
@@@@@@@@@ -15755,12 -15749,6 -15749,6 -15749,6 -15749,6 -15748,6 -15749,6 -15749,6 +15754,12 @@@@@@@@@ S:       Maintaine
        W:      https://wireless.wiki.kernel.org/en/users/Drivers/p54
        F:      drivers/net/wireless/intersil/p54/
        
 +++++++PACKET SOCKETS
 +++++++M:      Willem de Bruijn <[email protected]>
 +++++++S:      Maintained
 +++++++F:      include/uapi/linux/if_packet.h
 +++++++F:      net/packet/af_packet.c
 +++++++
        PACKING
        M:      Vladimir Oltean <[email protected]>
        L:      [email protected]
@@@@@@@@@ -16120,7 -16108,7 -16108,7 -16108,7 -16108,7 -16107,7 -16108,7 -16108,7 +16119,7 @@@@@@@@@ F: drivers/pci/controller/pci-v3-semi.
        
        PCI ENDPOINT SUBSYSTEM
        M:      Lorenzo Pieralisi <[email protected]>
 -------R:      Krzysztof Wilczyński <[email protected]>
 +++++++M:      Krzysztof Wilczyński <[email protected]>
        R:      Manivannan Sadhasivam <[email protected]>
        R:      Kishon Vijay Abraham I <[email protected]>
        L:      [email protected]
        Q:      https://patchwork.kernel.org/project/linux-pci/list/
        B:      https://bugzilla.kernel.org
        C:      irc://irc.oftc.net/linux-pci
 -------T:      git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
 +++++++T:      git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
        F:      Documentation/PCI/endpoint/*
        F:      Documentation/misc-devices/pci-endpoint-test.rst
        F:      drivers/misc/pci_endpoint_test.c
        Q:      https://patchwork.kernel.org/project/linux-pci/list/
        B:      https://bugzilla.kernel.org
        C:      irc://irc.oftc.net/linux-pci
 -------T:      git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
 +++++++T:      git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
        F:      Documentation/driver-api/pci/p2pdma.rst
        F:      drivers/pci/p2pdma.c
        F:      include/linux/pci-p2pdma.h
@@@@@@@@@ -16185,14 -16173,14 -16173,14 -16173,14 -16173,14 -16172,14 -16173,14 -16173,14 +16184,14 @@@@@@@@@ F:        drivers/pci/controller/pci-xgene-msi
        
        PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS
        M:      Lorenzo Pieralisi <[email protected]>
 +++++++M:      Krzysztof Wilczyński <[email protected]>
        R:      Rob Herring <[email protected]>
 -------R:      Krzysztof Wilczyński <[email protected]>
        L:      [email protected]
        S:      Supported
        Q:      https://patchwork.kernel.org/project/linux-pci/list/
        B:      https://bugzilla.kernel.org
        C:      irc://irc.oftc.net/linux-pci
 -------T:      git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git
 +++++++T:      git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
        F:      Documentation/devicetree/bindings/pci/
        F:      drivers/pci/controller/
        F:      drivers/pci/pci-bridge-emul.c
        Q:      https://patchwork.kernel.org/project/linux-pci/list/
        B:      https://bugzilla.kernel.org
        C:      irc://irc.oftc.net/linux-pci
 -------T:      git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
 +++++++T:      git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
        F:      Documentation/PCI/
        F:      Documentation/devicetree/bindings/pci/
        F:      arch/x86/kernel/early-quirks.c
@@@@@@@@@ -17249,7 -17237,7 -17237,7 -17237,7 -17237,7 -17236,7 -17237,7 -17237,7 +17248,7 @@@@@@@@@ F: Documentation/devicetree/bindings/ne
        F:      drivers/net/wwan/qcom_bam_dmux.c
        
        QUALCOMM CAMERA SUBSYSTEM DRIVER
 -------M:      Robert Foss <robert.foss@linaro.org>
 +++++++M:      Robert Foss <rfoss@kernel.org>
        M:      Todor Tomov <[email protected]>
        L:      [email protected]
        S:      Maintained
@@@@@@@@@ -17329,7 -17317,7 -17317,7 -17317,7 -17317,7 -17316,7 -17317,7 -17317,7 +17328,7 @@@@@@@@@ F: drivers/dma/qcom/hidma
        
        QUALCOMM I2C CCI DRIVER
        M:      Loic Poulain <[email protected]>
 -------M:      Robert Foss <robert.foss@linaro.org>
 +++++++M:      Robert Foss <rfoss@kernel.org>
        L:      [email protected]
        L:      [email protected]
        S:      Maintained
        L:      [email protected]
        S:      Supported
        Q:      https://patchwork.kernel.org/project/linux-riscv/list/
 +++++++C:      irc://irc.libera.chat/riscv
        P:      Documentation/riscv/patch-acceptance.rst
        T:      git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
        F:      arch/riscv/
@@@@@@@@@ -18690,9 -18677,9 -18677,9 -18677,9 -18677,9 -18676,9 -18677,9 -18677,9 +18689,9 @@@@@@@@@ F: drivers/target
        F:      include/target/
        
        SCTP PROTOCOL
 -------M:      Vlad Yasevich <[email protected]>
        M:      Neil Horman <[email protected]>
        M:      Marcelo Ricardo Leitner <[email protected]>
 +++++++M:      Xin Long <[email protected]>
        L:      [email protected]
        S:      Maintained
        W:      http://lksctp.sourceforge.net
        S:      Orphan
        F:      sound/soc/uniphier/
        
 +++++++SOCKET TIMESTAMPING
 +++++++M:      Willem de Bruijn <[email protected]>
 +++++++S:      Maintained
 +++++++F:      Documentation/networking/timestamping.rst
 +++++++F:      include/uapi/linux/net_tstamp.h
 +++++++F:      tools/testing/selftests/net/so_txtime.c
 +++++++
        SOEKRIS NET48XX LED SUPPORT
        M:      Chris Boot <[email protected]>
        S:      Maintained
@@@@@@@@@ -19692,7 -19672,7 -19672,7 -19672,7 -19672,7 -19671,7 -19672,7 -19672,7 +19691,7 @@@@@@@@@ F: drivers/clk/spear
        F:      drivers/pinctrl/spear/
        
        SPI NOR SUBSYSTEM
 -------M:      Tudor Ambarus <tudor.ambarus@microchip.com>
 +++++++M:      Tudor Ambarus <tudor.ambarus@linaro.org>
        M:      Pratyush Yadav <[email protected]>
        R:      Michael Walle <[email protected]>
        L:      [email protected]
@@@@@@@@@ -20090,7 -20070,6 -20070,6 -20070,6 -20070,6 -20069,6 -20070,6 -20070,6 +20089,7 @@@@@@@@@ F: drivers/watchdog/sunplus_wdt.
        SUPERH
        M:      Yoshinori Sato <[email protected]>
        M:      Rich Felker <[email protected]>
 +++++++M:      John Paul Adrian Glaubitz <[email protected]>
        L:      [email protected]
        S:      Maintained
        Q:      http://patchwork.kernel.org/project/linux-sh/list/
@@@@@@@@@ -21731,7 -21710,6 -21710,6 -21710,6 -21710,6 -21709,6 -21710,6 -21710,6 +21730,7 @@@@@@@@@ F: include/uapi/linux/uvcvideo.
        
        USB WEBCAM GADGET
        M:      Laurent Pinchart <[email protected]>
 +++++++M:      Daniel Scally <[email protected]>
        L:      [email protected]
        S:      Maintained
        F:      drivers/usb/gadget/function/*uvc*
@@@@@@@@@ -21767,13 -21745,6 -21745,6 -21745,6 -21745,6 -21744,6 -21745,6 -21745,6 +21766,13 @@@@@@@@@ T:       git git://linuxtv.org/media_tree.gi
        F:      Documentation/admin-guide/media/zr364xx*
        F:      drivers/staging/media/deprecated/zr364xx/
        
 +++++++USER DATAGRAM PROTOCOL (UDP)
 +++++++M:      Willem de Bruijn <[email protected]>
 +++++++S:      Maintained
 +++++++F:      include/linux/udp.h
 +++++++F:      net/ipv4/udp.c
 +++++++F:      net/ipv6/udp.c
 +++++++
        USER-MODE LINUX (UML)
        M:      Richard Weinberger <[email protected]>
        M:      Anton Ivanov <[email protected]>
index cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d,cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d,cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d,cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d,cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d,cbeaab55c0dbccb3e3c4e0ad86839f6cd52a137d,2aff392d98955592b2e273936b2201c0234a8c04,92319c9b877c443b04d486d971a461358325389c..c20c41dd9c91053e65706e1d50fba48e0fcd7918
@@@@@@@@@ -558,6 -558,6 -558,6 -558,6 -558,6 -558,6 -558,15 -558,6 +558,15 @@@@@@@@@ static void amd_iommu_report_page_fault
                         * prevent logging it.
                         */
                        if (IS_IOMMU_MEM_TRANSACTION(flags)) {
++++++ +                        /* Device not attached to domain properly */
++++++ +                        if (dev_data->domain == NULL) {
++++++ +                                pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
++++++ +                                pr_err_ratelimited("  device=%04x:%02x:%02x.%x domain=0x%04x\n",
++++++ +                                                   iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
++++++ +                                                   PCI_FUNC(devid), domain_id);
++++++ +                                goto out;
++++++ +                        }
++++++ +
                                if (!report_iommu_fault(&dev_data->domain->domain,
                                                        &pdev->dev, address,
                                                        IS_WRITE_REQUEST(flags) ?
                                event[0], event[1], event[2], event[3]);
                }
        
------ -        memset(__evt, 0, 4 * sizeof(u32));
++++++ +        /*
++++++ +         * To detect hardware erratum 732 we need to clear the
++++++ +         * entry back to zero. This issue does not exist on
++++++ +         * SNP-enabled systems. Also, this buffer is not writable
++++++ +         * on SNP-enabled systems.
++++++ +         */
++++++ +        if (!amd_iommu_snp_en)
++++++ +                memset(__evt, 0, 4 * sizeof(u32));
        }
        
        static void iommu_poll_events(struct amd_iommu *iommu)
@@@@@@@@@ -736,10 -736,10 -736,10 -736,10 -736,10 -736,10 -752,13 -736,10 +752,13 @@@@@@@@@ static void iommu_poll_ppr_log(struct a
                        entry[1] = raw[1];
        
                        /*
------ -                 * To detect the hardware bug we need to clear the entry
------ -                 * back to zero.
++++++ +                 * To detect hardware erratum 733 we need to clear the
++++++ +                 * entry back to zero. This issue does not exist on
++++++ +                 * SNP-enabled systems. Also, this buffer is not writable
++++++ +                 * on SNP-enabled systems.
                         */
------ -                raw[0] = raw[1] = 0UL;
++++++ +                if (!amd_iommu_snp_en)
++++++ +                        raw[0] = raw[1] = 0UL;
        
                        /* Update head pointer of hardware ring-buffer */
                        head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
@@@@@@@@@ -1702,27 -1702,27 -1702,27 -1702,27 -1702,27 -1702,27 -1721,29 -1702,27 +1721,29 @@@@@@@@@ static int pdev_pri_ats_enable(struct p
                /* Only allow access to user-accessible pages */
                ret = pci_enable_pasid(pdev, 0);
                if (ret)
------ -                goto out_err;
++++++ +                return ret;
        
                /* First reset the PRI state of the device */
                ret = pci_reset_pri(pdev);
                if (ret)
------ -                goto out_err;
++++++ +                goto out_err_pasid;
        
                /* Enable PRI */
                /* FIXME: Hardcode number of outstanding requests for now */
                ret = pci_enable_pri(pdev, 32);
                if (ret)
------ -                goto out_err;
++++++ +                goto out_err_pasid;
        
                ret = pci_enable_ats(pdev, PAGE_SHIFT);
                if (ret)
------ -                goto out_err;
++++++ +                goto out_err_pri;
        
                return 0;
        
------ -out_err:
++++++ +out_err_pri:
                pci_disable_pri(pdev);
++++++ +
++++++ +out_err_pasid:
                pci_disable_pasid(pdev);
        
                return ret;
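
The hunk above replaces a single out_err label, which unconditionally
disabled both PRI and PASID even when they had not been enabled yet,
with staged unwind labels so that each failure path undoes only what
has already succeeded. A minimal sketch of the idiom, with illustrative
resource names rather than the driver's actual helpers:

        static int enable_features(struct device *dev)
        {
                int ret;

                ret = enable_a(dev);            /* e.g. PASID */
                if (ret)
                        return ret;             /* nothing to unwind yet */

                ret = enable_b(dev);            /* e.g. PRI */
                if (ret)
                        goto out_err_a;

                ret = enable_c(dev);            /* e.g. ATS */
                if (ret)
                        goto out_err_b;

                return 0;

        out_err_b:
                disable_b(dev);                 /* unwind in reverse order */
        out_err_a:
                disable_a(dev);
                return ret;
        }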
@@@@@@@@@ -2072,6 -2072,6 -2072,6 -2072,6 -2072,6 -2072,6 -2093,10 -2072,6 +2093,10 @@@@@@@@@ static struct protection_domain *protec
                if (ret)
                        goto out_err;
        
++++++ +        /* No need to allocate io pgtable ops in passthrough mode */
++++++ +        if (type == IOMMU_DOMAIN_IDENTITY)
++++++ +                return domain;
++++++ +
                pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
                if (!pgtbl_ops) {
                        domain_id_free(domain->id);
@@@@@@@@@ -2126,31 -2126,31 -2126,31 -2126,31 -2126,31 -2126,31 -2151,31 -2126,6 +2151,6 @@@@@@@@@ static void amd_iommu_domain_free(struc
                protection_domain_free(domain);
        }
        
------- static void amd_iommu_detach_device(struct iommu_domain *dom,
-------                                     struct device *dev)
------- {
-------         struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
-------         struct amd_iommu *iommu;
------- 
-------         if (!check_device(dev))
-------                 return;
------- 
-------         if (dev_data->domain != NULL)
-------                 detach_device(dev);
------- 
-------         iommu = rlookup_amd_iommu(dev);
-------         if (!iommu)
-------                 return;
------- 
------- #ifdef CONFIG_IRQ_REMAP
-------         if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-------             (dom->type == IOMMU_DOMAIN_UNMANAGED))
-------                 dev_data->use_vapic = 0;
------- #endif
------- 
-------         iommu_completion_wait(iommu);
------- }
------- 
        static int amd_iommu_attach_device(struct iommu_domain *dom,
                                           struct device *dev)
        {
                struct amd_iommu *iommu = rlookup_amd_iommu(dev);
                int ret;
        
++++++ +        /*
++++++ +         * Skip attaching the device to the domain if the new domain
++++++ +         * is the same as the device's current domain.
++++++ +         */
++++++ +        if (dev_data->domain == domain)
++++++ +                return 0;
++++++ +
                dev_data->defer_attach = false;
        
                if (dev_data->domain)
@@@@@@@@@ -2387,12 -2387,12 -2387,12 -2387,12 -2387,12 -2387,12 -2419,17 -2362,12 +2394,17 @@@@@@@@@ static int amd_iommu_def_domain_type(st
                        return 0;
        
                /*
------ -         * Do not identity map IOMMUv2 capable devices when memory encryption is
------ -         * active, because some of those devices (AMD GPUs) don't have the
------ -         * encryption bit in their DMA-mask and require remapping.
++++++ +         * Do not identity map IOMMUv2 capable devices when:
++++++ +         *  - memory encryption is active, because some of those devices
++++++ +         *    (AMD GPUs) don't have the encryption bit in their DMA-mask
++++++ +         *    and require remapping.
++++++ +         *  - SNP is enabled, because it prohibits DTE[Mode]=0.
                 */
------ -        if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
++++++ +        if (dev_data->iommu_v2 &&
++++++ +            !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
++++++ +            !amd_iommu_snp_en) {
                        return IOMMU_DOMAIN_IDENTITY;
++++++ +        }
        
                return 0;
        }
@@@@@@@@@ -2416,7 -2416,7 -2416,7 -2416,7 -2416,7 -2416,7 -2453,7 -2391,6 +2428,6 @@@@@@@@@ const struct iommu_ops amd_iommu_ops = 
                .def_domain_type = amd_iommu_def_domain_type,
                .default_domain_ops = &(const struct iommu_domain_ops) {
                        .attach_dev     = amd_iommu_attach_device,
-------                 .detach_dev     = amd_iommu_detach_device,
                        .map_pages      = amd_iommu_map_pages,
                        .unmap_pages    = amd_iommu_unmap_pages,
                        .iotlb_sync_map = amd_iommu_iotlb_sync_map,
index 4f4a323be0d0ffd5279d814c746f4cb3a432d73d,abedcffed23776fb0d9753e3e7f5baa30c020caf,4f4a323be0d0ffd5279d814c746f4cb3a432d73d,4f4a323be0d0ffd5279d814c746f4cb3a432d73d,4f4a323be0d0ffd5279d814c746f4cb3a432d73d,4f4a323be0d0ffd5279d814c746f4cb3a432d73d,4f4a323be0d0ffd5279d814c746f4cb3a432d73d,96843d4688012a38352511893820d1f31294d5a6..06169d36eab829d85b89f01ee1a44edc8b37f80a
        
        #include "dma-iommu.h"
        
- ------#define DART_MAX_STREAMS 16
+ ++++++#define DART_MAX_STREAMS 256
        #define DART_MAX_TTBR 4
        #define MAX_DARTS_PER_DEVICE 2
        
- ------#define DART_STREAM_ALL 0xffff
+ ++++++/* Common registers */
        
        #define DART_PARAMS1 0x00
- ------#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)
+ ++++++#define DART_PARAMS1_PAGE_SHIFT GENMASK(27, 24)
        
        #define DART_PARAMS2 0x04
- ------#define DART_PARAMS_BYPASS_SUPPORT BIT(0)
+ ++++++#define DART_PARAMS2_BYPASS_SUPPORT BIT(0)
        
- ------#define DART_STREAM_COMMAND 0x20
- ------#define DART_STREAM_COMMAND_BUSY BIT(2)
- ------#define DART_STREAM_COMMAND_INVALIDATE BIT(20)
+ ++++++/* T8020/T6000 registers */
        
- ------#define DART_STREAM_SELECT 0x34
+ ++++++#define DART_T8020_STREAM_COMMAND 0x20
+ ++++++#define DART_T8020_STREAM_COMMAND_BUSY BIT(2)
+ ++++++#define DART_T8020_STREAM_COMMAND_INVALIDATE BIT(20)
        
- ------#define DART_ERROR 0x40
- ------#define DART_ERROR_STREAM GENMASK(27, 24)
- ------#define DART_ERROR_CODE GENMASK(11, 0)
- ------#define DART_ERROR_FLAG BIT(31)
+ ++++++#define DART_T8020_STREAM_SELECT 0x34
        
- ------#define DART_ERROR_READ_FAULT BIT(4)
- ------#define DART_ERROR_WRITE_FAULT BIT(3)
- ------#define DART_ERROR_NO_PTE BIT(2)
- ------#define DART_ERROR_NO_PMD BIT(1)
- ------#define DART_ERROR_NO_TTBR BIT(0)
+ ++++++#define DART_T8020_ERROR 0x40
+ ++++++#define DART_T8020_ERROR_STREAM GENMASK(27, 24)
+ ++++++#define DART_T8020_ERROR_CODE GENMASK(11, 0)
+ ++++++#define DART_T8020_ERROR_FLAG BIT(31)
        
- ------#define DART_CONFIG 0x60
- ------#define DART_CONFIG_LOCK BIT(15)
+ ++++++#define DART_T8020_ERROR_READ_FAULT BIT(4)
+ ++++++#define DART_T8020_ERROR_WRITE_FAULT BIT(3)
+ ++++++#define DART_T8020_ERROR_NO_PTE BIT(2)
+ ++++++#define DART_T8020_ERROR_NO_PMD BIT(1)
+ ++++++#define DART_T8020_ERROR_NO_TTBR BIT(0)
        
- ------#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
- ------
- ------#define DART_ERROR_ADDR_HI 0x54
- ------#define DART_ERROR_ADDR_LO 0x50
- ------
- ------#define DART_STREAMS_ENABLE 0xfc
+ ++++++#define DART_T8020_CONFIG 0x60
+ ++++++#define DART_T8020_CONFIG_LOCK BIT(15)
        
- ------#define DART_TCR(sid) (0x100 + 4 * (sid))
- ------#define DART_TCR_TRANSLATE_ENABLE BIT(7)
- ------#define DART_TCR_BYPASS0_ENABLE BIT(8)
- ------#define DART_TCR_BYPASS1_ENABLE BIT(12)
+ ++++++#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
        
- ------#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
- ------#define DART_TTBR_VALID BIT(31)
- ------#define DART_TTBR_SHIFT 12
+ ++++++#define DART_T8020_ERROR_ADDR_HI 0x54
+ ++++++#define DART_T8020_ERROR_ADDR_LO 0x50
+ ++++++
+ ++++++#define DART_T8020_STREAMS_ENABLE 0xfc
+ ++++++
+ ++++++#define DART_T8020_TCR                  0x100
+ ++++++#define DART_T8020_TCR_TRANSLATE_ENABLE BIT(7)
+ ++++++#define DART_T8020_TCR_BYPASS_DART      BIT(8)
+ ++++++#define DART_T8020_TCR_BYPASS_DAPF      BIT(12)
+ ++++++
+ ++++++#define DART_T8020_TTBR       0x200
+ ++++++#define DART_T8020_TTBR_VALID BIT(31)
+ ++++++#define DART_T8020_TTBR_ADDR_FIELD_SHIFT 0
+ ++++++#define DART_T8020_TTBR_SHIFT 12
+ ++++++
+ ++++++/* T8110 registers */
+ ++++++
+ ++++++#define DART_T8110_PARAMS3 0x08
+ ++++++#define DART_T8110_PARAMS3_PA_WIDTH GENMASK(29, 24)
+ ++++++#define DART_T8110_PARAMS3_VA_WIDTH GENMASK(21, 16)
+ ++++++#define DART_T8110_PARAMS3_VER_MAJ GENMASK(15, 8)
+ ++++++#define DART_T8110_PARAMS3_VER_MIN GENMASK(7, 0)
+ ++++++
+ ++++++#define DART_T8110_PARAMS4 0x0c
+ ++++++#define DART_T8110_PARAMS4_NUM_CLIENTS GENMASK(24, 16)
+ ++++++#define DART_T8110_PARAMS4_NUM_SIDS GENMASK(8, 0)
+ ++++++
+ ++++++#define DART_T8110_TLB_CMD              0x80
+ ++++++#define DART_T8110_TLB_CMD_BUSY         BIT(31)
+ ++++++#define DART_T8110_TLB_CMD_OP           GENMASK(10, 8)
+ ++++++#define DART_T8110_TLB_CMD_OP_FLUSH_ALL 0
+ ++++++#define DART_T8110_TLB_CMD_OP_FLUSH_SID 1
+ ++++++#define DART_T8110_TLB_CMD_STREAM       GENMASK(7, 0)
+ ++++++
+ ++++++#define DART_T8110_ERROR 0x100
+ ++++++#define DART_T8110_ERROR_STREAM GENMASK(27, 20)
+ ++++++#define DART_T8110_ERROR_CODE GENMASK(14, 0)
+ ++++++#define DART_T8110_ERROR_FLAG BIT(31)
+ ++++++
+ ++++++#define DART_T8110_ERROR_MASK 0x104
+ ++++++
+ ++++++#define DART_T8110_ERROR_READ_FAULT BIT(5)
+ ++++++#define DART_T8110_ERROR_WRITE_FAULT BIT(4)
+ ++++++#define DART_T8110_ERROR_NO_PTE BIT(3)
+ ++++++#define DART_T8110_ERROR_NO_PMD BIT(2)
+ ++++++#define DART_T8110_ERROR_NO_PGD BIT(1)
+ ++++++#define DART_T8110_ERROR_NO_TTBR BIT(0)
+ ++++++
+ ++++++#define DART_T8110_ERROR_ADDR_LO 0x170
+ ++++++#define DART_T8110_ERROR_ADDR_HI 0x174
+ ++++++
+ ++++++#define DART_T8110_PROTECT 0x200
+ ++++++#define DART_T8110_UNPROTECT 0x204
+ ++++++#define DART_T8110_PROTECT_LOCK 0x208
+ ++++++#define DART_T8110_PROTECT_TTBR_TCR BIT(0)
+ ++++++
+ ++++++#define DART_T8110_ENABLE_STREAMS  0xc00
+ ++++++#define DART_T8110_DISABLE_STREAMS 0xc20
+ ++++++
+ ++++++#define DART_T8110_TCR                  0x1000
+ ++++++#define DART_T8110_TCR_REMAP            GENMASK(11, 8)
+ ++++++#define DART_T8110_TCR_REMAP_EN         BIT(7)
+ ++++++#define DART_T8110_TCR_BYPASS_DAPF      BIT(2)
+ ++++++#define DART_T8110_TCR_BYPASS_DART      BIT(1)
+ ++++++#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
+ ++++++
+ ++++++#define DART_T8110_TTBR       0x1400
+ ++++++#define DART_T8110_TTBR_VALID BIT(0)
+ ++++++#define DART_T8110_TTBR_ADDR_FIELD_SHIFT 2
+ ++++++#define DART_T8110_TTBR_SHIFT 14
+ ++++++
+ ++++++#define DART_TCR(dart, sid) ((dart)->hw->tcr + ((sid) << 2))
+ ++++++
+ ++++++#define DART_TTBR(dart, sid, idx) ((dart)->hw->ttbr + \
+ ++++++                                   (((dart)->hw->ttbr_count * (sid)) << 2) + \
+ ++++++                                   ((idx) << 2))
+ ++++++
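
As a consistency check on the new parameterized macros: assuming the
T8020 descriptor sets tcr = DART_T8020_TCR (0x100), ttbr =
DART_T8020_TTBR (0x200) and ttbr_count = 4 (the old DART_MAX_TTBR
layout), the new formulas reproduce the old fixed offsets. For sid = 2,
DART_TCR gives 0x100 + (2 << 2) = 0x108, matching the old
0x100 + 4 * sid; for sid = 2 and idx = 1, DART_TTBR gives
0x200 + ((4 * 2) << 2) + (1 << 2) = 0x200 + 0x20 + 0x4 = 0x224,
matching the old 0x200 + 16 * sid + 4 * idx.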
+ ++++++struct apple_dart_stream_map;
+ ++++++
+ ++++++enum dart_type {
+ ++++++        DART_T8020,
+ ++++++        DART_T6000,
+ ++++++        DART_T8110,
+ ++++++};
        
        struct apple_dart_hw {
+ ++++++        enum dart_type type;
+ ++++++        irqreturn_t (*irq_handler)(int irq, void *dev);
+ ++++++        int (*invalidate_tlb)(struct apple_dart_stream_map *stream_map);
+ ++++++
                u32 oas;
                enum io_pgtable_fmt fmt;
+ ++++++
+ ++++++        int max_sid_count;
+ ++++++
+ ++++++        u64 lock;
+ ++++++        u64 lock_bit;
+ ++++++
+ ++++++        u64 error;
+ ++++++
+ ++++++        u64 enable_streams;
+ ++++++
+ ++++++        u64 tcr;
+ ++++++        u64 tcr_enabled;
+ ++++++        u64 tcr_disabled;
+ ++++++        u64 tcr_bypass;
+ ++++++
+ ++++++        u64 ttbr;
+ ++++++        u64 ttbr_valid;
+ ++++++        u64 ttbr_addr_field_shift;
+ ++++++        u64 ttbr_shift;
+ ++++++        int ttbr_count;
        };
        
        /*
@@@@@@@@@ -115,12 -212,18 -115,12 -115,12 -115,12 -115,12 -115,12 -115,12 +212,18 @@@@@@@@@ struct apple_dart 
        
                spinlock_t lock;
        
+ ++++++        u32 ias;
+ ++++++        u32 oas;
                u32 pgsize;
+ ++++++        u32 num_streams;
                u32 supports_bypass : 1;
                u32 force_bypass : 1;
        
                struct iommu_group *sid2group[DART_MAX_STREAMS];
                struct iommu_device iommu;
+ ++++++
+ ++++++        u32 save_tcr[DART_MAX_STREAMS];
+ ++++++        u32 save_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR];
        };
        
        /*
         */
        struct apple_dart_stream_map {
                struct apple_dart *dart;
- ------        unsigned long sidmap;
+ ++++++        DECLARE_BITMAP(sidmap, DART_MAX_STREAMS);
        };
        struct apple_dart_atomic_stream_map {
                struct apple_dart *dart;
- ------        atomic64_t sidmap;
+ ++++++        atomic_long_t sidmap[BITS_TO_LONGS(DART_MAX_STREAMS)];
        };
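
Switching sidmap from a single unsigned long (and atomic64_t) to a
DECLARE_BITMAP() / atomic_long_t array lifts the previous 64-stream
ceiling, so the larger DART_MAX_STREAMS of 256 defined above fits. A
minimal kernel-style sketch of the bitmap idiom, assuming a placeholder
do_something() consumer:

        #include <linux/bitmap.h>

        static void walk_streams(void)
        {
                DECLARE_BITMAP(sidmap, 256);    /* room for 256 stream IDs */
                unsigned int sid;

                bitmap_zero(sidmap, 256);       /* clear all bits */
                bitmap_set(sidmap, 0, 16);      /* mark streams 0..15 */

                /* visit only the set bits */
                for_each_set_bit(sid, sidmap, 256)
                        do_something(sid);
        }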
        
        /*
@@@@@@@@@ -202,50 -305,55 -202,50 -202,50 -202,50 -202,50 -202,50 -202,50 +305,55 @@@@@@@@@ static struct apple_dart_domain *to_dar
        static void
        apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
        {
+ ++++++        struct apple_dart *dart = stream_map->dart;
                int sid;
        
- ------        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- ------                writel(DART_TCR_TRANSLATE_ENABLE,
- ------                       stream_map->dart->regs + DART_TCR(sid));
+ ++++++        for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ ++++++                writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
        }
        
        static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
        {
+ ++++++        struct apple_dart *dart = stream_map->dart;
                int sid;
        
- ------        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- ------                writel(0, stream_map->dart->regs + DART_TCR(sid));
+ ++++++        for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ ++++++                writel(dart->hw->tcr_disabled, dart->regs + DART_TCR(dart, sid));
        }
        
        static void
        apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
        {
+ ++++++        struct apple_dart *dart = stream_map->dart;
                int sid;
        
                WARN_ON(!stream_map->dart->supports_bypass);
- ------        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- ------                writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
- ------                       stream_map->dart->regs + DART_TCR(sid));
+ ++++++        for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ ++++++                writel(dart->hw->tcr_bypass,
+ ++++++                       dart->regs + DART_TCR(dart, sid));
        }
        
        static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
                                           u8 idx, phys_addr_t paddr)
        {
+ ++++++        struct apple_dart *dart = stream_map->dart;
                int sid;
        
- ------        WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
- ------        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- ------                writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
- ------                       stream_map->dart->regs + DART_TTBR(sid, idx));
+ ++++++        WARN_ON(paddr & ((1 << dart->hw->ttbr_shift) - 1));
+ ++++++        for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ ++++++                writel(dart->hw->ttbr_valid |
+ ++++++                       (paddr >> dart->hw->ttbr_shift) << dart->hw->ttbr_addr_field_shift,
+ ++++++                       dart->regs + DART_TTBR(dart, sid, idx));
        }
        
        static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
                                             u8 idx)
        {
+ ++++++        struct apple_dart *dart = stream_map->dart;
                int sid;
        
- ------        for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- ------                writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
+ ++++++        for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ ++++++                writel(0, dart->regs + DART_TTBR(dart, sid, idx));
        }
        
        static void
@@@@@@@@@ -253,12 -361,12 -253,12 -253,12 -253,12 -253,12 -253,12 -253,12 +361,12 @@@@@@@@@ apple_dart_hw_clear_all_ttbrs(struct ap
        {
                int i;
        
- ------        for (i = 0; i < DART_MAX_TTBR; ++i)
+ ++++++        for (i = 0; i < stream_map->dart->hw->ttbr_count; ++i)
                        apple_dart_hw_clear_ttbr(stream_map, i);
        }
        
        static int
- ------apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
+ ++++++apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
                                     u32 command)
        {
                unsigned long flags;
        
                spin_lock_irqsave(&stream_map->dart->lock, flags);
        
- ------        writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
- ------        writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);
+ ++++++        writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT);
+ ++++++        writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND);
        
                ret = readl_poll_timeout_atomic(
- ------                stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
- ------                !(command_reg & DART_STREAM_COMMAND_BUSY), 1,
+ ++++++                stream_map->dart->regs + DART_T8020_STREAM_COMMAND, command_reg,
+ ++++++                !(command_reg & DART_T8020_STREAM_COMMAND_BUSY), 1,
                        DART_STREAM_COMMAND_BUSY_TIMEOUT);
        
                spin_unlock_irqrestore(&stream_map->dart->lock, flags);
                if (ret) {
                        dev_err(stream_map->dart->dev,
                                "busy bit did not clear after command %x for streams %lx\n",
- ------                        command, stream_map->sidmap);
+ ++++++                        command, stream_map->sidmap[0]);
+ +++++                 return ret;
+ +++++         }
+ +++++ 
+ +++++         return 0;
+ +++++ }
+ +++++ 
+ +++++ static int
       -apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+ ++++++apple_dart_t8110_hw_tlb_command(struct apple_dart_stream_map *stream_map,
+ ++++++                                u32 command)
+ +++++ {
       -        return apple_dart_hw_stream_command(stream_map,
       -                                            DART_STREAM_COMMAND_INVALIDATE);
+ ++++++        struct apple_dart *dart = stream_map->dart;
+ ++++++        unsigned long flags;
+ ++++++        int ret = 0;
+ ++++++        int sid;
+ ++++++
+ ++++++        spin_lock_irqsave(&dart->lock, flags);
+ ++++++
+ ++++++        for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) {
+ ++++++                u32 val = FIELD_PREP(DART_T8110_TLB_CMD_OP, command) |
+ ++++++                        FIELD_PREP(DART_T8110_TLB_CMD_STREAM, sid);
+ ++++++                writel(val, dart->regs + DART_T8110_TLB_CMD);
+ ++++++
+ ++++++                ret = readl_poll_timeout_atomic(
+ ++++++                        dart->regs + DART_T8110_TLB_CMD, val,
+ ++++++                        !(val & DART_T8110_TLB_CMD_BUSY), 1,
+ ++++++                        DART_STREAM_COMMAND_BUSY_TIMEOUT);
+ ++++++
+ ++++++                if (ret)
+ ++++++                        break;
+ ++++++
+ ++++++        }
+ ++++++
+ ++++++        spin_unlock_irqrestore(&dart->lock, flags);
+ ++++++
+ ++++++        if (ret) {
+ ++++++                dev_err(stream_map->dart->dev,
+ ++++++                        "busy bit did not clear after command %x for stream %d\n",
+ ++++++                        command, sid);
       +                return ret;
       +        }
       +
       +        return 0;
       +}
       +
       +static int
- ----- apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+ ++++++apple_dart_t8020_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+ ++++++{
+ ++++++        return apple_dart_t8020_hw_stream_command(
+ ++++++                stream_map, DART_T8020_STREAM_COMMAND_INVALIDATE);
+ ++++++}
+ ++++++
+ ++++++static int
+ ++++++apple_dart_t8110_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
       +{
- -----         return apple_dart_hw_stream_command(stream_map,
- -----                                             DART_STREAM_COMMAND_INVALIDATE);
+ ++++++        return apple_dart_t8110_hw_tlb_command(
+ ++++++                stream_map, DART_T8110_TLB_CMD_OP_FLUSH_SID);
        }
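
For reference, the T8110 path packs the opcode and stream ID into one
command-register write using FIELD_PREP() against the GENMASK()-defined
fields above: DART_T8110_TLB_CMD_OP = GENMASK(10, 8) = 0x700 and
DART_T8110_TLB_CMD_STREAM = GENMASK(7, 0) = 0xff, so flushing stream 5
builds val = FIELD_PREP(0x700, 1) | FIELD_PREP(0xff, 5) = 0x100 | 0x5 =
0x105, and FIELD_GET(0x700, 0x105) recovers opcode 1 (FLUSH_SID).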
        
        static int apple_dart_hw_reset(struct apple_dart *dart)
        {
                u32 config;
                struct apple_dart_stream_map stream_map;
+ ++++++        int i;
        
- ------        config = readl(dart->regs + DART_CONFIG);
- ------        if (config & DART_CONFIG_LOCK) {
+ ++++++        config = readl(dart->regs + dart->hw->lock);
+ ++++++        if (config & dart->hw->lock_bit) {
                        dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
                                config);
                        return -EINVAL;
                }
        
                stream_map.dart = dart;
- ------        stream_map.sidmap = DART_STREAM_ALL;
+ ++++++        bitmap_zero(stream_map.sidmap, DART_MAX_STREAMS);
+ ++++++        bitmap_set(stream_map.sidmap, 0, dart->num_streams);
                apple_dart_hw_disable_dma(&stream_map);
                apple_dart_hw_clear_all_ttbrs(&stream_map);
        
                /* enable all streams globally since TCR is used to control isolation */
- ------        writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);
+ ++++++        for (i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+ ++++++                writel(U32_MAX, dart->regs + dart->hw->enable_streams + 4 * i);
        
                /* clear any pending errors before the interrupt is unmasked */
- ------        writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
+ ++++++        writel(readl(dart->regs + dart->hw->error), dart->regs + dart->hw->error);
+ +++++ 
       -        return apple_dart_hw_invalidate_tlb(&stream_map);
+ ++++++        if (dart->hw->type == DART_T8110)
+ ++++++                writel(0,  dart->regs + DART_T8110_ERROR_MASK);
       +
- -----         return apple_dart_hw_invalidate_tlb(&stream_map);
+ ++++++        return dart->hw->invalidate_tlb(&stream_map);
        }
        
        static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
        {
- ------        int i;
+ ++++++        int i, j;
                struct apple_dart_atomic_stream_map *domain_stream_map;
                struct apple_dart_stream_map stream_map;
        
                for_each_stream_map(i, domain, domain_stream_map) {
                        stream_map.dart = domain_stream_map->dart;
- ------                stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
- ------                apple_dart_hw_invalidate_tlb(&stream_map);
+ ++++++
+ ++++++                for (j = 0; j < BITS_TO_LONGS(stream_map.dart->num_streams); j++)
+ ++++++                        stream_map.sidmap[j] = atomic_long_read(&domain_stream_map->sidmap[j]);
+ ++++++
+ ++++++                stream_map.dart->hw->invalidate_tlb(&stream_map);
                }
        }
        
@@@@@@@@@ -399,11 -561,11 -399,11 -399,11 -399,11 -399,11 -399,11 -399,11 +561,11 @@@@@@@@@ apple_dart_setup_translation(struct app
                for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
                        apple_dart_hw_set_ttbr(stream_map, i,
                                               pgtbl_cfg->apple_dart_cfg.ttbr[i]);
- ------        for (; i < DART_MAX_TTBR; ++i)
+ ++++++        for (; i < stream_map->dart->hw->ttbr_count; ++i)
                        apple_dart_hw_clear_ttbr(stream_map, i);
        
                apple_dart_hw_enable_translation(stream_map);
- ------        apple_dart_hw_invalidate_tlb(stream_map);
+ ++++++        stream_map->dart->hw->invalidate_tlb(stream_map);
        }
        
        static int apple_dart_finalize_domain(struct iommu_domain *domain,
                struct apple_dart *dart = cfg->stream_maps[0].dart;
                struct io_pgtable_cfg pgtbl_cfg;
                int ret = 0;
- ------        int i;
+ ++++++        int i, j;
        
                mutex_lock(&dart_domain->init_lock);
        
        
                for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
                        dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
- ------                atomic64_set(&dart_domain->stream_maps[i].sidmap,
- ------                             cfg->stream_maps[i].sidmap);
+ ++++++                for (j = 0; j < BITS_TO_LONGS(dart->num_streams); j++)
+ ++++++                        atomic_long_set(&dart_domain->stream_maps[i].sidmap[j],
+ ++++++                                        cfg->stream_maps[i].sidmap[j]);
                }
        
                pgtbl_cfg = (struct io_pgtable_cfg){
                        .pgsize_bitmap = dart->pgsize,
- ------                .ias = 32,
- ------                .oas = dart->hw->oas,
+ ++++++                .ias = dart->ias,
+ ++++++                .oas = dart->oas,
                        .coherent_walk = 1,
                        .iommu_dev = dart->dev,
                };
        
                domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
                domain->geometry.aperture_start = 0;
- ------        domain->geometry.aperture_end = DMA_BIT_MASK(32);
+ ++++++        domain->geometry.aperture_end = (dma_addr_t)DMA_BIT_MASK(dart->ias);
                domain->geometry.force_aperture = true;
        
                dart_domain->finalized = true;
@@@@@@@@@ -458,7 -621,7 -458,7 -458,7 -458,7 -458,7 -458,7 -458,7 +621,7 @@@@@@@@@ apple_dart_mod_streams(struct apple_dar
                               struct apple_dart_stream_map *master_maps,
                               bool add_streams)
        {
- ------        int i;
+ ++++++        int i, j;
        
                for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
                        if (domain_maps[i].dart != master_maps[i].dart)
                for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
                        if (!domain_maps[i].dart)
                                break;
- ------                if (add_streams)
- ------                        atomic64_or(master_maps[i].sidmap,
- ------                                    &domain_maps[i].sidmap);
- ------                else
- ------                        atomic64_and(~master_maps[i].sidmap,
- ------                                     &domain_maps[i].sidmap);
+ ++++++                for (j = 0; j < BITS_TO_LONGS(domain_maps[i].dart->num_streams); j++) {
+ ++++++                        if (add_streams)
+ ++++++                                atomic_long_or(master_maps[i].sidmap[j],
+ ++++++                                               &domain_maps[i].sidmap[j]);
+ ++++++                        else
+ ++++++                                atomic_long_and(~master_maps[i].sidmap[j],
+ ++++++                                                &domain_maps[i].sidmap[j]);
+ ++++++                }
                }
        
                return 0;
@@@@@@@@@ -486,13 -651,13 -486,13 -486,13 -486,13 -486,13 -486,13 -486,6 +651,6 @@@@@@@@@ static int apple_dart_domain_add_stream
                                              true);
        }
        
------- static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain,
-------                                             struct apple_dart_master_cfg *cfg)
------- {
-------         return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
-------                                       false);
------- }
------- 
        static int apple_dart_attach_dev(struct iommu_domain *domain,
                                         struct device *dev)
        {
                return ret;
        }
        
------- static void apple_dart_detach_dev(struct iommu_domain *domain,
-------                                   struct device *dev)
------- {
-------         int i;
-------         struct apple_dart_stream_map *stream_map;
-------         struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
-------         struct apple_dart_domain *dart_domain = to_dart_domain(domain);
------- 
-------         for_each_stream_map(i, cfg, stream_map)
-------                 apple_dart_hw_disable_dma(stream_map);
------- 
-------         if (domain->type == IOMMU_DOMAIN_DMA ||
-------             domain->type == IOMMU_DOMAIN_UNMANAGED)
-------                 apple_dart_domain_remove_streams(dart_domain, cfg);
------- }
------- 
        static struct iommu_device *apple_dart_probe_device(struct device *dev)
        {
                struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
@@@@@@@@@ -637,14 -802,14 -637,14 -637,14 -637,14 -637,14 -637,14 -614,14 +779,14 @@@@@@@@@ static int apple_dart_of_xlate(struct d
        
                for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
                        if (cfg->stream_maps[i].dart == dart) {
- ------                        cfg->stream_maps[i].sidmap |= 1 << sid;
+ ++++++                        set_bit(sid, cfg->stream_maps[i].sidmap);
                                return 0;
                        }
                }
                for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
                        if (!cfg->stream_maps[i].dart) {
                                cfg->stream_maps[i].dart = dart;
- ------                        cfg->stream_maps[i].sidmap = 1 << sid;
+ ++++++                        set_bit(sid, cfg->stream_maps[i].sidmap);
                                return 0;
                        }
                }
@@@@@@@@@ -663,13 -828,36 -663,13 -663,13 -663,13 -663,13 -663,13 -640,13 +805,36 @@@@@@@@@ static void apple_dart_release_group(vo
                mutex_lock(&apple_dart_groups_lock);
        
                for_each_stream_map(i, group_master_cfg, stream_map)
- ------                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ ++++++                for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
                                stream_map->dart->sid2group[sid] = NULL;
        
                kfree(iommu_data);
                mutex_unlock(&apple_dart_groups_lock);
        }
        
+ ++++++static int apple_dart_merge_master_cfg(struct apple_dart_master_cfg *dst,
+ ++++++                                       struct apple_dart_master_cfg *src)
+ ++++++{
+ ++++++        /*
+ ++++++         * We know that this function is only called for groups returned from
+ ++++++         * pci_device_group and that Apple Silicon platforms never spread
+ ++++++         * PCIe devices from the same bus across multiple DARTs, so we can
+ ++++++         * assume that both src and dst use the same single DART.
+ ++++++         */
+ ++++++        if (src->stream_maps[1].dart)
+ ++++++                return -EINVAL;
+ ++++++        if (dst->stream_maps[1].dart)
+ ++++++                return -EINVAL;
+ ++++++        if (src->stream_maps[0].dart != dst->stream_maps[0].dart)
+ ++++++                return -EINVAL;
+ ++++++
+ ++++++        bitmap_or(dst->stream_maps[0].sidmap,
+ ++++++                  dst->stream_maps[0].sidmap,
+ ++++++                  src->stream_maps[0].sidmap,
+ ++++++                  dst->stream_maps[0].dart->num_streams);
+ ++++++        return 0;
+ ++++++}
+ ++++++
        static struct iommu_group *apple_dart_device_group(struct device *dev)
        {
                int i, sid;
                mutex_lock(&apple_dart_groups_lock);
        
                for_each_stream_map(i, cfg, stream_map) {
- ------                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
+ ++++++                for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) {
                                struct iommu_group *stream_group =
                                        stream_map->dart->sid2group[sid];
        
                if (!group)
                        goto out;
        
- ------        group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
- ------        if (!group_master_cfg) {
- ------                iommu_group_put(group);
- ------                goto out;
- ------        }
+ ++++++        group_master_cfg = iommu_group_get_iommudata(group);
+ ++++++        if (group_master_cfg) {
+ ++++++                int ret;
+ +++++ 
       -        iommu_group_set_iommudata(group, group_master_cfg,
       -                apple_dart_release_group);
+ ++++++                ret = apple_dart_merge_master_cfg(group_master_cfg, cfg);
+ ++++++                if (ret) {
+ ++++++                        dev_err(dev, "Failed to merge DART IOMMU groups.\n");
+ ++++++                        iommu_group_put(group);
+ ++++++                        res = ERR_PTR(ret);
+ ++++++                        goto out;
+ ++++++                }
+ ++++++        } else {
+ ++++++                group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg),
+ ++++++                                           GFP_KERNEL);
+ ++++++                if (!group_master_cfg) {
+ ++++++                        iommu_group_put(group);
+ ++++++                        goto out;
+ ++++++                }
       +
- -----         iommu_group_set_iommudata(group, group_master_cfg,
- -----                 apple_dart_release_group);
+ ++++++                iommu_group_set_iommudata(group, group_master_cfg,
+ ++++++                        apple_dart_release_group);
+ ++++++        }
        
                for_each_stream_map(i, cfg, stream_map)
- ------                for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ ++++++                for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
                                stream_map->dart->sid2group[sid] = group;
        
                res = group;
@@@@@@@@@ -780,7 -982,7 -780,7 -780,7 -780,7 -780,7 -780,7 -757,6 +959,6 @@@@@@@@@ static const struct iommu_ops apple_dar
                .owner = THIS_MODULE,
                .default_domain_ops = &(const struct iommu_domain_ops) {
                        .attach_dev     = apple_dart_attach_dev,
-------                 .detach_dev     = apple_dart_detach_dev,
                        .map_pages      = apple_dart_map_pages,
                        .unmap_pages    = apple_dart_unmap_pages,
                        .flush_iotlb_all = apple_dart_flush_iotlb_all,
                }
        };
        
- ------static irqreturn_t apple_dart_irq(int irq, void *dev)
+ ++++++static irqreturn_t apple_dart_t8020_irq(int irq, void *dev)
+ ++++++{
+ ++++++        struct apple_dart *dart = dev;
+ ++++++        const char *fault_name = NULL;
+ ++++++        u32 error = readl(dart->regs + DART_T8020_ERROR);
+ ++++++        u32 error_code = FIELD_GET(DART_T8020_ERROR_CODE, error);
+ ++++++        u32 addr_lo = readl(dart->regs + DART_T8020_ERROR_ADDR_LO);
+ ++++++        u32 addr_hi = readl(dart->regs + DART_T8020_ERROR_ADDR_HI);
+ ++++++        u64 addr = addr_lo | (((u64)addr_hi) << 32);
+ ++++++        u8 stream_idx = FIELD_GET(DART_T8020_ERROR_STREAM, error);
+ ++++++
+ ++++++        if (!(error & DART_T8020_ERROR_FLAG))
+ ++++++                return IRQ_NONE;
+ ++++++
+ ++++++        /* there should only be a single bit set but let's use == to be sure */
+ ++++++        if (error_code == DART_T8020_ERROR_READ_FAULT)
+ ++++++                fault_name = "READ FAULT";
+ ++++++        else if (error_code == DART_T8020_ERROR_WRITE_FAULT)
+ ++++++                fault_name = "WRITE FAULT";
+ ++++++        else if (error_code == DART_T8020_ERROR_NO_PTE)
+ ++++++                fault_name = "NO PTE FOR IOVA";
+ ++++++        else if (error_code == DART_T8020_ERROR_NO_PMD)
+ ++++++                fault_name = "NO PMD FOR IOVA";
+ ++++++        else if (error_code == DART_T8020_ERROR_NO_TTBR)
+ ++++++                fault_name = "NO TTBR FOR IOVA";
+ ++++++        else
+ ++++++                fault_name = "unknown";
+ ++++++
+ ++++++        dev_err_ratelimited(
+ ++++++                dart->dev,
+ ++++++                "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
+ ++++++                error, stream_idx, error_code, fault_name, addr);
+ ++++++
+ ++++++        writel(error, dart->regs + DART_T8020_ERROR);
+ ++++++        return IRQ_HANDLED;
+ ++++++}
+ ++++++
+ ++++++static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
        {
                struct apple_dart *dart = dev;
                const char *fault_name = NULL;
- ------        u32 error = readl(dart->regs + DART_ERROR);
- ------        u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
- ------        u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
- ------        u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
+ ++++++        u32 error = readl(dart->regs + DART_T8110_ERROR);
+ ++++++        u32 error_code = FIELD_GET(DART_T8110_ERROR_CODE, error);
+ ++++++        u32 addr_lo = readl(dart->regs + DART_T8110_ERROR_ADDR_LO);
+ ++++++        u32 addr_hi = readl(dart->regs + DART_T8110_ERROR_ADDR_HI);
                u64 addr = addr_lo | (((u64)addr_hi) << 32);
- ------        u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);
+ ++++++        u8 stream_idx = FIELD_GET(DART_T8110_ERROR_STREAM, error);
        
- ------        if (!(error & DART_ERROR_FLAG))
+ ++++++        if (!(error & DART_T8110_ERROR_FLAG))
                        return IRQ_NONE;
        
                /* there should only be a single bit set but let's use == to be sure */
- ------        if (error_code == DART_ERROR_READ_FAULT)
+ ++++++        if (error_code == DART_T8110_ERROR_READ_FAULT)
                        fault_name = "READ FAULT";
- ------        else if (error_code == DART_ERROR_WRITE_FAULT)
+ ++++++        else if (error_code == DART_T8110_ERROR_WRITE_FAULT)
                        fault_name = "WRITE FAULT";
- ------        else if (error_code == DART_ERROR_NO_PTE)
+ ++++++        else if (error_code == DART_T8110_ERROR_NO_PTE)
                        fault_name = "NO PTE FOR IOVA";
- ------        else if (error_code == DART_ERROR_NO_PMD)
+ ++++++        else if (error_code == DART_T8110_ERROR_NO_PMD)
                        fault_name = "NO PMD FOR IOVA";
- ------        else if (error_code == DART_ERROR_NO_TTBR)
+ ++++++        else if (error_code == DART_T8110_ERROR_NO_PGD)
+ ++++++                fault_name = "NO PGD FOR IOVA";
+ ++++++        else if (error_code == DART_T8110_ERROR_NO_TTBR)
                        fault_name = "NO TTBR FOR IOVA";
                else
                        fault_name = "unknown";
                        "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
                        error, stream_idx, error_code, fault_name, addr);
        
- ------        writel(error, dart->regs + DART_ERROR);
+ ++++++        writel(error, dart->regs + DART_T8110_ERROR);
                return IRQ_HANDLED;
        }
        
        static int apple_dart_probe(struct platform_device *pdev)
        {
                int ret;
- ------        u32 dart_params[2];
+ ++++++        u32 dart_params[4];
                struct resource *res;
                struct apple_dart *dart;
                struct device *dev = &pdev->dev;
                if (ret)
                        return ret;
        
- ------        ret = apple_dart_hw_reset(dart);
- ------        if (ret)
- ------                goto err_clk_disable;
- ------
                dart_params[0] = readl(dart->regs + DART_PARAMS1);
                dart_params[1] = readl(dart->regs + DART_PARAMS2);
- ------        dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
- ------        dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
+ ++++++        dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]);
+ ++++++        dart->supports_bypass = dart_params[1] & DART_PARAMS2_BYPASS_SUPPORT;
+ ++++++
+ ++++++        switch (dart->hw->type) {
+ ++++++        case DART_T8020:
+ ++++++        case DART_T6000:
+ ++++++                dart->ias = 32;
+ ++++++                dart->oas = dart->hw->oas;
+ ++++++                dart->num_streams = dart->hw->max_sid_count;
+ ++++++                break;
+ ++++++
+ ++++++        case DART_T8110:
+ ++++++                dart_params[2] = readl(dart->regs + DART_T8110_PARAMS3);
+ ++++++                dart_params[3] = readl(dart->regs + DART_T8110_PARAMS4);
+ ++++++                dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
+ ++++++                dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
+ ++++++                dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+ ++++++                break;
+ ++++++        }
+ ++++++
+ ++++++        if (dart->num_streams > DART_MAX_STREAMS) {
+ ++++++                dev_err(&pdev->dev, "Too many streams (%d > %d)\n",
+ ++++++                        dart->num_streams, DART_MAX_STREAMS);
+ ++++++                ret = -EINVAL;
+ ++++++                goto err_clk_disable;
+ ++++++        }
+ ++++++
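Why this check is done at runtime: on T8110 the stream count is read back from the PARAMS4 register rather than fixed by the compatible string, while the driver's per-stream bookkeeping (the sid2group table and the sidmap bitmaps walked with for_each_set_bit() above) is presumably sized statically at DART_MAX_STREAMS, so a hardware value above that cap has to be rejected rather than silently truncated.
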
                dart->force_bypass = dart->pgsize > PAGE_SIZE;
        
- ------        ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
+ ++++++        ret = apple_dart_hw_reset(dart);
+ ++++++        if (ret)
+ ++++++                goto err_clk_disable;
+ ++++++
+ ++++++        ret = request_irq(dart->irq, dart->hw->irq_handler, IRQF_SHARED,
                                  "apple-dart fault handler", dart);
                if (ret)
                        goto err_clk_disable;
        
                dev_info(
                        &pdev->dev,
- ------                "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
- ------                dart->pgsize, dart->supports_bypass, dart->force_bypass);
+ ++++++                "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
+ ++++++                dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
                return 0;
        
        err_sysfs_remove:
@@@@@@@@@ -924,16 -1190,123 -924,16 -924,16 -924,16 -924,16 -924,16 -900,16 +1166,123 @@@@@@@@@ static int apple_dart_remove(struct pla
        }
        
        static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ ++++++        .type = DART_T8020,
+ ++++++        .irq_handler = apple_dart_t8020_irq,
+ ++++++        .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
                .oas = 36,
                .fmt = APPLE_DART,
+ ++++++        .max_sid_count = 16,
+ ++++++
+ ++++++        .enable_streams = DART_T8020_STREAMS_ENABLE,
+ ++++++        .lock = DART_T8020_CONFIG,
+ ++++++        .lock_bit = DART_T8020_CONFIG_LOCK,
+ ++++++
+ ++++++        .error = DART_T8020_ERROR,
+ ++++++
+ ++++++        .tcr = DART_T8020_TCR,
+ ++++++        .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ ++++++        .tcr_disabled = 0,
+ ++++++        .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART,
+ ++++++
+ ++++++        .ttbr = DART_T8020_TTBR,
+ ++++++        .ttbr_valid = DART_T8020_TTBR_VALID,
+ ++++++        .ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
+ ++++++        .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ ++++++        .ttbr_count = 4,
        };
        static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ ++++++        .type = DART_T6000,
+ ++++++        .irq_handler = apple_dart_t8020_irq,
+ ++++++        .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
                .oas = 42,
                .fmt = APPLE_DART2,
+ ++++++        .max_sid_count = 16,
+ ++++++
+ ++++++        .enable_streams = DART_T8020_STREAMS_ENABLE,
+ ++++++        .lock = DART_T8020_CONFIG,
+ ++++++        .lock_bit = DART_T8020_CONFIG_LOCK,
+ ++++++
+ ++++++        .error = DART_T8020_ERROR,
+ ++++++
+ ++++++        .tcr = DART_T8020_TCR,
+ ++++++        .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ ++++++        .tcr_disabled = 0,
+ ++++++        .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART,
+ ++++++
+ ++++++        .ttbr = DART_T8020_TTBR,
+ ++++++        .ttbr_valid = DART_T8020_TTBR_VALID,
+ ++++++        .ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
+ ++++++        .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ ++++++        .ttbr_count = 4,
        };
        
+ ++++++static const struct apple_dart_hw apple_dart_hw_t8110 = {
+ ++++++        .type = DART_T8110,
+ ++++++        .irq_handler = apple_dart_t8110_irq,
+ ++++++        .invalidate_tlb = apple_dart_t8110_hw_invalidate_tlb,
+ ++++++        .fmt = APPLE_DART2,
+ ++++++        .max_sid_count = 256,
+ ++++++
+ ++++++        .enable_streams = DART_T8110_ENABLE_STREAMS,
+ ++++++        .lock = DART_T8110_PROTECT,
+ ++++++        .lock_bit = DART_T8110_PROTECT_TTBR_TCR,
+ ++++++
+ ++++++        .error = DART_T8110_ERROR,
+ ++++++
+ ++++++        .tcr = DART_T8110_TCR,
+ ++++++        .tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
+ ++++++        .tcr_disabled = 0,
+ ++++++        .tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+ ++++++
+ ++++++        .ttbr = DART_T8110_TTBR,
+ ++++++        .ttbr_valid = DART_T8110_TTBR_VALID,
+ ++++++        .ttbr_addr_field_shift = DART_T8110_TTBR_ADDR_FIELD_SHIFT,
+ ++++++        .ttbr_shift = DART_T8110_TTBR_SHIFT,
+ ++++++        .ttbr_count = 1,
+ ++++++};
+ ++++++
+ ++++++static __maybe_unused int apple_dart_suspend(struct device *dev)
+ ++++++{
+ ++++++        struct apple_dart *dart = dev_get_drvdata(dev);
+ ++++++        unsigned int sid, idx;
+ ++++++
+ ++++++        for (sid = 0; sid < dart->num_streams; sid++) {
+ ++++++                dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid));
+ ++++++                for (idx = 0; idx < dart->hw->ttbr_count; idx++)
+ ++++++                        dart->save_ttbr[sid][idx] =
+ ++++++                                readl(dart->regs + DART_TTBR(dart, sid, idx));
+ ++++++        }
+ ++++++
+ ++++++        return 0;
+ ++++++}
+ ++++++
+ ++++++static __maybe_unused int apple_dart_resume(struct device *dev)
+ ++++++{
+ ++++++        struct apple_dart *dart = dev_get_drvdata(dev);
+ ++++++        unsigned int sid, idx;
+ ++++++        int ret;
+ ++++++
+ ++++++        ret = apple_dart_hw_reset(dart);
+ ++++++        if (ret) {
+ ++++++                dev_err(dev, "Failed to reset DART on resume\n");
+ ++++++                return ret;
+ ++++++        }
+ ++++++
+ ++++++        for (sid = 0; sid < dart->num_streams; sid++) {
+ ++++++                for (idx = 0; idx < dart->hw->ttbr_count; idx++)
+ ++++++                        writel(dart->save_ttbr[sid][idx],
+ ++++++                               dart->regs + DART_TTBR(dart, sid, idx));
+ ++++++                writel(dart->save_tcr[sid], dart->regs + DART_TCR(dart, sid));
+ ++++++        }
+ ++++++
+ ++++++        return 0;
+ ++++++}
+ ++++++
+ ++++++DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
+ ++++++
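A note on the PM wiring here: DEFINE_SIMPLE_DEV_PM_OPS() populates only the system-sleep callbacks, and pm_sleep_ptr() in the driver structure below discards the ops pointer entirely when CONFIG_PM_SLEEP is disabled, which is why the two callbacks above are tagged __maybe_unused instead of being wrapped in an #ifdef. The helper is defined in include/linux/pm.h roughly as:

        #define pm_sleep_ptr(_ptr)      PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
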
        static const struct of_device_id apple_dart_of_match[] = {
                { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ ++++++        { .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 },
                { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
                {},
        };
@@@@@@@@@ -944,6 -1317,7 -944,6 -944,6 -944,6 -944,6 -944,6 -920,6 +1293,7 @@@@@@@@@ static struct platform_driver apple_dar
                        .name                   = "apple-dart",
                        .of_match_table         = apple_dart_of_match,
                        .suppress_bind_attrs    = true,
+ ++++++                .pm                     = pm_sleep_ptr(&apple_dart_pm_ops),
                },
                .probe  = apple_dart_probe,
                .remove = apple_dart_remove,
index b0cde22119875eadcfb36cbc637d2a48133a70b6,b0cde22119875eadcfb36cbc637d2a48133a70b6,69fd7fa3e49a4606d9a188903ce98c251e039cf8,b0cde22119875eadcfb36cbc637d2a48133a70b6,b0cde22119875eadcfb36cbc637d2a48133a70b6,b0cde22119875eadcfb36cbc637d2a48133a70b6,b0cde22119875eadcfb36cbc637d2a48133a70b6,7eaa58ced4aea991811c93f5a4a91c089561c34a..483aaaeb6daeac685dbead517c6409f54b2db807
@@@@@@@@@ -185,38 -185,38 -185,43 -185,38 -185,38 -185,38 -185,38 -185,38 +185,43 @@@@@@@@@ static sysmmu_pte_t *page_entry(sysmmu_
                                        lv2table_base(sent)) + lv2ent_offset(iova);
        }
        
-- -----/*
-- ----- * IOMMU fault information register
-- ----- */
-- -----struct sysmmu_fault_info {
-- -----        unsigned int bit;       /* bit number in STATUS register */
-- -----        unsigned short addr_reg; /* register to read VA fault address */
++ +++++struct sysmmu_fault {
++ +++++        sysmmu_iova_t addr;     /* IOVA address that caused fault */
++ +++++        const char *name;       /* human readable fault name */
++ +++++        unsigned int type;      /* fault type for report_iommu_fault() */
++ +++++};
++ +++++
++ +++++struct sysmmu_v1_fault_info {
++ +++++        unsigned short addr_reg; /* register to read IOVA fault address */
                const char *name;       /* human readable fault name */
                unsigned int type;      /* fault type for report_iommu_fault */
        };
        
-- -----static const struct sysmmu_fault_info sysmmu_faults[] = {
-- -----        { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
-- -----        { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
-- -----        { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
-- -----        { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
-- -----        { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
-- -----        { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
-- -----        { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
-- -----        { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
++ +++++static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
++ +++++        { REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
++ +++++        { REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
++ +++++        { REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
++ +++++        { REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
++ +++++        { REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
++ +++++        { REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
++ +++++        { REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
++ +++++        { REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
++ +++++};
++ +++++
++ +++++/* SysMMU v5 has the same faults for AR (bits 0..4) and AW (bits 16..20) */
++ +++++static const char * const sysmmu_v5_fault_names[] = {
++ +++++        "PTW",
++ +++++        "PAGE",
++ +++++        "MULTI-HIT",
++ +++++        "ACCESS PROTECTION",
++ +++++        "SECURITY PROTECTION"
        };
        
-- -----static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
-- -----        { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
-- -----        { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
-- -----        { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
-- -----        { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
-- -----        { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
-- -----        { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
-- -----        { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
-- -----        { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
-- -----        { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
-- -----        { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
++ +++++static const char * const sysmmu_v7_fault_names[] = {
++ +++++        "PTW",
++ +++++        "PAGE",
++ +++++        "ACCESS PROTECTION",
++ +++++        "RESERVED"
        };
        
        /*
@@@@@@@@@ -246,9 -246,9 -251,12 -246,9 -246,9 -246,9 -246,9 -246,9 +251,12 @@@@@@@@@ struct exynos_iommu_domain 
                struct iommu_domain domain; /* generic domain data structure */
        };
        
++ +++++struct sysmmu_drvdata;
++ +++++
        /*
         * SysMMU version specific data. Contains offsets for the registers which can
         * be found in different SysMMU variants, but have different offset values.
++ +++++ * Also contains version specific callbacks to abstract the hardware.
         */
        struct sysmmu_variant {
                u32 pt_base;            /* page table base address (physical) */
                u32 flush_end;          /* end address of range invalidation */
                u32 int_status;         /* interrupt status information */
                u32 int_clear;          /* clear the interrupt */
++ +++++        u32 fault_va;           /* IOVA address that caused fault */
++ +++++        u32 fault_info;         /* fault transaction info */
++ +++++
++ +++++        int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
++ +++++                              struct sysmmu_fault *fault);
        };
        
        /*
@@@@@@@@@ -293,6 -293,6 -306,59 -293,6 -293,6 -293,6 -293,6 -293,6 +306,59 @@@@@@@@@ struct sysmmu_drvdata 
        
        #define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
        
++ +++++static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
++ +++++                                           unsigned int itype,
++ +++++                                           struct sysmmu_fault *fault)
++ +++++{
++ +++++        const struct sysmmu_v1_fault_info *finfo;
++ +++++
++ +++++        if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
++ +++++                return -ENXIO;
++ +++++
++ +++++        finfo = &sysmmu_v1_faults[itype];
++ +++++        fault->addr = readl(data->sfrbase + finfo->addr_reg);
++ +++++        fault->name = finfo->name;
++ +++++        fault->type = finfo->type;
++ +++++
++ +++++        return 0;
++ +++++}
++ +++++
++ +++++static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
++ +++++                                           unsigned int itype,
++ +++++                                           struct sysmmu_fault *fault)
++ +++++{
++ +++++        unsigned int addr_reg;
++ +++++
++ +++++        if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
++ +++++                fault->type = IOMMU_FAULT_READ;
++ +++++                addr_reg = REG_V5_FAULT_AR_VA;
++ +++++        } else if (itype >= 16 && itype <= 20) {
++ +++++                fault->type = IOMMU_FAULT_WRITE;
++ +++++                addr_reg = REG_V5_FAULT_AW_VA;
++ +++++                itype -= 16;
++ +++++        } else {
++ +++++                return -ENXIO;
++ +++++        }
++ +++++
++ +++++        fault->name = sysmmu_v5_fault_names[itype];
++ +++++        fault->addr = readl(data->sfrbase + addr_reg);
++ +++++
++ +++++        return 0;
++ +++++}
++ +++++
++ +++++static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
++ +++++                                           unsigned int itype,
++ +++++                                           struct sysmmu_fault *fault)
++ +++++{
++ +++++        u32 info = readl(SYSMMU_REG(data, fault_info));
++ +++++
++ +++++        fault->addr = readl(SYSMMU_REG(data, fault_va));
++ +++++        fault->name = sysmmu_v7_fault_names[itype % 4];
++ +++++        fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
++ +++++
++ +++++        return 0;
++ +++++}
++ +++++
        /* SysMMU v1..v3 */
        static const struct sysmmu_variant sysmmu_v1_variant = {
                .flush_all      = 0x0c,
                .pt_base        = 0x14,
                .int_status     = 0x18,
                .int_clear      = 0x1c,
++ +++++
++ +++++        .get_fault_info = exynos_sysmmu_v1_get_fault_info,
        };
        
-- -----/* SysMMU v5 and v7 (non-VM capable) */
++ +++++/* SysMMU v5 */
        static const struct sysmmu_variant sysmmu_v5_variant = {
                .pt_base        = 0x0c,
                .flush_all      = 0x10,
                .flush_end      = 0x24,
                .int_status     = 0x60,
                .int_clear      = 0x64,
-- ---- /* SysMMU v7: VM capable register set */
++ +++++
++ +++++        .get_fault_info = exynos_sysmmu_v5_get_fault_info,
       +};
       +
       -/* SysMMU v7: VM capable register set */
++ +++++/* SysMMU v7: non-VM capable register layout */
++ +++++static const struct sysmmu_variant sysmmu_v7_variant = {
++ +++++        .pt_base        = 0x0c,
++ +++++        .flush_all      = 0x10,
++ +++++        .flush_entry    = 0x14,
++ +++++        .flush_range    = 0x18,
++ +++++        .flush_start    = 0x20,
++ +++++        .flush_end      = 0x24,
++ +++++        .int_status     = 0x60,
++ +++++        .int_clear      = 0x64,
++ +++++        .fault_va       = 0x70,
++ +++++        .fault_info     = 0x78,
++ +++++
++ +++++        .get_fault_info = exynos_sysmmu_v7_get_fault_info,
++ ++++ };
++ ++++ 
++ +++++/* SysMMU v7: VM capable register layout */
        static const struct sysmmu_variant sysmmu_v7_vm_variant = {
                .pt_base        = 0x800c,
                .flush_all      = 0x8010,
                .flush_end      = 0x8024,
                .int_status     = 0x60,
                .int_clear      = 0x64,
++ +++++        .fault_va       = 0x1000,
++ +++++        .fault_info     = 0x1004,
++ +++++
++ +++++        .get_fault_info = exynos_sysmmu_v7_get_fault_info,
        };
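Comparing the two v7 variant tables: the VM-capable layout relocates the page-table and TLB-flush registers from the 0x0c..0x24 range up to 0x800c..0x8024, keeps int_status/int_clear at 0x60/0x64, and moves the fault registers from 0x70/0x78 to 0x1000/0x1004. Both layouts share exynos_sysmmu_v7_get_fault_info(), which only touches the hardware through the SYSMMU_REG() indirection, so the offset differences stay contained in these tables.
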
        
        static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@@@@@@@@ -446,75 -446,75 -536,63 -446,75 -446,75 -446,75 -446,75 -446,75 +536,63 @@@@@@@@@ static void __sysmmu_get_version(struc
                        if (data->has_vcr)
                                data->variant = &sysmmu_v7_vm_variant;
                        else
-- -----                        data->variant = &sysmmu_v5_variant;
++ +++++                        data->variant = &sysmmu_v7_variant;
                }
        
                __sysmmu_disable_clocks(data);
        }
        
        static void show_fault_information(struct sysmmu_drvdata *data,
-- -----                                   const struct sysmmu_fault_info *finfo,
-- -----                                   sysmmu_iova_t fault_addr)
++ +++++                                   const struct sysmmu_fault *fault)
        {
                sysmmu_pte_t *ent;
        
-- -----        dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
-- -----                dev_name(data->master), finfo->name, fault_addr);
++ +++++        dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
++ +++++                dev_name(data->master),
++ +++++                fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
++ +++++                fault->name, fault->addr);
                dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
-- -----        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
++ +++++        ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
                dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
                if (lv1ent_page(ent)) {
-- -----                ent = page_entry(ent, fault_addr);
++ +++++                ent = page_entry(ent, fault->addr);
                        dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
                }
        }
        
        static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
        {
-- -----        /* SYSMMU is in blocked state when interrupt occurred. */
                struct sysmmu_drvdata *data = dev_id;
-- -----        const struct sysmmu_fault_info *finfo;
-- -----        unsigned int i, n, itype;
-- -----        sysmmu_iova_t fault_addr;
++ +++++        unsigned int itype;
++ +++++        struct sysmmu_fault fault;
                int ret = -ENOSYS;
        
                WARN_ON(!data->active);
        
-- -----        if (MMU_MAJ_VER(data->version) < 5) {
-- -----                finfo = sysmmu_faults;
-- -----                n = ARRAY_SIZE(sysmmu_faults);
-- -----        } else {
-- -----                finfo = sysmmu_v5_faults;
-- -----                n = ARRAY_SIZE(sysmmu_v5_faults);
-- -----        }
-- -----
                spin_lock(&data->lock);
-- -----
                clk_enable(data->clk_master);
        
                itype = __ffs(readl(SYSMMU_REG(data, int_status)));
-- -----        for (i = 0; i < n; i++, finfo++)
-- -----                if (finfo->bit == itype)
-- -----                        break;
-- -----        /* unknown/unsupported fault */
-- -----        BUG_ON(i == n);
-- -----
-- -----        /* print debug message */
-- -----        fault_addr = readl(data->sfrbase + finfo->addr_reg);
-- -----        show_fault_information(data, finfo, fault_addr);
-- -----
-- -----        if (data->domain)
-- -----                ret = report_iommu_fault(&data->domain->domain,
-- -----                                        data->master, fault_addr, finfo->type);
-- -----        /* fault is not recovered by fault handler */
-- -----        BUG_ON(ret != 0);
++ +++++        ret = data->variant->get_fault_info(data, itype, &fault);
++ +++++        if (ret) {
++ +++++                dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
++ +++++                goto out;
++ +++++        }
++ +++++        show_fault_information(data, &fault);
        
++ +++++        if (data->domain) {
++ +++++                ret = report_iommu_fault(&data->domain->domain, data->master,
++ +++++                                         fault.addr, fault.type);
++ +++++        }
++ +++++        if (ret)
++ +++++                panic("Unrecoverable System MMU Fault!");
++ +++++
++ +++++out:
                writel(1 << itype, SYSMMU_REG(data, int_clear));
        
++ +++++        /* SysMMU is in a blocked state when an interrupt occurs */
                sysmmu_unblock(data);
-- -----
                clk_disable(data->clk_master);
-- -----
                spin_unlock(&data->lock);
        
                return IRQ_HANDLED;
@@@@@@@@@ -1346,8 -1346,8 -1424,8 -1346,8 -1346,8 -1346,8 -1346,8 -1346,10 +1424,10 @@@@@@@@@ static void exynos_iommu_release_device
                        struct iommu_group *group = iommu_group_get(dev);
        
                        if (group) {
+++++++ #ifndef CONFIG_ARM
                                WARN_ON(owner->domain !=
                                        iommu_group_default_domain(group));
+++++++ #endif
                                exynos_iommu_detach_device(owner->domain, dev);
                                iommu_group_put(group);
                        }
@@@@@@@@@ -1398,13 -1398,13 -1476,13 -1398,13 -1398,13 -1398,13 -1398,13 -1400,15 +1478,15 @@@@@@@@@ static int exynos_iommu_of_xlate(struc
        static const struct iommu_ops exynos_iommu_ops = {
                .domain_alloc = exynos_iommu_domain_alloc,
                .device_group = generic_device_group,
+++++++ #ifdef CONFIG_ARM
+++++++         .set_platform_dma_ops = exynos_iommu_release_device,
+++++++ #endif
                .probe_device = exynos_iommu_probe_device,
                .release_device = exynos_iommu_release_device,
                .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
                .of_xlate = exynos_iommu_of_xlate,
                .default_domain_ops = &(const struct iommu_domain_ops) {
                        .attach_dev     = exynos_iommu_attach_device,
-------                 .detach_dev     = exynos_iommu_detach_device,
                        .map            = exynos_iommu_map,
                        .unmap          = exynos_iommu_unmap,
                        .iova_to_phys   = exynos_iommu_iova_to_phys,
@@@@@@@@@ -1446,7 -1446,7 -1524,7 -1446,7 -1446,7 -1446,7 -1446,7 -1450,7 +1528,7 @@@@@@@@@ static int __init exynos_iommu_init(voi
        
                return 0;
        err_reg_driver:
-- -----        platform_driver_unregister(&exynos_sysmmu_driver);
++ +++++        kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
        err_zero_lv2:
                kmem_cache_destroy(lv2table_kmem_cache);
                return ret;
index 59df7e42fd533cae18444d83e5da9d0083c7a8aa,59df7e42fd533cae18444d83e5da9d0083c7a8aa,59df7e42fd533cae18444d83e5da9d0083c7a8aa,59df7e42fd533cae18444d83e5da9d0083c7a8aa,59df7e42fd533cae18444d83e5da9d0083c7a8aa,34b45ae990254d16b3970cee1c2c11a93accda95,59df7e42fd533cae18444d83e5da9d0083c7a8aa,a1a66798e1f06ce17c24d1826e9513f80cdf2c64..86ee06ac058b735689abbc1311432eae66363fdc
        #include <linux/crash_dump.h>
        #include <linux/dma-direct.h>
        #include <linux/dmi.h>
----- --#include <linux/intel-svm.h>
        #include <linux/memory.h>
        #include <linux/pci.h>
        #include <linux/pci-ats.h>
        #include "../iommu-sva.h"
        #include "pasid.h"
        #include "cap_audit.h"
+++++ ++#include "perfmon.h"
        
        #define ROOT_SIZE               VTD_PAGE_SIZE
        #define CONTEXT_SIZE            VTD_PAGE_SIZE
@@@@@@@@@ -362,12 -362,12 -362,12 -362,12 -362,12 -362,12 -362,12 -362,12 +362,12 @@@@@@@@@ static int __init intel_iommu_setup(cha
        }
        __setup("intel_iommu=", intel_iommu_setup);
        
------- void *alloc_pgtable_page(int node)
+++++++ void *alloc_pgtable_page(int node, gfp_t gfp)
        {
                struct page *page;
                void *vaddr = NULL;
        
-------         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+++++++         page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
                if (page)
                        vaddr = page_address(page);
                return vaddr;
@@@@@@@@@ -612,7 -612,7 -612,7 -612,7 -612,7 -612,7 -612,7 -612,7 +612,7 @@@@@@@@@ struct context_entry *iommu_context_add
                        if (!alloc)
                                return NULL;
        
-------                 context = alloc_pgtable_page(iommu->node);
+++++++                 context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
                        if (!context)
                                return NULL;
        
@@@@@@@@@ -908,7 -908,7 -908,7 -908,7 -908,7 -908,7 -908,7 -908,8 +908,8 @@@@@@@@@ pgtable_walk
        #endif
        
        static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-------                                       unsigned long pfn, int *target_level)
+++++++                                       unsigned long pfn, int *target_level,
+++++++                                       gfp_t gfp)
        {
                struct dma_pte *parent, *pte;
                int level = agaw_to_level(domain->agaw);
                        if (!dma_pte_present(pte)) {
                                uint64_t pteval;
        
-------                         tmp_page = alloc_pgtable_page(domain->nid);
+++++++                         tmp_page = alloc_pgtable_page(domain->nid, gfp);
        
                                if (!tmp_page)
                                        return NULL;
@@@@@@@@@ -1186,7 -1186,7 -1186,7 -1186,7 -1186,7 -1186,7 -1186,7 -1187,7 +1187,7 @@@@@@@@@ static int iommu_alloc_root_entry(struc
        {
                struct root_entry *root;
        
-------         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
+++++++         root = (struct root_entry *)alloc_pgtable_page(iommu->node, GFP_ATOMIC);
                if (!root) {
                        pr_err("Allocating root entry for %s failed\n",
                                iommu->name);
@@@@@@@@@ -2150,7 -2150,7 -2150,7 -2150,7 -2150,7 -2150,7 -2150,7 -2151,8 +2151,8 @@@@@@@@@ static void switch_to_super_page(struc
        
                while (start_pfn <= end_pfn) {
                        if (!pte)
-------                         pte = pfn_to_dma_pte(domain, start_pfn, &level);
+++++++                         pte = pfn_to_dma_pte(domain, start_pfn, &level,
+++++++                                              GFP_ATOMIC);
        
                        if (dma_pte_present(pte)) {
                                dma_pte_free_pagetable(domain, start_pfn,
        
        static int
        __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
-------                  unsigned long phys_pfn, unsigned long nr_pages, int prot)
+++++++                  unsigned long phys_pfn, unsigned long nr_pages, int prot,
+++++++                  gfp_t gfp)
        {
                struct dma_pte *first_pte = NULL, *pte = NULL;
                unsigned int largepage_lvl = 0;
                                largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
                                                phys_pfn, nr_pages);
        
-------                         pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+++++++                         pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,
+++++++                                              gfp);
                                if (!pte)
                                        return -ENOMEM;
                                first_pte = pte;
@@@@@@@@@ -2368,7 -2368,7 -2368,7 -2368,7 -2368,7 -2368,7 -2368,7 -2372,7 +2372,7 @@@@@@@@@ static int iommu_domain_identity_map(st
        
                return __domain_mapping(domain, first_vpfn,
                                        first_vpfn, last_vpfn - first_vpfn + 1,
-------                                 DMA_PTE_READ|DMA_PTE_WRITE);
+++++++                                 DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL);
        }
        
        static int md_domain_init(struct dmar_domain *domain, int guest_width);
@@@@@@@@@ -2676,7 -2676,7 -2676,7 -2676,7 -2676,7 -2676,7 -2676,7 -2680,7 +2680,7 @@@@@@@@@ static int copy_context_table(struct in
                                if (!old_ce)
                                        goto out;
        
-------                         new_ce = alloc_pgtable_page(iommu->node);
+++++++                         new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
                                if (!new_ce)
                                        goto out_unmap;
        
@@@@@@@@@ -4005,7 -4005,7 -4005,7 -4005,7 -4005,7 -4005,8 -4005,7 -4009,7 +4009,8 @@@@@@@@@ int __init intel_iommu_init(void
                         * is likely to be much lower than the overhead of synchronizing
                         * the virtual and physical IOMMU page-tables.
                         */
----- --                if (cap_caching_mode(iommu->cap)) {
+++++ ++                if (cap_caching_mode(iommu->cap) &&
+++++ ++                    !first_level_by_default(IOMMU_DOMAIN_DMA)) {
                                pr_info_once("IOMMU batching disallowed due to virtualization\n");
                                iommu_set_dma_strict();
                        }
                                               intel_iommu_groups,
                                               "%s", iommu->name);
                        iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+++++ ++
+++++ ++                iommu_pmu_register(iommu);
                }
                up_read(&dmar_global_lock);
        
@@@@@@@@@ -4136,7 -4136,7 -4136,7 -4136,7 -4136,7 -4139,7 -4136,7 -4140,7 +4143,7 @@@@@@@@@ static int md_domain_init(struct dmar_d
                domain->max_addr = 0;
        
                /* always allocate the top pgd */
-------         domain->pgd = alloc_pgtable_page(domain->nid);
+++++++         domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
                if (!domain->pgd)
                        return -ENOMEM;
                domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@@@@@@@@ -4298,7 -4298,7 -4298,7 -4298,7 -4298,7 -4301,7 -4298,7 -4302,7 +4305,7 @@@@@@@@@ static int intel_iommu_map(struct iommu
                   the low bits of hpa would take us onto the next page */
                size = aligned_nrpages(hpa, size);
                return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
-------                                 hpa >> VTD_PAGE_SHIFT, size, prot);
+++++++                                 hpa >> VTD_PAGE_SHIFT, size, prot, gfp);
        }
        
        static int intel_iommu_map_pages(struct iommu_domain *domain,
@@@@@@@@@ -4333,7 -4333,7 -4333,7 -4333,7 -4333,7 -4336,7 -4333,7 -4337,8 +4340,8 @@@@@@@@@ static size_t intel_iommu_unmap(struct 
        
                /* Cope with horrid API which requires us to unmap more than the
                   size argument if it happens to be a large-page mapping. */
-------         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+++++++         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+++++++                                GFP_ATOMIC));
        
                if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                        size = VTD_PAGE_SIZE << level_to_offset_bits(level);
                if (dmar_domain->max_addr == iova + size)
                        dmar_domain->max_addr = iova;
        
----- --        iommu_iotlb_gather_add_page(domain, gather, iova, size);
+++++ ++        /*
+++++ ++         * We do not use page-selective IOTLB invalidation in the flush
+++++ ++         * queue, so there is no need to track pages or sync the IOTLB.
+++++ ++         */
+++++ ++        if (!iommu_iotlb_gather_queued(gather))
+++++ ++                iommu_iotlb_gather_add_page(domain, gather, iova, size);
        
                return size;
        }
@@@@@@@@@ -4392,7 -4392,7 -4392,7 -4392,7 -4392,7 -4400,7 -4392,7 -4397,8 +4405,8 @@@@@@@@@ static phys_addr_t intel_iommu_iova_to_
                int level = 0;
                u64 phys = 0;
        
-------         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
+++++++         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+++++++                              GFP_ATOMIC);
                if (pte && dma_pte_present(pte))
                        phys = dma_pte_addr(pte) +
                                (iova & (BIT_MASK(level_to_offset_bits(level) +
@@@@@@@@@ -4642,8 -4642,8 -4642,8 -4642,8 -4642,8 -4650,12 -4642,8 -4648,8 +4656,12 @@@@@@@@@ static int intel_iommu_enable_sva(struc
                        return -EINVAL;
        
                ret = iopf_queue_add_device(iommu->iopf_queue, dev);
----- --        if (!ret)
----- --                ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+++++ ++        if (ret)
+++++ ++                return ret;
+++++ ++
+++++ ++        ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+++++ ++        if (ret)
+++++ ++                iopf_queue_remove_device(iommu->iopf_queue, dev);
        
                return ret;
        }
@@@@@@@@@ -4655,8 -4655,8 -4655,8 -4655,8 -4655,8 -4667,12 -4655,8 -4661,8 +4673,12 @@@@@@@@@ static int intel_iommu_disable_sva(stru
                int ret;
        
                ret = iommu_unregister_device_fault_handler(dev);
----- --        if (!ret)
----- --                ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
+++++ ++        if (ret)
+++++ ++                return ret;
+++++ ++
+++++ ++        ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
+++++ ++        if (ret)
+++++ ++                iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
        
                return ret;
        }
@@@@@@@@@ -5023,3 -5023,3 -5023,3 -5023,3 -5023,3 -5039,59 -5023,3 -5029,3 +5045,59 @@@@@@@@@ void quirk_extra_dev_tlb_flush(struct d
                                                 pasid, qdep, address, mask);
                }
        }
+++++ ++
+++++ ++#define ecmd_get_status_code(res)       (((res) & 0xff) >> 1)
+++++ ++
+++++ ++/*
+++++ ++ * Function to submit a command to the enhanced command interface. The
+++++ ++ * valid enhanced command descriptions are defined in Table 47 of the
+++++ ++ * VT-d spec. The VT-d hardware implementation may support some but not
+++++ ++ * all commands, which can be determined by checking the Enhanced
+++++ ++ * Command Capability Register.
+++++ ++ *
+++++ ++ * Return values:
+++++ ++ *  - 0: Command successful without any error;
+++++ ++ *  - Negative: software error value;
+++++ ++ *  - Nonzero positive: failure status code defined in Table 48.
+++++ ++ */
+++++ ++int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
+++++ ++{
+++++ ++        unsigned long flags;
+++++ ++        u64 res;
+++++ ++        int ret;
+++++ ++
+++++ ++        if (!cap_ecmds(iommu->cap))
+++++ ++                return -ENODEV;
+++++ ++
+++++ ++        raw_spin_lock_irqsave(&iommu->register_lock, flags);
+++++ ++
+++++ ++        res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
+++++ ++        if (res & DMA_ECMD_ECRSP_IP) {
+++++ ++                ret = -EBUSY;
+++++ ++                goto err;
+++++ ++        }
+++++ ++
+++++ ++        /*
+++++ ++         * Unconditionally write operand B, because
+++++ ++         * - There is no side effect if an ecmd doesn't require an
+++++ ++         *   operand B, even when we set the register to some value.
+++++ ++         * - This function is not invoked in any critical path, so the
+++++ ++         *   extra MMIO write raises no performance concern.
+++++ ++         */
+++++ ++        dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
+++++ ++        dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));
+++++ ++
+++++ ++        IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
+++++ ++                      !(res & DMA_ECMD_ECRSP_IP), res);
+++++ ++
+++++ ++        if (res & DMA_ECMD_ECRSP_IP) {
+++++ ++                ret = -ETIMEDOUT;
+++++ ++                goto err;
+++++ ++        }
+++++ ++
+++++ ++        ret = ecmd_get_status_code(res);
+++++ ++err:
+++++ ++        raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+++++ ++
+++++ ++        return ret;
+++++ ++}
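
To make the tri-state return convention concrete, a minimal, hypothetical caller might look like this (iommu_pmu_set_freeze is an illustrative name, not part of this patch; DMA_ECMD_FREEZE and DMA_ECMD_UNFREEZE are the opcodes defined in iommu.h further down):

        /* Sketch: freeze or unfreeze the IOMMU PMU counters and fold the
         * tri-state return of ecmd_submit_sync() into a plain errno. */
        static int iommu_pmu_set_freeze(struct intel_iommu *iommu, bool freeze)
        {
                int ret = ecmd_submit_sync(iommu, freeze ? DMA_ECMD_FREEZE :
                                           DMA_ECMD_UNFREEZE, 0, 0);

                if (ret < 0)    /* software error, e.g. -ENODEV or -EBUSY */
                        return ret;
                if (ret)        /* nonzero status code from Table 48 */
                        return -EIO;
                return 0;
        }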
index 06e61e4748567a69cb891e07a7e077eee9dad2e1,06e61e4748567a69cb891e07a7e077eee9dad2e1,06e61e4748567a69cb891e07a7e077eee9dad2e1,06e61e4748567a69cb891e07a7e077eee9dad2e1,06e61e4748567a69cb891e07a7e077eee9dad2e1,d7c61eb20f6fd2fb690187abb49f637024bc5126,06e61e4748567a69cb891e07a7e077eee9dad2e1,ca9a035e0110af02f54c02bf2c616568e75c094e..d6df3b8658129a39721b4a011c6272b5ca574b38
        #include <linux/ioasid.h>
        #include <linux/bitfield.h>
        #include <linux/xarray.h>
+++++ ++#include <linux/perf_event.h>
        
        #include <asm/cacheflush.h>
        #include <asm/iommu.h>
        #define DMAR_MTRR_PHYSMASK8_REG 0x208
        #define DMAR_MTRR_PHYSBASE9_REG 0x210
        #define DMAR_MTRR_PHYSMASK9_REG 0x218
+++++ ++#define DMAR_PERFCAP_REG        0x300
+++++ ++#define DMAR_PERFCFGOFF_REG     0x310
+++++ ++#define DMAR_PERFOVFOFF_REG     0x318
+++++ ++#define DMAR_PERFCNTROFF_REG    0x31c
+++++ ++#define DMAR_PERFINTRSTS_REG    0x324
+++++ ++#define DMAR_PERFINTRCTL_REG    0x328
+++++ ++#define DMAR_PERFEVNTCAP_REG    0x380
+++++ ++#define DMAR_ECMD_REG           0x400
+++++ ++#define DMAR_ECEO_REG           0x408
+++++ ++#define DMAR_ECRSP_REG          0x410
+++++ ++#define DMAR_ECCAP_REG          0x430
        #define DMAR_VCCAP_REG          0xe30 /* Virtual command capability register */
        #define DMAR_VCMD_REG           0xe00 /* Virtual command register */
        #define DMAR_VCRSP_REG          0xe10 /* Virtual command response register */
         */
        #define cap_esrtps(c)           (((c) >> 63) & 1)
        #define cap_esirtps(c)          (((c) >> 62) & 1)
+++++ ++#define cap_ecmds(c)            (((c) >> 61) & 1)
        #define cap_fl5lp_support(c)    (((c) >> 60) & 1)
        #define cap_pi_support(c)       (((c) >> 59) & 1)
        #define cap_fl1gp_support(c)    (((c) >> 56) & 1)
         * Extended Capability Register
         */
        
----- --#define ecap_rps(e)             (((e) >> 49) & 0x1)
+++++ ++#define ecap_pms(e)             (((e) >> 51) & 0x1)
+++++ ++#define ecap_rps(e)             (((e) >> 49) & 0x1)
        #define ecap_smpwc(e)           (((e) >> 48) & 0x1)
        #define ecap_flts(e)            (((e) >> 47) & 0x1)
        #define ecap_slts(e)            (((e) >> 46) & 0x1)
        #define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
        #define ecap_sc_support(e)      (((e) >> 7) & 0x1) /* Snooping Control */
        
+++++ ++/*
+++++ ++ * Decoding Perf Capability Register
+++++ ++ */
+++++ ++#define pcap_num_cntr(p)        ((p) & 0xffff)
+++++ ++#define pcap_cntr_width(p)      (((p) >> 16) & 0x7f)
+++++ ++#define pcap_num_event_group(p) (((p) >> 24) & 0x1f)
+++++ ++#define pcap_filters_mask(p)    (((p) >> 32) & 0x1f)
+++++ ++#define pcap_interrupt(p)       (((p) >> 50) & 0x1)
+++++ ++/* The counter stride is calculated as 2 ^ (x+10) bytes */
+++++ ++#define pcap_cntr_stride(p)     (1ULL << ((((p) >> 52) & 0x7) + 10))
+++++ ++
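A worked example of the stride formula: a 3-bit field value of 0 decodes to 1 << 10 = 1024 bytes between successive counters, and the maximum field value of 7 decodes to 1 << 17 = 128 KiB.
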
+++++ ++/*
+++++ ++ * Decoding Perf Event Capability Register
+++++ ++ */
+++++ ++#define pecap_es(p)             ((p) & 0xfffffff)
+++++ ++
        /* Virtual command interface capability */
        #define vccap_pasid(v)          (((v) & DMA_VCS_PAS)) /* PASID allocation */
        
        #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
        #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
        
+++++ ++/* ECMD_REG */
+++++ ++#define DMA_MAX_NUM_ECMD                256
+++++ ++#define DMA_MAX_NUM_ECMDCAP             (DMA_MAX_NUM_ECMD / 64)
+++++ ++#define DMA_ECMD_REG_STEP               8
+++++ ++#define DMA_ECMD_ENABLE                 0xf0
+++++ ++#define DMA_ECMD_DISABLE                0xf1
+++++ ++#define DMA_ECMD_FREEZE                 0xf4
+++++ ++#define DMA_ECMD_UNFREEZE               0xf5
+++++ ++#define DMA_ECMD_OA_SHIFT               16
+++++ ++#define DMA_ECMD_ECRSP_IP               0x1
+++++ ++#define DMA_ECMD_ECCAP3                 3
+++++ ++#define DMA_ECMD_ECCAP3_ECNTS           BIT_ULL(48)
+++++ ++#define DMA_ECMD_ECCAP3_DCNTS           BIT_ULL(49)
+++++ ++#define DMA_ECMD_ECCAP3_FCNTS           BIT_ULL(52)
+++++ ++#define DMA_ECMD_ECCAP3_UFCNTS          BIT_ULL(53)
+++++ ++#define DMA_ECMD_ECCAP3_ESSENTIAL       (DMA_ECMD_ECCAP3_ECNTS |        \
+++++ ++                                         DMA_ECMD_ECCAP3_DCNTS |        \
+++++ ++                                         DMA_ECMD_ECCAP3_FCNTS |        \
+++++ ++                                         DMA_ECMD_ECCAP3_UFCNTS)
+++++ ++
        /* FECTL_REG */
        #define DMA_FECTL_IM (((u32)1) << 31)
        
        
        #define DMA_VCS_PAS     ((u64)1)
        
+++++ ++/* PERFINTRSTS_REG */
+++++ ++#define DMA_PERFINTRSTS_PIS     ((u32)1)
+++++ ++
        #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)                     \
        do {                                                                    \
                cycles_t start_time = get_cycles();                             \
@@@@@@@@@ -438,6 -438,6 -438,6 -438,6 -438,6 -491,11 -438,6 -438,6 +491,11 @@@@@@@@@ struct q_inval 
                int             free_cnt;
        };
        
+++++ ++/* Page Request Queue depth */
+++++ ++#define PRQ_ORDER       4
+++++ ++#define PRQ_RING_MASK   ((0x1000 << PRQ_ORDER) - 0x20)
+++++ ++#define PRQ_DEPTH       ((0x1000 << PRQ_ORDER) >> 5)
+++++ ++
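Working through these constants: PRQ_ORDER = 4 gives a queue of 0x1000 << 4 = 64 KiB. Each page request descriptor occupies 32 bytes (hence the shift by 5), so PRQ_DEPTH comes out to 2048 entries, and PRQ_RING_MASK = 64 KiB - 0x20 = 0xffe0 masks a byte offset down to the start of the last whole descriptor.
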
        struct dmar_pci_notify_info;
        
        #ifdef CONFIG_IRQ_REMAP
@@@@@@@@@ -554,6 -554,6 -554,6 -554,6 -554,6 -612,40 -554,6 -554,6 +612,40 @@@@@@@@@ struct dmar_domain 
                                                   iommu core */
        };
        
+++++ ++/*
+++++ ++ * In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
+++++ ++ * But in practice, existing platforms implement only 14 counters.
+++++ ++ * Setting the max number of counters to 64 should be good enough
+++++ ++ * for a long time. Also, supporting more than 64 counters would
+++++ ++ * require extras, e.g., extra freeze and overflow registers, which
+++++ ++ * are not necessary for now.
+++++ ++ */
+++++ ++#define IOMMU_PMU_IDX_MAX               64
+++++ ++
+++++ ++struct iommu_pmu {
+++++ ++        struct intel_iommu      *iommu;
+++++ ++        u32                     num_cntr;       /* Number of counters */
+++++ ++        u32                     num_eg;         /* Number of event group */
+++++ ++        u32                     cntr_width;     /* Counter width */
+++++ ++        u32                     cntr_stride;    /* Counter Stride */
+++++ ++        u32                     filter;         /* Bitmask of filter support */
+++++ ++        void __iomem            *base;          /* the PerfMon base address */
+++++ ++        void __iomem            *cfg_reg;       /* counter configuration base address */
+++++ ++        void __iomem            *cntr_reg;      /* counter 0 address */
+++++ ++        void __iomem            *overflow;      /* overflow status register */
+++++ ++
+++++ ++        u64                     *evcap;         /* Indicates all supported events */
+++++ ++        u32                     **cntr_evcap;   /* Supported events of each counter */
+++++ ++
+++++ ++        struct pmu              pmu;
+++++ ++        DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
+++++ ++        struct perf_event       *event_list[IOMMU_PMU_IDX_MAX];
+++++ ++        unsigned char           irq_name[16];
+++++ ++};
+++++ ++
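The base and stride fields above imply a regular counter layout; as a rough sketch under that assumption (iommu_pmu_counter_addr is a hypothetical helper, not part of this patch):

        /* Sketch: MMIO address of counter @idx, assuming the counters sit at
         * cntr_reg spaced cntr_stride bytes apart, as decoded from
         * DMAR_PERFCAP_REG by pcap_cntr_stride(). */
        static inline void __iomem *
        iommu_pmu_counter_addr(struct iommu_pmu *iommu_pmu, int idx)
        {
                return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride;
        }
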
+++++ ++#define IOMMU_IRQ_ID_OFFSET_PRQ         (DMAR_UNITS_SUPPORTED)
+++++ ++#define IOMMU_IRQ_ID_OFFSET_PERF        (2 * DMAR_UNITS_SUPPORTED)
+++++ ++
        struct intel_iommu {
                void __iomem    *reg; /* Pointer to hardware regs, virtual addr */
                u64             reg_phys; /* physical address of hw register set */
                u64             cap;
                u64             ecap;
                u64             vccap;
+++++ ++        u64             ecmdcap[DMA_MAX_NUM_ECMDCAP];
                u32             gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
                raw_spinlock_t  register_lock; /* protect register handling */
                int             seq_id; /* sequence id of the iommu */
                int             agaw; /* agaw of this iommu */
                int             msagaw; /* max sagaw of this iommu */
----- --        unsigned int    irq, pr_irq;
+++++ ++        unsigned int    irq, pr_irq, perf_irq;
                u16             segment;     /* PCI segment# */
                unsigned char   name[13];    /* Device Name */
        
        
                struct dmar_drhd_unit *drhd;
                void *perf_statistic;
+++++ ++
+++++ ++        struct iommu_pmu *pmu;
        };
        
        /* PCI domain-device relationship */
@@@@@@@@@ -737,7 -737,7 -737,7 -737,7 -737,7 -832,7 -737,7 -737,7 +832,7 @@@@@@@@@ int qi_submit_sync(struct intel_iommu *
        
        extern int dmar_ir_support(void);
        
------- void *alloc_pgtable_page(int node);
+++++++ void *alloc_pgtable_page(int node, gfp_t gfp);
        void free_pgtable_page(void *vaddr);
        void iommu_flush_write_buffer(struct intel_iommu *iommu);
        struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
@@@@@@@@@ -756,19 -756,19 -756,19 -756,19 -756,19 -851,13 -756,19 -756,19 +851,13 @@@@@@@@@ struct intel_svm_dev 
                struct rcu_head rcu;
                struct device *dev;
                struct intel_iommu *iommu;
----- --        struct iommu_sva sva;
----- --        u32 pasid;
----- --        int users;
                u16 did;
----- --        u16 dev_iotlb:1;
                u16 sid, qdep;
        };
        
        struct intel_svm {
                struct mmu_notifier notifier;
                struct mm_struct *mm;
----- --
----- --        unsigned int flags;
                u32 pasid;
                struct list_head devs;
        };
@@@@@@@@@ -800,6 -800,6 -800,6 -800,6 -800,6 -889,14 -800,6 -800,6 +889,14 @@@@@@@@@ extern const struct iommu_ops intel_iom
        extern int intel_iommu_sm;
        extern int iommu_calculate_agaw(struct intel_iommu *iommu);
        extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+++++ ++int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);
+++++ ++
+++++ ++static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
+++++ ++{
+++++ ++        return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) ==
+++++ ++                DMA_ECMD_ECCAP3_ESSENTIAL;
+++++ ++}
+++++ ++
        extern int dmar_disabled;
        extern int intel_iommu_enabled;
        #else
index fb3c7020028d07cc4e8b598f25b4b4f185c67376,fb3c7020028d07cc4e8b598f25b4b4f185c67376,fb3c7020028d07cc4e8b598f25b4b4f185c67376,fb3c7020028d07cc4e8b598f25b4b4f185c67376,fb3c7020028d07cc4e8b598f25b4b4f185c67376,9d2f05cf61648da53119778d95d073afb41cc57d,fb3c7020028d07cc4e8b598f25b4b4f185c67376,c5bf74e9372d62b04320204e9494559474ee76eb..633e0a4a01e7061a11c959b7c435a7f49f9314a0
@@@@@@@@@ -128,6 -128,6 -128,6 -128,6 -128,6 -128,9 -128,6 -128,6 +128,9 @@@@@@@@@ int intel_pasid_alloc_table(struct devi
                pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
                info->pasid_table = pasid_table;
        
+++++ ++        if (!ecap_coherent(info->iommu->ecap))
+++++ ++                clflush_cache_range(pasid_table->table, size);
+++++ ++
                return 0;
        }
        
@@@@@@@@@ -200,7 -200,7 -200,7 -200,7 -200,7 -203,7 -200,7 -200,7 +203,7 @@@@@@@@@ static struct pasid_entry *intel_pasid_
        retry:
                entries = get_pasid_table_from_pde(&dir[dir_index]);
                if (!entries) {
-------                 entries = alloc_pgtable_page(info->iommu->node);
+++++++                 entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
                        if (!entries)
                                return NULL;
        
                                free_pgtable_page(entries);
                                goto retry;
                        }
+++++ ++                if (!ecap_coherent(info->iommu->ecap)) {
+++++ ++                        clflush_cache_range(entries, VTD_PAGE_SIZE);
+++++ ++                        clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
+++++ ++                }
                }
        
                return &entries[index];
@@@@@@@@@ -364,6 -364,6 -364,6 -364,6 -364,6 -371,16 -364,6 -364,6 +371,16 @@@@@@@@@ static inline void pasid_set_page_snoop
                pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
        }
        
+++++ ++/*
+++++ ++ * Set up the No Execute Enable bit (Bit 133) of a scalable mode PASID
+++++ ++ * entry. It is required when the XD bit of a first level page table
+++++ ++ * entry is about to be set.
+++++ ++ */
+++++ ++static inline void pasid_set_nxe(struct pasid_entry *pe)
+++++ ++{
+++++ ++        pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
+++++ ++}
+++++ ++
        /*
         * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
         * PASID entry.
@@@@@@@@@ -557,6 -557,6 -557,6 -557,6 -557,6 -574,7 -557,6 -557,6 +574,7 @@@@@@@@@ int intel_pasid_setup_first_level(struc
                pasid_set_domain_id(pte, did);
                pasid_set_address_width(pte, iommu->agaw);
                pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+++++ ++        pasid_set_nxe(pte);
        
                /* Setup Present and PASID Granular Transfer Type: */
                pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
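
For reference, the NXE bit's position follows directly from the PASID entry layout: the 512-bit entry is stored as eight 64-bit words, so absolute bit 133 lives in word 133 / 64 = 2 at offset 133 % 64 = 5, which is why pasid_set_nxe() above operates on val[2] with a 1 << 5 mask. Illustrative helpers (not part of the diff):

        /* Map an absolute PASID-entry bit number to (word, bit offset). */
        #define PASID_BIT_WORD(b)       ((b) / 64)      /* 133 -> 2 */
        #define PASID_BIT_OFFSET(b)     ((b) % 64)      /* 133 -> 5 */
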
diff --combined drivers/iommu/iommu.c
index 5f6a85aea501ecc8c2a390cc63f5cb232cbe6e95,de91dd88705bd3dc108ac4d68ed52445d7faa9ff,de91dd88705bd3dc108ac4d68ed52445d7faa9ff,de91dd88705bd3dc108ac4d68ed52445d7faa9ff,5f6a85aea501ecc8c2a390cc63f5cb232cbe6e95,de91dd88705bd3dc108ac4d68ed52445d7faa9ff,de91dd88705bd3dc108ac4d68ed52445d7faa9ff,1a0e41c8839b65877ec9b88359023607864e5b39..b3f847b25b4f34101ffdb3456b0ae22bd2d64c38
@@@@@@@@@ -371,6 -371,6 -371,6 -371,6 -371,6 -371,6 -371,6 -371,30 +371,30 @@@@@@@@@ err_unlock
                return ret;
        }
        
+++++++ static bool iommu_is_attach_deferred(struct device *dev)
+++++++ {
+++++++         const struct iommu_ops *ops = dev_iommu_ops(dev);
+++++++ 
+++++++         if (ops->is_attach_deferred)
+++++++                 return ops->is_attach_deferred(dev);
+++++++ 
+++++++         return false;
+++++++ }
+++++++ 
+++++++ static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
+++++++ {
+++++++         struct iommu_domain *domain = data;
+++++++ 
+++++++         lockdep_assert_held(&dev->iommu_group->mutex);
+++++++ 
+++++++         if (iommu_is_attach_deferred(dev)) {
+++++++                 dev->iommu->attach_deferred = 1;
+++++++                 return 0;
+++++++         }
+++++++ 
+++++++         return __iommu_attach_device(domain, dev);
+++++++ }
+++++++ 
        int iommu_probe_device(struct device *dev)
        {
                const struct iommu_ops *ops;
                 * attach the default domain.
                 */
                if (group->default_domain && !group->owner) {
-------                 ret = __iommu_attach_device(group->default_domain, dev);
+++++++                 ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
                        if (ret) {
                                mutex_unlock(&group->mutex);
                                iommu_group_put(group);
@@@@@@@@@ -774,12 -774,12 -774,12 -774,12 -774,12 -774,12 -774,12 -798,16 +798,16 @@@@@@@@@ struct iommu_group *iommu_group_alloc(v
        
                ret = iommu_group_create_file(group,
                                              &iommu_group_attr_reserved_regions);
-------         if (ret)
+++++++         if (ret) {
+++++++                 kobject_put(group->devices_kobj);
                        return ERR_PTR(ret);
+++++++         }
        
                ret = iommu_group_create_file(group, &iommu_group_attr_type);
-------         if (ret)
+++++++         if (ret) {
+++++++                 kobject_put(group->devices_kobj);
                        return ERR_PTR(ret);
+++++++         }
        
                pr_debug("Allocated group %d\n", group->id);
        
@@@@@@@@@ -930,7 -930,7 -930,7 -930,7 -930,7 -930,7 -930,7 -958,7 +958,7 @@@@@@@@@ map_end
                                if (map_size) {
                                        ret = iommu_map(domain, addr - map_size,
                                                        addr - map_size, map_size,
-------                                                 entry->prot);
+++++++                                                 entry->prot, GFP_KERNEL);
                                        if (ret)
                                                goto out;
                                        map_size = 0;
                return ret;
        }
        
------- static bool iommu_is_attach_deferred(struct device *dev)
------- {
-------         const struct iommu_ops *ops = dev_iommu_ops(dev);
------- 
-------         if (ops->is_attach_deferred)
-------                 return ops->is_attach_deferred(dev);
------- 
-------         return false;
------- }
------- 
        /**
         * iommu_group_add_device - add a device to an iommu group
         * @group: the group into which to add the device (reference should be held)
        
                mutex_lock(&group->mutex);
                list_add_tail(&device->list, &group->devices);
-------         if (group->domain  && !iommu_is_attach_deferred(dev))
-------                 ret = __iommu_attach_device(group->domain, dev);
+++++++         if (group->domain)
+++++++                 ret = iommu_group_do_dma_first_attach(dev, group->domain);
                mutex_unlock(&group->mutex);
                if (ret)
                        goto err_put_group;
@@@@@@@@@ -1776,21 -1776,21 -1776,21 -1776,21 -1776,21 -1776,21 -1776,21 -1794,10 +1794,10 @@@@@@@@@ static void probe_alloc_default_domain(
        
        }
        
------- static int iommu_group_do_dma_attach(struct device *dev, void *data)
------- {
-------         struct iommu_domain *domain = data;
-------         int ret = 0;
------- 
-------         if (!iommu_is_attach_deferred(dev))
-------                 ret = __iommu_attach_device(domain, dev);
------- 
-------         return ret;
------- }
------- 
------- static int __iommu_group_dma_attach(struct iommu_group *group)
+++++++ static int __iommu_group_dma_first_attach(struct iommu_group *group)
        {
                return __iommu_group_for_each_dev(group, group->default_domain,
-------                                           iommu_group_do_dma_attach);
+++++++                                           iommu_group_do_dma_first_attach);
        }
        
        static int iommu_group_do_probe_finalize(struct device *dev, void *data)
@@@@@@@@@ -1855,7 -1855,7 -1855,7 -1855,7 -1855,7 -1855,7 -1855,7 -1862,7 +1862,7 @@@@@@@@@ int bus_iommu_probe(struct bus_type *bu
        
                        iommu_group_create_direct_mappings(group);
        
-------                 ret = __iommu_group_dma_attach(group);
+++++++                 ret = __iommu_group_dma_first_attach(group);
        
                        mutex_unlock(&group->mutex);
        
@@@@@@@@@ -1987,9 -1987,9 -1987,9 -1987,9 -1987,9 -1987,9 -1987,9 -1994,11 +1994,11 @@@@@@@@@ static int __iommu_attach_device(struc
                        return -ENODEV;
        
                ret = domain->ops->attach_dev(domain, dev);
-------         if (!ret)
-------                 trace_attach_device_to_domain(dev);
-------         return ret;
+++++++         if (ret)
+++++++                 return ret;
+++++++         dev->iommu->attach_deferred = 0;
+++++++         trace_attach_device_to_domain(dev);
+++++++         return 0;
        }
        
        /**
@@@@@@@@@ -2034,22 -2034,22 -2034,22 -2034,22 -2034,22 -2034,22 -2034,22 -2043,12 +2043,12 @@@@@@@@@ EXPORT_SYMBOL_GPL(iommu_attach_device)
        
        int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
        {
-------         if (iommu_is_attach_deferred(dev))
+++++++         if (dev->iommu && dev->iommu->attach_deferred)
                        return __iommu_attach_device(domain, dev);
        
                return 0;
        }
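
With this rework, devices whose driver defers attach (via ops->is_attach_deferred) get dev->iommu->attach_deferred set at probe time instead of an immediate attach; the first DMA operation then performs the real attach, and __iommu_attach_device() clears the flag on success. A hedged sketch of a fast-path call site (the surrounding mapping code is assumed, not shown in this diff):

        /* Cheap flag test on every map; the real attach happens only once. */
        ret = iommu_deferred_attach(dev, domain);
        if (ret)
                return ret;
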
        
------- static void __iommu_detach_device(struct iommu_domain *domain,
-------                                   struct device *dev)
------- {
-------         if (iommu_is_attach_deferred(dev))
-------                 return;
------- 
-------         domain->ops->detach_dev(domain, dev);
-------         trace_detach_device_from_domain(dev);
------- }
------- 
        void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
        {
                struct iommu_group *group;
@@@@@@@@@ -2124,8 -2124,8 -2124,8 -2124,8 -2124,8 -2124,8 -2124,8 -2123,22 +2123,22 @@@@@@@@@ static int __iommu_attach_group(struct 
        
                ret = __iommu_group_for_each_dev(group, domain,
                                                 iommu_group_do_attach_device);
-------         if (ret == 0)
+++++++         if (ret == 0) {
                        group->domain = domain;
+++++++         } else {
+++++++                 /*
+++++++                  * To recover from the case where a device within the
+++++++                  * group fails to attach to the new domain, force all
+++++++                  * devices back to the old domain. The old domain is
+++++++                  * known to be compatible with every device in the
+++++++                  * group, hence the iommu driver should always return
+++++++                  * success.
+++++++                  */
+++++++                 struct iommu_domain *old_domain = group->domain;
+++++++ 
+++++++                 group->domain = NULL;
+++++++                 WARN(__iommu_group_set_domain(group, old_domain),
+++++++                      "iommu driver failed to attach a compatible domain");
+++++++         }
        
                return ret;
        }
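
The recovery path makes __iommu_attach_group() effectively transactional: either every device in the group ends up attached to the new domain, or the whole group is forced back onto its previous domain. A sketch of the caller-visible contract:

        ret = iommu_attach_group(domain, group);
        if (ret) {
                /* No partial attach: the group still uses the domain it
                 * had before the call. */
        }
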
@@@@@@@@@ -2154,11 -2154,11 -2154,11 -2154,11 -2154,11 -2154,11 -2154,11 -2167,12 +2167,12 @@@@@@@@@ int iommu_attach_group(struct iommu_dom
        }
        EXPORT_SYMBOL_GPL(iommu_attach_group);
        
------- static int iommu_group_do_detach_device(struct device *dev, void *data)
+++++++ static int iommu_group_do_set_platform_dma(struct device *dev, void *data)
        {
-------         struct iommu_domain *domain = data;
+++++++         const struct iommu_ops *ops = dev_iommu_ops(dev);
        
-------         __iommu_detach_device(domain, dev);
+++++++         if (!WARN_ON(!ops->set_platform_dma_ops))
+++++++                 ops->set_platform_dma_ops(dev);
        
                return 0;
        }
@@@@@@@@@ -2172,15 -2172,15 -2172,15 -2172,15 -2172,15 -2172,15 -2172,15 -2186,13 +2186,13 @@@@@@@@@ static int __iommu_group_set_domain(str
                        return 0;
        
                /*
-------          * New drivers should support default domains and so the detach_dev() op
-------          * will never be called. Otherwise the NULL domain represents some
+++++++          * New drivers should support default domains, so set_platform_dma()
+++++++          * op will never be called. Otherwise the NULL domain represents some
                 * platform specific behavior.
                 */
                if (!new_domain) {
-------                 if (WARN_ON(!group->domain->ops->detach_dev))
-------                         return -EINVAL;
-------                 __iommu_group_for_each_dev(group, group->domain,
-------                                            iommu_group_do_detach_device);
+++++++                 __iommu_group_for_each_dev(group, NULL,
+++++++                                            iommu_group_do_set_platform_dma);
                        group->domain = NULL;
                        return 0;
                }
@@@@@@@@@ -2360,34 -2360,34 -2360,34 -2360,34 -2360,34 -2360,34 -2360,34 -2372,27 +2372,27 @@@@@@@@@ static int __iommu_map(struct iommu_dom
                return ret;
        }
        
------- static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
-------                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+++++++ int iommu_map(struct iommu_domain *domain, unsigned long iova,
+++++++               phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
        {
                const struct iommu_domain_ops *ops = domain->ops;
                int ret;
        
+++++++         might_sleep_if(gfpflags_allow_blocking(gfp));
+++++++ 
+++++++         /* Discourage passing strange GFP flags */
+++++++         if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+++++++                                 __GFP_HIGHMEM)))
+++++++                 return -EINVAL;
+++++++ 
                ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
                if (ret == 0 && ops->iotlb_sync_map)
                        ops->iotlb_sync_map(domain, iova, size);
        
                return ret;
        }
------- 
------- int iommu_map(struct iommu_domain *domain, unsigned long iova,
-------               phys_addr_t paddr, size_t size, int prot)
------- {
-------         might_sleep();
-------         return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
------- }
        EXPORT_SYMBOL_GPL(iommu_map);
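
iommu_map() and the old iommu_map_atomic() are folded into a single entry point that takes the GFP flags explicitly; blocking flags are only legal in sleepable context (enforced by might_sleep_if()), and flags that would steer the page-table allocation itself (__GFP_DMA, __GFP_HIGHMEM, ...) are rejected. A sketch of the updated calling convention (iova/paddr are assumed to be set up elsewhere):

        /* Process context: allocations may block. */
        ret = iommu_map(domain, iova, paddr, SZ_4K,
                        IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);

        /* Under a spinlock: must not block. */
        ret = iommu_map(domain, iova, paddr, SZ_4K,
                        IOMMU_READ | IOMMU_WRITE, GFP_ATOMIC);
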
        
------- int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
-------               phys_addr_t paddr, size_t size, int prot)
------- {
-------         return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
------- }
------- EXPORT_SYMBOL_GPL(iommu_map_atomic);
------- 
        static size_t __iommu_unmap_pages(struct iommu_domain *domain,
                                          unsigned long iova, size_t size,
                                          struct iommu_iotlb_gather *iotlb_gather)
@@@@@@@@@ -2477,9 -2477,9 -2477,9 -2477,9 -2477,9 -2477,9 -2477,9 -2482,9 +2482,9 @@@@@@@@@ size_t iommu_unmap_fast(struct iommu_do
        }
        EXPORT_SYMBOL_GPL(iommu_unmap_fast);
        
------- static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-------                 struct scatterlist *sg, unsigned int nents, int prot,
-------                 gfp_t gfp)
+++++++ ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+++++++                      struct scatterlist *sg, unsigned int nents, int prot,
+++++++                      gfp_t gfp)
        {
                const struct iommu_domain_ops *ops = domain->ops;
                size_t len = 0, mapped = 0;
                unsigned int i = 0;
                int ret;
        
+++++++         might_sleep_if(gfpflags_allow_blocking(gfp));
+++++++ 
+++++++         /* Discourage passing strange GFP flags */
+++++++         if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+++++++                                 __GFP_HIGHMEM)))
+++++++                 return -EINVAL;
+++++++ 
                while (i <= nents) {
                        phys_addr_t s_phys = sg_phys(sg);
        
        
                return ret;
        }
------- 
------- ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-------                      struct scatterlist *sg, unsigned int nents, int prot)
------- {
-------         might_sleep();
-------         return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
------- }
        EXPORT_SYMBOL_GPL(iommu_map_sg);
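
The same convention applies to iommu_map_sg(), which returns the number of bytes mapped or a negative errno. A sketch, assuming an already-populated sg_table:

        ssize_t mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
                                      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (mapped < 0)
                return mapped;  /* negative errno on failure */
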
        
------- ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
-------                     struct scatterlist *sg, unsigned int nents, int prot)
------- {
-------         return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
------- }
------- 
        /**
         * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
         * @domain: the iommu domain where the fault has happened
@@@@@@@@@ -3185,16 -3185,14 -3185,14 -3185,14 -3185,16 -3185,14 -3185,14 -3184,14 +3184,16 @@@@@@@@@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma
         */
        int iommu_device_claim_dma_owner(struct device *dev, void *owner)
        {
 --- ---        struct iommu_group *group = iommu_group_get(dev);
 +++ +++        struct iommu_group *group;
                int ret = 0;
        
 --- ---        if (!group)
 --- ---                return -ENODEV;
                if (WARN_ON(!owner))
                        return -EINVAL;
        
 +++ +++        group = iommu_group_get(dev);
 +++ +++        if (!group)
 +++ +++                return -ENODEV;
 +++ +++
                mutex_lock(&group->mutex);
                if (group->owner_cnt) {
                        if (group->owner != owner) {
index ca581ff1c76964554f6846e09627961b73a9f471,69682ee068d2bd88205c00fb12f1bfeda8dfa640,69682ee068d2bd88205c00fb12f1bfeda8dfa640,69682ee068d2bd88205c00fb12f1bfeda8dfa640,ca581ff1c76964554f6846e09627961b73a9f471,69682ee068d2bd88205c00fb12f1bfeda8dfa640,69682ee068d2bd88205c00fb12f1bfeda8dfa640,78d0a84c704f3296f5aa7420fc814601d89b76f2..43e4c8f89e23e24e7f82b9fc728d0fcc0c6ce14a
@@@@@@@@@ -319,7 -319,7 -319,7 -319,7 -319,7 -319,7 -319,7 -319,7 +319,7 @@@@@@@@@ static int mtk_iommu_v1_attach_device(s
                return 0;
        }
        
------- static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
+++++++ static void mtk_iommu_v1_set_platform_dma(struct device *dev)
        {
                struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
        
@@@@@@@@@ -585,10 -585,10 -585,10 -585,10 -585,10 -585,10 -585,10 -585,10 +585,10 @@@@@@@@@ static const struct iommu_ops mtk_iommu
                .def_domain_type = mtk_iommu_v1_def_domain_type,
                .device_group   = generic_device_group,
                .pgsize_bitmap  = MT2701_IOMMU_PAGE_SIZE,
+++++++         .set_platform_dma_ops = mtk_iommu_v1_set_platform_dma,
                .owner          = THIS_MODULE,
                .default_domain_ops = &(const struct iommu_domain_ops) {
                        .attach_dev     = mtk_iommu_v1_attach_device,
-------                 .detach_dev     = mtk_iommu_v1_detach_device,
                        .map_pages      = mtk_iommu_v1_map,
                        .unmap_pages    = mtk_iommu_v1_unmap,
                        .iova_to_phys   = mtk_iommu_v1_iova_to_phys,
@@@@@@@@@ -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 +683,7 @@@@@@@@@ static int mtk_iommu_v1_probe(struct pl
                ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                             dev_name(&pdev->dev));
                if (ret)
 --- ---                return ret;
 +++ +++                goto out_clk_unprepare;
        
                ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
                if (ret)
@@@@@@@@@ -698,8 -698,6 -698,6 -698,6 -698,8 -698,6 -698,6 -698,6 +698,8 @@@@@@@@@ out_dev_unreg
                iommu_device_unregister(&data->iommu);
        out_sysfs_remove:
                iommu_device_sysfs_remove(&data->iommu);
 +++ +++out_clk_unprepare:
 +++ +++        clk_disable_unprepare(data->bclk);
                return ret;
        }
        
diff --combined drivers/of/address.c
index 67763e5b8c0efdec99981c09f5d79148adbbdc5a,c34ac33b7338a991051e75adec81770fe8a35292,c34ac33b7338a991051e75adec81770fe8a35292,c34ac33b7338a991051e75adec81770fe8a35292,c34ac33b7338a991051e75adec81770fe8a35292,c34ac33b7338a991051e75adec81770fe8a35292,c34ac33b7338a991051e75adec81770fe8a35292,14f137a21b0c165caaa2cf6366ff6b4282b65ee3..4c0b169ef9bfba69558d39db21fb46643b17c90b
@@@@@@@@@ -626,6 -626,6 -626,6 -626,6 -626,6 -626,6 -626,6 -626,47 +626,47 @@@@@@@@@ u64 of_translate_dma_address(struct dev
        }
        EXPORT_SYMBOL(of_translate_dma_address);
        
+++++++ /**
+++++++  * of_translate_dma_region - Translate a device tree address and size tuple
+++++++  * @dev: device tree node whose addresses should be translated
+++++++  * @prop: pointer into an array of cells
+++++++  * @start: return value for the start of the DMA range
+++++++  * @length: return value for the length of the DMA range
+++++++  *
+++++++  * Returns a pointer to the cell immediately following the translated DMA region.
+++++++  */
+++++++ const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *prop,
+++++++                                       phys_addr_t *start, size_t *length)
+++++++ {
+++++++         struct device_node *parent;
+++++++         u64 address, size;
+++++++         int na, ns;
+++++++ 
+++++++         parent = __of_get_dma_parent(dev);
+++++++         if (!parent)
+++++++                 return NULL;
+++++++ 
+++++++         na = of_bus_n_addr_cells(parent);
+++++++         ns = of_bus_n_size_cells(parent);
+++++++ 
+++++++         of_node_put(parent);
+++++++ 
+++++++         address = of_translate_dma_address(dev, prop);
+++++++         if (address == OF_BAD_ADDR)
+++++++                 return NULL;
+++++++ 
+++++++         size = of_read_number(prop + na, ns);
+++++++ 
+++++++         if (start)
+++++++                 *start = address;
+++++++ 
+++++++         if (length)
+++++++                 *length = size;
+++++++ 
+++++++         return prop + na + ns;
+++++++ }
+++++++ EXPORT_SYMBOL(of_translate_dma_region);
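
of_translate_dma_region() consumes one (address, size) tuple and returns a pointer to the next, so callers can iterate over a whole property. A hedged consumer sketch (the property name "example,dma-regions" is invented for illustration):

        const __be32 *prop, *end;
        phys_addr_t start;
        size_t length;
        int len;

        prop = of_get_property(np, "example,dma-regions", &len);
        if (!prop)
                return;
        end = prop + len / sizeof(__be32);

        while (prop && prop < end) {
                prop = of_translate_dma_region(np, prop, &start, &length);
                if (!prop)
                        break;
                pr_info("DMA region: %pap + %zx\n", &start, length);
        }
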
+++++++ 
        const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
                                       u64 *size, unsigned int *flags)
        {
@@@@@@@@@ -965,19 -965,8 -965,8 -965,8 -965,8 -965,8 -965,8 -1006,8 +1006,19 @@@@@@@@@ int of_dma_get_range(struct device_nod
                }
        
                of_dma_range_parser_init(&parser, node);
 -------        for_each_of_range(&parser, &range)
 +++++++        for_each_of_range(&parser, &range) {
 +++++++                if (range.cpu_addr == OF_BAD_ADDR) {
 +++++++                        pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
 +++++++                               range.bus_addr, node);
 +++++++                        continue;
 +++++++                }
                        num_ranges++;
 +++++++        }
 +++++++
 +++++++        if (!num_ranges) {
 +++++++                ret = -EINVAL;
 +++++++                goto out;
 +++++++        }
        
                r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
                if (!r) {
                }
        
                /*
 -------         * Record all info in the generic DMA ranges array for struct device.
 +++++++         * Record all info in the generic DMA ranges array for struct device,
 +++++++         * returning an error if we don't find any parsable ranges.
                 */
                *map = r;
                of_dma_range_parser_init(&parser, node);
                for_each_of_range(&parser, &range) {
                        pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
                                 range.bus_addr, range.cpu_addr, range.size);
 -------                if (range.cpu_addr == OF_BAD_ADDR) {
 -------                        pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
 -------                               range.bus_addr, node);
 +++++++                if (range.cpu_addr == OF_BAD_ADDR)
                                continue;
 -------                }
                        r->cpu_start = range.cpu_addr;
                        r->dma_start = range.bus_addr;
                        r->size = range.size;
index 2209372f236dbb84d1b77c335c3acbe3d9584e82,23c24fe98c00d4a07ef73826a02d81386cf2118d,23c24fe98c00d4a07ef73826a02d81386cf2118d,23c24fe98c00d4a07ef73826a02d81386cf2118d,23c24fe98c00d4a07ef73826a02d81386cf2118d,23c24fe98c00d4a07ef73826a02d81386cf2118d,23c24fe98c00d4a07ef73826a02d81386cf2118d,e14f86a8ef52586a30a4f3829289f42a06205bc4..5600eb67ca30105e8de6f0527a1b6080b5ecab38
@@@@@@@@@ -1480,7 -1480,7 -1480,7 -1480,7 -1480,7 -1480,7 -1480,7 -1480,8 +1480,8 @@@@@@@@@ static int vfio_iommu_map(struct vfio_i
        
                list_for_each_entry(d, &iommu->domain_list, next) {
                        ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
-------                                 npage << PAGE_SHIFT, prot | IOMMU_CACHE);
+++++++                                 npage << PAGE_SHIFT, prot | IOMMU_CACHE,
+++++++                                 GFP_KERNEL);
                        if (ret)
                                goto unwind;
        
@@@@@@@@@ -1777,8 -1777,8 -1777,8 -1777,8 -1777,8 -1777,8 -1777,8 -1778,8 +1778,8 @@@@@@@@@ static int vfio_iommu_replay(struct vfi
                                        size = npage << PAGE_SHIFT;
                                }
        
-------                         ret = iommu_map(domain->domain, iova, phys,
-------                                         size, dma->prot | IOMMU_CACHE);
+++++++                         ret = iommu_map(domain->domain, iova, phys, size,
+++++++                                         dma->prot | IOMMU_CACHE, GFP_KERNEL);
                                if (ret) {
                                        if (!dma->iommu_mapped) {
                                                vfio_unpin_pages_remote(dma, iova,
         * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
         * hugetlbfs is in use.
         */
 -------static void vfio_test_domain_fgsp(struct vfio_domain *domain)
 +++++++static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
        {
 -------        struct page *pages;
                int ret, order = get_order(PAGE_SIZE * 2);
 +++++++        struct vfio_iova *region;
 +++++++        struct page *pages;
 +++++++        dma_addr_t start;
        
                pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
                if (!pages)
                        return;
        
 -------        ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
 ------                         IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
       -                        IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
 -------        if (!ret) {
 -------                size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
 +++++++        list_for_each_entry(region, regions, list) {
 +++++++                start = ALIGN(region->start, PAGE_SIZE * 2);
 +++++++                if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
 +++++++                        continue;
        
 -------                if (unmapped == PAGE_SIZE)
 -------                        iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
 -------                else
 -------                        domain->fgsp = true;
 +++++++                ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
-                                       IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
++++++++                                IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
 +++++++                if (!ret) {
 +++++++                        size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
 +++++++
 +++++++                        if (unmapped == PAGE_SIZE)
 +++++++                                iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
 +++++++                        else
 +++++++                                domain->fgsp = true;
 +++++++                }
 +++++++                break;
                }
        
                __free_pages(pages, order);
@@@@@@@@@ -2335,7 -2326,7 -2326,7 -2326,7 -2326,7 -2326,7 -2326,7 -2327,7 +2336,7 @@@@@@@@@ static int vfio_iommu_type1_attach_grou
                        }
                }
        
 -------        vfio_test_domain_fgsp(domain);
 +++++++        vfio_test_domain_fgsp(domain, &iova_copy);
        
                /* replay mappings on new domains */
                ret = vfio_iommu_replay(iommu, domain);