Merge branches 'arm/omap', 'arm/exynos', 'arm/smmu', 'arm/mediatek', 'arm/qcom',...
author Joerg Roedel <[email protected]>
Wed, 11 Sep 2019 10:39:19 +0000 (12:39 +0200)
committer Joerg Roedel <[email protected]>
Wed, 11 Sep 2019 10:39:19 +0000 (12:39 +0200)
12 files changed:
Documentation/admin-guide/kernel-parameters.txt
MAINTAINERS
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/omap-iommu.c
drivers/iommu/qcom_iommu.c
include/linux/intel-iommu.h
kernel/dma/direct.c

diff --combined Documentation/admin-guide/kernel-parameters.txt
index 4c1971960afa30484cfe0d2533e9cf28d003662c,7ccd158b3894e7841f16c11597ddbcadf1f7c34d,47d981a86e2f8a5c2115d1834eda3ab7af5ccaae,47d981a86e2f8a5c2115d1834eda3ab7af5ccaae,4c1971960afa30484cfe0d2533e9cf28d003662c,4c1971960afa30484cfe0d2533e9cf28d003662c,4c1971960afa30484cfe0d2533e9cf28d003662c,7ccd158b3894e7841f16c11597ddbcadf1f7c34d,aaca730800979e6fcea0bb2fe5555c46af13c79a,0c59398519be29b8e2d8d054df39e3e27d737b28..d31ffa110461156a8abdf2703514e57e3f42beb9
                                Note that using this option lowers the security
                                provided by tboot because it makes the system
                                vulnerable to DMA attacks.
++++++++ +              nobounce [Default off]
++++++++ +                      Disable bounce buffer for untrusted devices such as
++++++++ +                      the Thunderbolt devices. This will treat the untrusted
++++++++ +                      devices as the trusted ones, hence might expose security
++++++++ +                      risks of DMA attacks.
          
                intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
                                0       disables intel_idle and fall back on acpi_idle.
                                  synchronously.
          
                iommu.passthrough=
---------                       [ARM64] Configure DMA to bypass the IOMMU by default.
+++++++++                       [ARM64, X86] Configure DMA to bypass the IOMMU by default.
                                Format: { "0" | "1" }
                                0 - Use IOMMU translation for DMA.
                                1 - Bypass the IOMMU for DMA.
                                        expose users to several CPU vulnerabilities.
                                        Equivalent to: nopti [X86,PPC]
                                                       kpti=0 [ARM64]
 -     - -                                             nospectre_v1 [PPC]
 +     + +                                             nospectre_v1 [X86,PPC]
                                                       nobp=0 [S390]
                                                       nospectre_v2 [X86,PPC,S390,ARM64]
                                                       spectre_v2_user=off [X86]
                                nosmt=force: Force disable SMT, cannot be undone
                                             via the sysfs control file.
          
 -     - -      nospectre_v1    [PPC] Disable mitigations for Spectre Variant 1 (bounds
 -     - -                      check bypass). With this option data leaks are possible
 -     - -                      in the system.
 +     + +      nospectre_v1    [X86,PPC] Disable mitigations for Spectre Variant 1
 +     + +                      (bounds check bypass). With this option data leaks are
 +     + +                      possible in the system.
          
                nospectre_v2    [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
                                the Spectre variant 2 (indirect branch prediction)
                                Run specified binary instead of /init from the ramdisk,
                                used for early userspace startup. See initrd.
          
 +++   +++      rdrand=         [X86]
 +++   +++                      force - Override the decision by the kernel to hide the
 +++   +++                              advertisement of RDRAND support (this affects
 +++   +++                              certain AMD processors because of buggy BIOS
 +++   +++                              support, specifically around the suspend/resume
 +++   +++                              path).
 +++   +++
                rdt=            [HW,X86,RDT]
                                Turn on/off individual RDT features. List is:
                                cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
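
As a usage sketch (hypothetical command line, assuming an x86 machine with VT-d enabled and an untrusted Thunderbolt-attached device), the two options documented above can be combined at boot, e.g.:

                                intel_iommu=on,nobounce iommu.passthrough=0

Here "nobounce" is one of the comma-separated intel_iommu= options (parsed in the intel_iommu_setup() change to drivers/iommu/intel-iommu.c further below), while iommu.passthrough=0 keeps IOMMU translation for DMA.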
diff --combined MAINTAINERS
index e7a47b5210fdd0b38e684e6f5a672153ea05f150,a2c343ee3b2ca13e4cbe7257a9e4a58d69a11285,e81e60bd7c2646892e7eb90c14bcab6c5286ac85,cf04f72ca79faf30cabd7d8004e6765cceca71a7,74e9d9c2013ec3904dff4c8954305386fe129186,9cbcf167bdd08f94bfeb80aa7424c5893224555c,e7a47b5210fdd0b38e684e6f5a672153ea05f150,a2c343ee3b2ca13e4cbe7257a9e4a58d69a11285,43604d6ab96c2b6c9f989eea100d249bad2f8881,a2c343ee3b2ca13e4cbe7257a9e4a58d69a11285..ae8536cba798823cba82340531756c7aa982405f
@@@@@@@@@@@ -183,7 -183,7 -183,7 -183,7 -183,7 -183,7 -183,7 -183,7 -183,7 -183,7 +183,7 @@@@@@@@@@@ M: Realtek linux nic maintainers <nic_s
          M:    Heiner Kallweit <[email protected]>
          L:    [email protected]
          S:    Maintained
 ---   ---F:    drivers/net/ethernet/realtek/r8169.c
 +++   +++F:    drivers/net/ethernet/realtek/r8169*
          
          8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
          M:    Greg Kroah-Hartman <[email protected]>
@@@@@@@@@@@ -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 -683,7 +683,7 @@@@@@@@@@@ S: Maintaine
          F:    drivers/crypto/sunxi-ss/
          
          ALLWINNER VPU DRIVER
 ----- ---M:    Maxime Ripard <m[email protected]>
 +++++ +++M:    Maxime Ripard <m[email protected]>
          M:    Paul Kocialkowski <[email protected]>
          L:    [email protected]
          S:    Maintained
@@@@@@@@@@@ -1350,8 -1350,8 -1350,8 -1350,7 -1350,7 -1350,8 -1350,8 -1350,8 -1350,8 -1350,8 +1350,7 @@@@@@@@@@@ M:      Will Deacon <[email protected]
          R:    Robin Murphy <[email protected]>
          L:    [email protected] (moderated for non-subscribers)
          S:    Maintained
---  -----F:    drivers/iommu/arm-smmu.c
---  -----F:    drivers/iommu/arm-smmu-v3.c
+++  +++++F:    drivers/iommu/arm-smmu*
          F:    drivers/iommu/io-pgtable-arm.c
          F:    drivers/iommu/io-pgtable-arm-v7s.c
          
@@@@@@@@@@@ -1408,7 -1408,7 -1408,7 -1407,7 -1407,7 -1408,7 -1408,7 -1408,7 -1408,7 -1408,7 +1407,7 @@@@@@@@@@@ S:      Maintaine
          F:    drivers/clk/sunxi/
          
          ARM/Allwinner sunXi SoC support
 ----- ---M:    Maxime Ripard <m[email protected]>
 +++++ +++M:    Maxime Ripard <m[email protected]>
          M:    Chen-Yu Tsai <[email protected]>
          L:    [email protected] (moderated for non-subscribers)
          S:    Maintained
@@@@@@@@@@@ -3577,7 -3577,7 -3577,7 -3576,7 -3576,7 -3577,7 -3577,7 -3577,7 -3577,7 -3577,7 +3576,7 @@@@@@@@@@@ F:      Documentation/filesystems/caching/ca
          F:    fs/cachefiles/
          
          CADENCE MIPI-CSI2 BRIDGES
 ----- ---M:    Maxime Ripard <m[email protected]>
 +++++ +++M:    Maxime Ripard <m[email protected]>
          L:    [email protected]
          S:    Maintained
          F:    Documentation/devicetree/bindings/media/cdns,*.txt
@@@@@@@@@@@ -5295,7 -5295,7 -5295,7 -5294,7 -5294,7 -5295,7 -5295,7 -5295,7 -5295,7 -5295,7 +5294,7 @@@@@@@@@@@ F:      include/linux/vga
          
          DRM DRIVERS AND MISC GPU PATCHES
          M:    Maarten Lankhorst <[email protected]>
 ----- ---M:    Maxime Ripard <m[email protected]>
 +++++ +++M:    Maxime Ripard <m[email protected]>
          M:    Sean Paul <[email protected]>
          W:    https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
          S:    Maintained
@@@@@@@@@@@ -5308,7 -5308,7 -5308,7 -5307,7 -5307,7 -5308,7 -5308,7 -5308,7 -5308,7 -5308,7 +5307,7 @@@@@@@@@@@ F:      include/uapi/drm/drm
          F:    include/linux/vga*
          
          DRM DRIVERS FOR ALLWINNER A10
 ----- ---M:    Maxime Ripard  <[email protected]>
 +++++ +++M:    Maxime Ripard <[email protected]>
          L:    [email protected]
          S:    Supported
          F:    drivers/gpu/drm/sun4i/
@@@@@@@@@@@ -6065,7 -6065,7 -6065,7 -6064,7 -6064,7 -6065,7 -6065,7 -6065,7 -6065,7 -6065,7 +6064,7 @@@@@@@@@@@ M:      Florian Fainelli <[email protected]
          M:    Heiner Kallweit <[email protected]>
          L:    [email protected]
          S:    Maintained
 ---   ---F:    Documentation/ABI/testing/sysfs-bus-mdio
 +++   +++F:    Documentation/ABI/testing/sysfs-class-net-phydev
          F:    Documentation/devicetree/bindings/net/ethernet-phy.yaml
          F:    Documentation/devicetree/bindings/net/mdio*
          F:    Documentation/networking/phy.rst
@@@@@@@@@@@ -6344,7 -6344,7 -6344,7 -6343,7 -6343,7 -6344,7 -6344,7 -6344,7 -6344,7 -6344,7 +6343,7 @@@@@@@@@@@ FPGA MANAGER FRAMEWOR
          M:    Moritz Fischer <[email protected]>
          L:    [email protected]
          S:    Maintained
 -     - -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
 +     + +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
          Q:    http://patchwork.kernel.org/project/linux-fpga/list/
          F:    Documentation/fpga/
          F:    Documentation/driver-api/fpga/
@@@@@@@@@@@ -6377,7 -6377,7 -6377,7 -6376,7 -6376,7 -6377,7 -6377,7 -6377,7 -6377,7 -6377,7 +6376,7 @@@@@@@@@@@ FRAMEBUFFER LAYE
          M:    Bartlomiej Zolnierkiewicz <[email protected]>
          L:    [email protected]
          L:    [email protected]
 -     - -T:    git git://github.com/bzolnier/linux.git
 +     + +T:    git git://anongit.freedesktop.org/drm/drm-misc
          Q:    http://patchwork.kernel.org/project/linux-fbdev/list/
          S:    Maintained
          F:    Documentation/fb/
@@@@@@@@@@@ -6441,14 -6441,6 -6441,6 -6440,14 -6440,14 -6441,14 -6441,14 -6441,6 -6441,14 -6441,6 +6440,14 @@@@@@@@@@@ S:       Maintaine
          F:    drivers/perf/fsl_imx8_ddr_perf.c
          F:    Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
          
 ++    + +FREESCALE IMX I2C DRIVER
 ++    + +M:    Oleksij Rempel <[email protected]>
 ++    + +R:    Pengutronix Kernel Team <[email protected]>
 ++    + +L:    [email protected]
 ++    + +S:    Maintained
 ++    + +F:    drivers/i2c/busses/i2c-imx.c
 ++    + +F:    Documentation/devicetree/bindings/i2c/i2c-imx.txt
 ++    + +
          FREESCALE IMX LPI2C DRIVER
          M:    Dong Aisheng <[email protected]>
          L:    [email protected]
@@@@@@@@@@@ -6835,6 -6827,13 -6827,6 -6834,6 -6834,6 -6835,6 -6835,6 -6827,13 -6835,6 -6827,13 +6834,6 @@@@@@@@@@@ F:   Documentation/filesystems/gfs2*.tx
          F:    fs/gfs2/
          F:    include/uapi/linux/gfs2_ondisk.h
          
 -     - -GIGASET ISDN DRIVERS
 -     - -M:    Paul Bolle <[email protected]>
 -     - -L:    [email protected]
 -     - -W:    http://gigaset307x.sourceforge.net/
 -     - -S:    Odd Fixes
 -     - -F:    drivers/staging/isdn/gigaset/
 -     - -
          GNSS SUBSYSTEM
          M:    Johan Hovold <[email protected]>
          T:    git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
@@@@@@@@@@@ -7460,7 -7459,7 -7452,7 -7459,7 -7459,7 -7460,7 -7460,7 -7459,7 -7460,7 -7459,7 +7459,7 @@@@@@@@@@@ F:      drivers/net/hyperv
          F:    drivers/scsi/storvsc_drv.c
          F:    drivers/uio/uio_hv_generic.c
          F:    drivers/video/fbdev/hyperv_fb.c
 --    - -F:    drivers/iommu/hyperv_iommu.c
 ++    + +F:    drivers/iommu/hyperv-iommu.c
          F:    net/vmw_vsock/hyperv_transport.c
          F:    include/clocksource/hyperv_timer.h
          F:    include/linux/hyperv.h
@@@@@@@@@@@ -7513,7 -7512,7 -7505,7 -7512,7 -7512,7 -7513,7 -7513,7 -7512,7 -7513,7 -7512,7 +7512,7 @@@@@@@@@@@ I2C MV64XXX MARVELL AND ALLWINNER DRIVE
          M:    Gregory CLEMENT <[email protected]>
          L:    [email protected]
          S:    Maintained
 ----- ---F:    Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
 +++++ +++F:    Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
          F:    drivers/i2c/busses/i2c-mv64xxx.c
          
          I2C OVER PARALLEL PORT
@@@@@@@@@@@ -8050,7 -8049,6 -8042,7 -8049,7 -8049,7 -8050,7 -8050,7 -8049,6 -8050,7 -8049,6 +8049,7 @@@@@@@@@@@ S:      Maintaine
          F:    drivers/video/fbdev/i810/
          
          INTEL ASoC DRIVERS
 +     + +M:    Cezary Rojewski <[email protected]>
          M:    Pierre-Louis Bossart <[email protected]>
          M:    Liam Girdwood <[email protected]>
          M:    Jie Yang <[email protected]>
@@@@@@@@@@@ -8072,13 -8070,6 -8064,6 -8071,13 -8071,13 -8072,13 -8072,13 -8070,6 -8072,13 -8070,6 +8071,13 @@@@@@@@@@@ T:       git git://git.code.sf.net/p/intel-sa
          S:    Supported
          F:    drivers/scsi/isci/
          
 ++    + +INTEL CPU family model numbers
 ++    + +M:    Tony Luck <[email protected]>
 ++    + +M:    [email protected]
 ++    + +L:    [email protected]
 ++    + +S:    Supported
 ++    + +F:    arch/x86/include/asm/intel-family.h
 ++    + +
          INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
          M:    Jani Nikula <[email protected]>
          M:    Joonas Lahtinen <[email protected]>
          L:    [email protected]
          T:    git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
          S:    Supported
 --    - -F:    fs/iomap.c
          F:    fs/iomap/
          F:    include/linux/iomap.h
          
@@@@@@@@@@@ -8454,6 -8446,11 -8440,11 -8453,11 -8453,11 -8454,11 -8454,6 -8446,11 -8454,11 -8446,11 +8453,6 @@@@@@@@@@@ S:      Maintaine
          F:    fs/io_uring.c
          F:    include/uapi/linux/io_uring.h
          
 ----- ---IP MASQUERADING
 ----- ---M:    Juanjo Ciarlante <[email protected]>
 ----- ---S:    Maintained
 ----- ---F:    net/ipv4/netfilter/ipt_MASQUERADE.c
 ----- ---
          IPMI SUBSYSTEM
          M:    Corey Minyard <[email protected]>
          L:    [email protected] (moderated for non-subscribers)
@@@@@@@@@@@ -8827,6 -8824,14 -8818,14 -8831,14 -8831,6 -8832,6 -8827,6 -8824,14 -8832,14 -8824,14 +8826,6 @@@@@@@@@@@ F:        virt/kvm/
          F:    tools/kvm/
          F:    tools/testing/selftests/kvm/
          
 ---   ---KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
 ---   ---M:    Joerg Roedel <[email protected]>
 ---   ---L:    [email protected]
 ---   ---W:    http://www.linux-kvm.org/
 ---   ---S:    Maintained
 ---   ---F:    arch/x86/include/asm/svm.h
 ---   ---F:    arch/x86/kvm/svm.c
 ---   ---
          KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
          M:    Marc Zyngier <[email protected]>
          R:    James Morse <[email protected]>
@@@@@@@@@@@ -8869,7 -8874,7 -8868,7 -8881,7 -8873,7 -8874,7 -8869,7 -8874,7 -8882,7 -8874,7 +8868,7 @@@@@@@@@@@ M:      Christian Borntraeger <borntraeger@d
          M:    Janosch Frank <[email protected]>
          R:    David Hildenbrand <[email protected]>
          R:    Cornelia Huck <[email protected]>
 ---   ---L:    linux-s390@vger.kernel.org
 +++   +++L:    kvm@vger.kernel.org
          W:    http://www.ibm.com/developerworks/linux/linux390/
          T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
          S:    Supported
@@@@@@@@@@@ -8884,11 -8889,6 -8883,6 -8896,6 -8888,11 -8889,11 -8884,11 -8889,6 -8897,6 -8889,6 +8883,11 @@@@@@@@@@@ F: tools/testing/selftests/kvm/*/s390x
          KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
          M:    Paolo Bonzini <[email protected]>
          M:    Radim Krčmář <[email protected]>
 +++   +++R:    Sean Christopherson <[email protected]>
 +++   +++R:    Vitaly Kuznetsov <[email protected]>
 +++   +++R:    Wanpeng Li <[email protected]>
 +++   +++R:    Jim Mattson <[email protected]>
 +++   +++R:    Joerg Roedel <[email protected]>
          L:    [email protected]
          W:    http://www.linux-kvm.org
          T:    git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
          F:    arch/x86/kvm/
          F:    arch/x86/kvm/*/
          F:    arch/x86/include/uapi/asm/kvm*
 +++   +++F:    arch/x86/include/uapi/asm/vmx.h
 +++   +++F:    arch/x86/include/uapi/asm/svm.h
          F:    arch/x86/include/asm/kvm*
          F:    arch/x86/include/asm/pvclock-abi.h
 +++   +++F:    arch/x86/include/asm/svm.h
 +++   +++F:    arch/x86/include/asm/vmx.h
          F:    arch/x86/kernel/kvm.c
          F:    arch/x86/kernel/kvmclock.c
          
@@@@@@@@@@@ -9229,18 -9225,6 -9219,6 -9232,6 -9233,6 -9234,6 -9229,18 -9225,6 -9233,6 -9225,6 +9228,18 @@@@@@@@@@@ F:   include/linux/nd.
          F:    include/linux/libnvdimm.h
          F:    include/uapi/linux/ndctl.h
          
 +++++ +++LICENSES and SPDX stuff
 +++++ +++M:    Thomas Gleixner <[email protected]>
 +++++ +++M:    Greg Kroah-Hartman <[email protected]>
 +++++ +++L:    [email protected]
 +++++ +++S:    Maintained
 +++++ +++T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/spdx.git
 +++++ +++F:    COPYING
 +++++ +++F:    Documentation/process/license-rules.rst
 +++++ +++F:    LICENSES/
 +++++ +++F:    scripts/spdxcheck-test.sh
 +++++ +++F:    scripts/spdxcheck.py
 +++++ +++
          LIGHTNVM PLATFORM SUPPORT
          M:    Matias Bjorling <[email protected]>
          W:    http://github/OpenChannelSSD
          M:    Sridhar Samudrala <[email protected]>
          L:    [email protected]
          S:    Supported
 ----- ---F:    driver/net/net_failover.c
 +++++ +++F:    drivers/net/net_failover.c
          F:    include/net/net_failover.h
          F:    Documentation/networking/net_failover.rst
          
          S:    Maintained
          W:    https://fedorahosted.org/dropwatch/
          F:    net/core/drop_monitor.c
 +     + +F:    include/uapi/linux/net_dropmon.h
          
          NETWORKING DRIVERS
          M:    "David S. Miller" <[email protected]>
          M:    Dave Watson <[email protected]>
          M:    John Fastabend <[email protected]>
          M:    Daniel Borkmann <[email protected]>
 +     + +M:    Jakub Kicinski <[email protected]>
          L:    [email protected]
          S:    Maintained
          F:    net/tls/*
@@@@@@@@@@@ -14485,7 -14467,6 -14463,6 -14476,6 -14477,6 -14478,6 -14485,7 -14467,6 -14477,6 -14467,6 +14484,7 @@@@@@@@@@@ F:   drivers/net/phy/phylink.
          F:    drivers/net/phy/sfp*
          F:    include/linux/phylink.h
          F:    include/linux/sfp.h
 +++++ +++K:    phylink
          
          SGI GRU DRIVER
          M:    Dimitri Sivanich <[email protected]>
@@@@@@@@@@@ -14891,9 -14872,9 -14868,9 -14881,9 -14882,9 -14883,9 -14891,9 -14872,9 -14882,9 -14872,9 +14890,9 @@@@@@@@@@@ F:   include/linux/arm_sdei.
          F:    include/uapi/linux/arm_sdei.h
          
          SOFTWARE RAID (Multiple Disks) SUPPORT
 ---   ---M:    Shaohua Li <shli@kernel.org>
 +++   +++M:    Song Liu <song@kernel.org>
          L:    [email protected]
 ---   ---T:    git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
 +++   +++T:    git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
          S:    Supported
          F:    drivers/md/Makefile
          F:    drivers/md/Kconfig
          F:    drivers/net/ethernet/ti/netcp*
          
          TI PCM3060 ASoC CODEC DRIVER
 -     - -M:    Kirill Marinushkin <kmarinushkin@birdec.tech>
 +     + +M:    Kirill Marinushkin <kmarinushkin@birdec.com>
          L:    [email protected] (moderated for non-subscribers)
          S:    Maintained
          F:    Documentation/devicetree/bindings/sound/pcm3060.txt
@@@@@@@@@@@ -17584,6 -17565,7 -17561,6 -17574,6 -17575,6 -17576,6 -17584,6 -17565,7 -17575,6 -17565,7 +17583,6 @@@@@@@@@@@ M:   Jakub Kicinski <jakub.kicinski@netro
          M:    Jesper Dangaard Brouer <[email protected]>
          M:    John Fastabend <[email protected]>
          L:    [email protected]
 -     - -L:    [email protected]
          L:    [email protected]
          S:    Supported
          F:    net/core/xdp.c
diff --combined drivers/iommu/Makefile
index f13f36ae1af652836254ba07315f99303fba6f88,f13f36ae1af652836254ba07315f99303fba6f88,f13f36ae1af652836254ba07315f99303fba6f88,a2729aadd300fa7b9f20d40edaa46d61f1a3b0c7,a2729aadd300fa7b9f20d40edaa46d61f1a3b0c7,f13f36ae1af652836254ba07315f99303fba6f88,f13f36ae1af652836254ba07315f99303fba6f88,c6a277e69848433abf2068ae036d29b84b95371f,bfe27b2755bda6beea642be89eb99b6d301c49b5,f13f36ae1af652836254ba07315f99303fba6f88..4f405f926e739cdd4d00937f9b57b4d42ee06b30
@@@@@@@@@@@ -10,13 -10,13 -10,13 -10,13 -10,13 -10,13 -10,13 -10,13 -10,14 -10,13 +10,14 @@@@@@@@@@@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += 
          obj-$(CONFIG_IOMMU_IOVA) += iova.o
          obj-$(CONFIG_OF_IOMMU)        += of_iommu.o
          obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
------- --obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+++++++ ++obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
          obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
          obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
---  -----obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
+++  +++++obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o
          obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
          obj-$(CONFIG_DMAR_TABLE) += dmar.o
          obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
++++++++ +obj-$(CONFIG_INTEL_IOMMU) += intel-trace.o
          obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
          obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
          obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
diff --combined drivers/iommu/amd_iommu.c
index 61de81965c44ed95b52ef857b4f31a4cb8519a28,b607a92791d3f8b48a9c1caeaea48986753ead64,b607a92791d3f8b48a9c1caeaea48986753ead64,29eeea91466080a1a328accbd435dd673ab0866a,29eeea91466080a1a328accbd435dd673ab0866a,b607a92791d3f8b48a9c1caeaea48986753ead64,b607a92791d3f8b48a9c1caeaea48986753ead64,d365ee449a2cb88681c0af1d865b2d3c1781fe08,b607a92791d3f8b48a9c1caeaea48986753ead64,07512d08dd00e264684dc7189bd08c284b453bca..1ed3b98324bac09ce8399b2ee0ad445e08802d20
@@@@@@@@@@@ -436,7 -436,7 -436,7 -436,7 -436,7 -436,7 -436,7 -436,7 -436,7 -436,7 +436,7 @@@@@@@@@@@ static int iommu_init_device(struct dev
                 * invalid address), we ignore the capability for the device so
                 * it'll be forced to go into translation mode.
                 */
---------       if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+++++++++       if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
                    dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
                        struct amd_iommu *iommu;
          
@@@@@@@@@@@ -1143,17 -1143,6 -1143,6 -1143,6 -1143,6 -1143,6 -1143,6 -1143,6 -1143,6 -1143,6 +1143,17 @@@@@@@@@@@ static void amd_iommu_flush_tlb_all(str
                iommu_completion_wait(iommu);
          }
          
 +++++++++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
 +++++++++{
 +++++++++      struct iommu_cmd cmd;
 +++++++++
 +++++++++      build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
 +++++++++                            dom_id, 1);
 +++++++++      iommu_queue_command(iommu, &cmd);
 +++++++++
 +++++++++      iommu_completion_wait(iommu);
 +++++++++}
 +++++++++
          static void amd_iommu_flush_all(struct amd_iommu *iommu)
          {
                struct iommu_cmd cmd;
@@@@@@@@@@@ -1435,21 -1424,18 -1424,18 -1424,18 -1424,18 -1424,18 -1424,18 -1424,18 -1424,18 -1424,18 +1435,21 @@@@@@@@@@@ static void free_pagetable(struct prote
           * another level increases the size of the address space by 9 bits to a size up
           * to 64 bits.
           */
 ---------static bool increase_address_space(struct protection_domain *domain,
 +++++++++static void increase_address_space(struct protection_domain *domain,
                                           gfp_t gfp)
          {
 +++++++++      unsigned long flags;
                u64 *pte;
          
 ---------      if (domain->mode == PAGE_MODE_6_LEVEL)
 +++++++++      spin_lock_irqsave(&domain->lock, flags);
 +++++++++
 +++++++++      if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                        /* address space already 64 bit large */
 ---------              return false;
 +++++++++              goto out;
          
                pte = (void *)get_zeroed_page(gfp);
                if (!pte)
 ---------              return false;
 +++++++++              goto out;
          
                *pte             = PM_LEVEL_PDE(domain->mode,
                                                iommu_virt_to_phys(domain->pt_root));
                domain->mode    += 1;
                domain->updated  = true;
          
 ---------      return true;
 +++++++++out:
 +++++++++      spin_unlock_irqrestore(&domain->lock, flags);
 +++++++++
 +++++++++      return;
          }
          
          static u64 *alloc_pte(struct protection_domain *domain,
@@@@@@@@@@@ -1890,7 -1873,6 -1873,6 -1873,6 -1873,6 -1873,6 -1873,6 -1873,6 -1873,6 -1873,6 +1890,7 @@@@@@@@@@@ static void set_dte_entry(u16 devid, st
          {
                u64 pte_root = 0;
                u64 flags = 0;
 +++++++++      u32 old_domid;
          
                if (domain->mode != PAGE_MODE_NONE)
                        pte_root = iommu_virt_to_phys(domain->pt_root);
                flags &= ~DEV_DOMID_MASK;
                flags |= domain->id;
          
 +++++++++      old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
                amd_iommu_dev_table[devid].data[1]  = flags;
                amd_iommu_dev_table[devid].data[0]  = pte_root;
 +++++++++
 +++++++++      /*
 +++++++++       * A kdump kernel might be replacing a domain ID that was copied from
 +++++++++       * the previous kernel--if so, it needs to flush the translation cache
 +++++++++       * entries for the old domain ID that is being overwritten
 +++++++++       */
 +++++++++      if (old_domid) {
 +++++++++              struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
 +++++++++
 +++++++++              amd_iommu_flush_tlb_domid(iommu, old_domid);
 +++++++++      }
          }
          
          static void clear_dte_entry(u16 devid)
@@@@@@@@@@@ -2256,7 -2226,7 -2226,7 -2226,7 -2226,7 -2226,7 -2226,7 -2226,7 -2226,7 -2226,7 +2256,7 @@@@@@@@@@@ static int amd_iommu_add_device(struct 
          
                BUG_ON(!dev_data);
          
---------       if (iommu_pass_through || dev_data->iommu_v2)
+++++++++       if (dev_data->iommu_v2)
                        iommu_request_dm_for_dev(dev);
          
                /* Domains are initialized for this device - have a look what we ended up with */
@@@@@@@@@@@ -2577,7 -2547,7 -2547,7 -2547,7 -2547,7 -2547,7 -2547,7 -2547,9 -2547,7 -2547,7 +2577,9 @@@@@@@@@@@ static int map_sg(struct device *dev, s
          
                                bus_addr  = address + s->dma_address + (j << PAGE_SHIFT);
                                phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
------- --                      ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
+++++++ ++                      ret = iommu_map_page(domain, bus_addr, phys_addr,
+++++++ ++                                           PAGE_SIZE, prot,
+++++++ ++                                           GFP_ATOMIC | __GFP_NOWARN);
                                if (ret)
                                        goto out_unmap;
          
@@@@@@@@@@@ -2835,7 -2805,7 -2805,7 -2805,7 -2805,7 -2805,7 -2805,7 -2807,7 -2805,7 -2805,7 +2837,7 @@@@@@@@@@@ int __init amd_iommu_init_api(void
          
          int __init amd_iommu_init_dma_ops(void)
          {
---------       swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
+++++++++       swiotlb        = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
                iommu_detected = 1;
          
                if (amd_iommu_unmap_flush)
@@@@@@@@@@@ -3085,7 -3055,7 -3055,7 -3055,8 -3055,8 -3055,7 -3055,7 -3057,7 -3055,7 -3055,8 +3087,8 @@@@@@@@@@@ static int amd_iommu_map(struct iommu_d
          }
          
          static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
---  ----                          size_t page_size)
+++  ++++                             size_t page_size,
+++  ++++                             struct iommu_iotlb_gather *gather)
          {
                struct protection_domain *domain = to_pdomain(dom);
                size_t unmap_size;
@@@@@@@@@@@ -3226,9 -3196,9 -3196,9 -3197,10 -3197,10 -3196,9 -3196,9 -3198,9 -3196,9 -3197,10 +3229,10 @@@@@@@@@@@ static void amd_iommu_flush_iotlb_all(s
                domain_flush_complete(dom);
          }
          
---  ---- static void amd_iommu_iotlb_range_add(struct iommu_domain *domain,
---  ----                                     unsigned long iova, size_t size)
+++  ++++ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+++  ++++                                struct iommu_iotlb_gather *gather)
          {
+++  ++++       amd_iommu_flush_iotlb_all(domain);
          }
          
          const struct iommu_ops amd_iommu_ops = {
                .is_attach_deferred = amd_iommu_is_attach_deferred,
                .pgsize_bitmap  = AMD_IOMMU_PGSIZES,
                .flush_iotlb_all = amd_iommu_flush_iotlb_all,
---  ----       .iotlb_range_add = amd_iommu_iotlb_range_add,
---  ----       .iotlb_sync = amd_iommu_flush_iotlb_all,
+++  ++++       .iotlb_sync = amd_iommu_iotlb_sync,
          };
          
          /*****************************************************************************
@@@@@@@@@@@ -4343,13 -4313,13 -4313,13 -4314,13 -4314,13 -4313,13 -4313,13 -4315,62 -4313,13 -4314,13 +4346,62 @@@@@@@@@@@ static const struct irq_domain_ops amd_
                .deactivate = irq_remapping_deactivate,
          };
          
+++++++ ++int amd_iommu_activate_guest_mode(void *data)
+++++++ ++{
+++++++ ++      struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+++++++ ++      struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+++++++ ++
+++++++ ++      if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+++++++ ++          !entry || entry->lo.fields_vapic.guest_mode)
+++++++ ++              return 0;
+++++++ ++
+++++++ ++      entry->lo.val = 0;
+++++++ ++      entry->hi.val = 0;
+++++++ ++
+++++++ ++      entry->lo.fields_vapic.guest_mode  = 1;
+++++++ ++      entry->lo.fields_vapic.ga_log_intr = 1;
+++++++ ++      entry->hi.fields.ga_root_ptr       = ir_data->ga_root_ptr;
+++++++ ++      entry->hi.fields.vector            = ir_data->ga_vector;
+++++++ ++      entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
+++++++ ++
+++++++ ++      return modify_irte_ga(ir_data->irq_2_irte.devid,
+++++++ ++                            ir_data->irq_2_irte.index, entry, NULL);
+++++++ ++}
+++++++ ++EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
+++++++ ++
+++++++ ++int amd_iommu_deactivate_guest_mode(void *data)
+++++++ ++{
+++++++ ++      struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+++++++ ++      struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+++++++ ++      struct irq_cfg *cfg = ir_data->cfg;
+++++++ ++
+++++++ ++      if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
+++++++ ++          !entry || !entry->lo.fields_vapic.guest_mode)
+++++++ ++              return 0;
+++++++ ++
+++++++ ++      entry->lo.val = 0;
+++++++ ++      entry->hi.val = 0;
+++++++ ++
+++++++ ++      entry->lo.fields_remap.dm          = apic->irq_dest_mode;
+++++++ ++      entry->lo.fields_remap.int_type    = apic->irq_delivery_mode;
+++++++ ++      entry->hi.fields.vector            = cfg->vector;
+++++++ ++      entry->lo.fields_remap.destination =
+++++++ ++                              APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
+++++++ ++      entry->hi.fields.destination =
+++++++ ++                              APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
+++++++ ++
+++++++ ++      return modify_irte_ga(ir_data->irq_2_irte.devid,
+++++++ ++                            ir_data->irq_2_irte.index, entry, NULL);
+++++++ ++}
+++++++ ++EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
+++++++ ++
          static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
          {
+++++++ ++      int ret;
                struct amd_iommu *iommu;
                struct amd_iommu_pi_data *pi_data = vcpu_info;
                struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
                struct amd_ir_data *ir_data = data->chip_data;
------- --      struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
                struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
                struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
          
                if (!dev_data || !dev_data->use_vapic)
                        return 0;
          
+++++++ ++      ir_data->cfg = irqd_cfg(data);
                pi_data->ir_data = ir_data;
          
                /* Note:
          
                pi_data->prev_ga_tag = ir_data->cached_ga_tag;
                if (pi_data->is_guest_mode) {
------- --              /* Setting */
------- --              irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
------- --              irte->hi.fields.vector = vcpu_pi_info->vector;
------- --              irte->lo.fields_vapic.ga_log_intr = 1;
------- --              irte->lo.fields_vapic.guest_mode = 1;
------- --              irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
------- --
------- --              ir_data->cached_ga_tag = pi_data->ga_tag;
+++++++ ++              ir_data->ga_root_ptr = (pi_data->base >> 12);
+++++++ ++              ir_data->ga_vector = vcpu_pi_info->vector;
+++++++ ++              ir_data->ga_tag = pi_data->ga_tag;
+++++++ ++              ret = amd_iommu_activate_guest_mode(ir_data);
+++++++ ++              if (!ret)
+++++++ ++                      ir_data->cached_ga_tag = pi_data->ga_tag;
                } else {
------- --              /* Un-Setting */
------- --              struct irq_cfg *cfg = irqd_cfg(data);
------- --
------- --              irte->hi.val = 0;
------- --              irte->lo.val = 0;
------- --              irte->hi.fields.vector = cfg->vector;
------- --              irte->lo.fields_remap.guest_mode = 0;
------- --              irte->lo.fields_remap.destination =
------- --                              APICID_TO_IRTE_DEST_LO(cfg->dest_apicid);
------- --              irte->hi.fields.destination =
------- --                              APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
------- --              irte->lo.fields_remap.int_type = apic->irq_delivery_mode;
------- --              irte->lo.fields_remap.dm = apic->irq_dest_mode;
+++++++ ++              ret = amd_iommu_deactivate_guest_mode(ir_data);
          
                        /*
                         * This communicates the ga_tag back to the caller
                         * so that it can do all the necessary clean up.
                         */
------- --              ir_data->cached_ga_tag = 0;
+++++++ ++              if (!ret)
+++++++ ++                      ir_data->cached_ga_tag = 0;
                }
          
------- --      return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
+++++++ ++      return ret;
          }
          
          
diff --combined drivers/iommu/dma-iommu.c
index f68a62c3c32b55e1414ee5eff0cc34a452f38489,a7f9c3edbcb299f83f8d4c6093c274a0bc5b3387,a7f9c3edbcb299f83f8d4c6093c274a0bc5b3387,315e0087c19f2c38a1979a2ea1c31a337b56d700,de68b4a02aea79238309a836f01e82f3f9488e07,f68a62c3c32b55e1414ee5eff0cc34a452f38489,f68a62c3c32b55e1414ee5eff0cc34a452f38489,a7f9c3edbcb299f83f8d4c6093c274a0bc5b3387,d991d40f797fb62e9f6659060c1e92d4abb3bddc,8ce9db9c2cf6ee92eaf369e8eab1179aa174efb4..8f412af842471aef7a56603156123a2d70fd8142
@@@@@@@@@@@ -303,13 -303,13 -303,13 -303,13 -303,13 -303,13 -303,13 -303,13 -303,13 -303,15 +303,15 @@@@@@@@@@@ static int iommu_dma_init_domain(struc
                        u64 size, struct device *dev)
          {
                struct iommu_dma_cookie *cookie = domain->iova_cookie;
---------       struct iova_domain *iovad = &cookie->iovad;
                unsigned long order, base_pfn;
+++++++++       struct iova_domain *iovad;
                int attr;
          
                if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                        return -EINVAL;
          
+++++++++       iovad = &cookie->iovad;
+++++++++ 
                /* Use the smallest supported page size for IOVA granularity */
                order = __ffs(domain->pgsize_bitmap);
                base_pfn = max_t(unsigned long, 1, base >> order);
@@@@@@@@@@@ -444,13 -444,13 -444,13 -444,18 -444,18 -444,13 -444,13 -444,13 -444,13 -446,18 +446,18 @@@@@@@@@@@ static void __iommu_dma_unmap(struct de
                struct iommu_dma_cookie *cookie = domain->iova_cookie;
                struct iova_domain *iovad = &cookie->iovad;
                size_t iova_off = iova_offset(iovad, dma_addr);
+++  ++++       struct iommu_iotlb_gather iotlb_gather;
+++  ++++       size_t unmapped;
          
                dma_addr -= iova_off;
                size = iova_align(iovad, size + iova_off);
+++  ++++       iommu_iotlb_gather_init(&iotlb_gather);
+++  ++++ 
+++  ++++       unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
+++  ++++       WARN_ON(unmapped != size);
          
---  ----       WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
                if (!cookie->fq_domain)
---  ----               iommu_tlb_sync(domain);
+++  ++++               iommu_tlb_sync(domain, &iotlb_gather);
                iommu_dma_free_iova(cookie, dma_addr, size);
          }
          
@@@@@@@@@@@ -459,11 -459,13 -459,13 -464,11 -464,11 -459,11 -459,11 -459,13 -459,11 -466,13 +466,11 @@@@@@@@@@@ static dma_addr_t __iommu_dma_map(struc
          {
                struct iommu_domain *domain = iommu_get_dma_domain(dev);
                struct iommu_dma_cookie *cookie = domain->iova_cookie;
 --    - -      size_t iova_off = 0;
 ++    + +      struct iova_domain *iovad = &cookie->iovad;
 ++    + +      size_t iova_off = iova_offset(iovad, phys);
                dma_addr_t iova;
          
 --    - -      if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
 --    - -              iova_off = iova_offset(&cookie->iovad, phys);
 --    - -              size = iova_align(&cookie->iovad, size + iova_off);
 --    - -      }
 ++    + +      size = iova_align(iovad, size + iova_off);
          
                iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
                if (!iova)
@@@@@@@@@@@ -572,7 -574,7 -574,7 -577,7 -577,7 -572,7 -572,7 -574,7 -572,7 -581,7 +579,7 @@@@@@@@@@@ static void *iommu_dma_alloc_remap(stru
                struct iova_domain *iovad = &cookie->iovad;
                bool coherent = dev_is_dma_coherent(dev);
                int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 --    - -      pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 ++    + +      pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
                unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
                struct page **pages;
                struct sg_table sgt;
@@@@@@@@@@@ -762,7 -764,7 -764,7 -767,7 -767,7 -762,7 -762,7 -764,7 -762,7 -771,7 +769,7 @@@@@@@@@@@ static int __finalise_sg(struct device 
                         * - and wouldn't make the resulting output segment too long
                         */
                        if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
 --    - -                  (cur_len + s_length <= max_len)) {
 ++    + +                  (max_len - cur_len >= s_length)) {
                                /* ...then concatenate it with the previous one */
                                cur_len += s_length;
                        } else {
@@@@@@@@@@@ -965,18 -967,15 -967,15 -970,15 -970,18 -965,18 -965,18 -967,15 -965,15 -974,15 +972,18 @@@@@@@@@@@ static void *iommu_dma_alloc_pages(stru
          {
                bool coherent = dev_is_dma_coherent(dev);
                size_t alloc_size = PAGE_ALIGN(size);
 +++   +++      int node = dev_to_node(dev);
                struct page *page = NULL;
                void *cpu_addr;
          
                page = dma_alloc_contiguous(dev, alloc_size, gfp);
 +++   +++      if (!page)
 +++   +++              page = alloc_pages_node(node, gfp, get_order(alloc_size));
                if (!page)
                        return NULL;
          
                if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
 --    - -              pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 ++    + +              pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
          
                        cpu_addr = dma_common_contiguous_remap(page, alloc_size,
                                        VM_USERMAP, prot, __builtin_return_address(0));
@@@@@@@@@@@ -1036,7 -1035,7 -1035,7 -1038,7 -1041,7 -1036,7 -1036,7 -1035,7 -1033,7 -1042,7 +1043,7 @@@@@@@@@@@ static int iommu_dma_mmap(struct devic
                unsigned long pfn, off = vma->vm_pgoff;
                int ret;
          
 --    - -      vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
 ++    + +      vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
          
                if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                        return ret;
@@@@@@@@@@@ -1148,21 -1147,16 -1147,16 -1150,21 -1153,21 -1148,21 -1148,21 -1147,16 -1145,21 -1154,16 +1155,21 @@@@@@@@@@@ static struct iommu_dma_msi_page *iommu
                if (!msi_page)
                        return NULL;
          
 --    - -      iova = __iommu_dma_map(dev, msi_addr, size, prot);
 --    - -      if (iova == DMA_MAPPING_ERROR)
 ++    + +      iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
 ++    + +      if (!iova)
                        goto out_free_page;
          
 ++    + +      if (iommu_map(domain, iova, msi_addr, size, prot))
 ++    + +              goto out_free_iova;
 ++    + +
                INIT_LIST_HEAD(&msi_page->list);
                msi_page->phys = msi_addr;
                msi_page->iova = iova;
                list_add(&msi_page->list, &cookie->msi_page_list);
                return msi_page;
          
 ++    + +out_free_iova:
 ++    + +      iommu_dma_free_iova(cookie, iova, size);
          out_free_page:
                kfree(msi_page);
                return NULL;
diff --combined drivers/iommu/exynos-iommu.c
index b0c1e5f9daae5acc353ef91c5d0ab44f773fa318,b0c1e5f9daae5acc353ef91c5d0ab44f773fa318,a48236c1d5cb882886f2d2ef94a45da18665af57,cf5af34cb6819bf25501bb235b1cb58b6a21614e,cf5af34cb6819bf25501bb235b1cb58b6a21614e,b0c1e5f9daae5acc353ef91c5d0ab44f773fa318,b0c1e5f9daae5acc353ef91c5d0ab44f773fa318,b0c1e5f9daae5acc353ef91c5d0ab44f773fa318,b0c1e5f9daae5acc353ef91c5d0ab44f773fa318,3ea7b3f8ee12d2af513a1049ecc29177f99b1120..9c94e16fb1277f793bb8d24062b09afb8ab49d74
@@@@@@@@@@@ -566,7 -566,7 -566,7 -566,7 -566,7 -566,7 -566,7 -566,7 -566,7 -566,7 +566,7 @@@@@@@@@@@ static void sysmmu_tlb_invalidate_entry
          
          static const struct iommu_ops exynos_iommu_ops;
          
-- -------static int __init exynos_sysmmu_probe(struct platform_device *pdev)
++ +++++++static int exynos_sysmmu_probe(struct platform_device *pdev)
          {
                int irq, ret;
                struct device *dev = &pdev->dev;
                        return PTR_ERR(data->sfrbase);
          
                irq = platform_get_irq(pdev, 0);
---------       if (irq <= 0) {
---------               dev_err(dev, "Unable to find IRQ resource\n");
+++++++++       if (irq <= 0)
                        return irq;
---------       }
          
                ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
                                        dev_name(dev), data);
@@@@@@@@@@@ -1130,7 -1130,7 -1130,7 -1130,8 -1130,8 -1130,7 -1130,7 -1130,7 -1130,7 -1128,8 +1128,8 @@@@@@@@@@@ static void exynos_iommu_tlb_invalidate
          }
          
          static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
---  ----                                unsigned long l_iova, size_t size)
+++  ++++                                unsigned long l_iova, size_t size,
+++  ++++                                struct iommu_iotlb_gather *gather)
          {
                struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
                sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
diff --combined drivers/iommu/intel-iommu.c
index c4e0e4a9ee9ec5c2f5632176591badb3161031fa,bdaed2da8a55e5a96904d0a182b9d44db8843777,bdaed2da8a55e5a96904d0a182b9d44db8843777,b7454ca4a87c34f95a6be59af8ea6813aab9811e,b7454ca4a87c34f95a6be59af8ea6813aab9811e,12d094d08c0a2e982fb712b9310ceadf7213013d,12d094d08c0a2e982fb712b9310ceadf7213013d,bdaed2da8a55e5a96904d0a182b9d44db8843777,518de728ae5c911b1a58dc4520cb298ab77575ae,587337534b765a44d53cd7194c101d67eb46bd78..87de0b975672b0a8864277ff799b5e02e56547ea
          #include <linux/dma-direct.h>
          #include <linux/crash_dump.h>
          #include <linux/numa.h>
++++++++ +#include <linux/swiotlb.h>
          #include <asm/irq_remapping.h>
          #include <asm/cacheflush.h>
          #include <asm/iommu.h>
++++++++ +#include <trace/events/intel_iommu.h>
          
          #include "irq_remapping.h"
          #include "intel-pasid.h"
@@@@@@@@@@@ -339,13 -339,11 -339,11 -339,11 -339,11 -339,11 -339,11 -339,11 -341,13 -339,11 +341,15 @@@@@@@@@@@ static void domain_exit(struct dmar_dom
          static void domain_remove_dev_info(struct dmar_domain *domain);
          static void dmar_remove_one_dev_info(struct device *dev);
          static void __dmar_remove_one_dev_info(struct device_domain_info *info);
 +++++++++static void domain_context_clear(struct intel_iommu *iommu,
 +++++++++                               struct device *dev);
          static int domain_detach_iommu(struct dmar_domain *domain,
                                       struct intel_iommu *iommu);
          static bool device_is_rmrr_locked(struct device *dev);
          static int intel_iommu_attach_device(struct iommu_domain *domain,
                                             struct device *dev);
++++++++ +static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
++++++++ +                                          dma_addr_t iova);
          
          #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
          int dmar_disabled = 0;
@@@@@@@@@@@ -362,6 -360,6 -360,6 -360,6 -360,6 -360,6 -360,6 -360,6 -364,7 -360,6 +366,7 @@@@@@@@@@@ static int dmar_forcedac
          static int intel_iommu_strict;
          static int intel_iommu_superpage = 1;
          static int iommu_identity_mapping;
++++++++ +static int intel_no_bounce;
          
          #define IDENTMAP_ALL          1
          #define IDENTMAP_GFX          2
@@@@@@@@@@@ -375,6 -373,6 -373,6 -373,6 -373,6 -373,6 -373,6 -373,6 -378,9 -373,6 +380,9 @@@@@@@@@@@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mappe
          static DEFINE_SPINLOCK(device_domain_lock);
          static LIST_HEAD(device_domain_list);
          
++++++++ +#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) &&  \
++++++++ +                              to_pci_dev(d)->untrusted)
++++++++ +
          /*
           * Iterate over elements in device_domain_list and call the specified
           * callback @fn against each element.
@@@@@@@@@@@ -457,6 -455,6 -455,6 -455,6 -455,6 -455,6 -455,6 -455,6 -463,9 -455,6 +465,9 @@@@@@@@@@@ static int __init intel_iommu_setup(cha
                                printk(KERN_INFO
                                        "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
                                intel_iommu_tboot_noforce = 1;
++++++++ +              } else if (!strncmp(str, "nobounce", 8)) {
++++++++ +                      pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n");
++++++++ +                      intel_no_bounce = 1;
                        }
          
                        str += strcspn(str, ",");
                return ret;
          }
          
 +++++++++struct domain_context_mapping_data {
 +++++++++      struct dmar_domain *domain;
 +++++++++      struct intel_iommu *iommu;
 +++++++++      struct pasid_table *table;
 +++++++++};
 +++++++++
 +++++++++static int domain_context_mapping_cb(struct pci_dev *pdev,
 +++++++++                                   u16 alias, void *opaque)
 +++++++++{
 +++++++++      struct domain_context_mapping_data *data = opaque;
 +++++++++
 +++++++++      return domain_context_mapping_one(data->domain, data->iommu,
 +++++++++                                        data->table, PCI_BUS_NUM(alias),
 +++++++++                                        alias & 0xff);
 +++++++++}
 +++++++++
          static int
          domain_context_mapping(struct dmar_domain *domain, struct device *dev)
          {
 +++++++++      struct domain_context_mapping_data data;
                struct pasid_table *table;
                struct intel_iommu *iommu;
                u8 bus, devfn;
                        return -ENODEV;
          
                table = intel_pasid_get_table(dev);
 ---------      return domain_context_mapping_one(domain, iommu, table, bus, devfn);
 +++++++++
 +++++++++      if (!dev_is_pci(dev))
 +++++++++              return domain_context_mapping_one(domain, iommu, table,
 +++++++++                                                bus, devfn);
 +++++++++
 +++++++++      data.domain = domain;
 +++++++++      data.iommu = iommu;
 +++++++++      data.table = table;
 +++++++++
 +++++++++      return pci_for_each_dma_alias(to_pci_dev(dev),
 +++++++++                                    &domain_context_mapping_cb, &data);
          }
          
          static int domain_context_mapped_cb(struct pci_dev *pdev,
@@@@@@@@@@@ -3296,7 -3267,7 -3267,7 -3267,7 -3267,7 -3267,7 -3267,7 -3267,7 -3278,7 -3267,7 +3307,7 @@@@@@@@@@@ static int __init init_dmars(void
                        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
                }
          
---------       if (iommu_pass_through)
+++++++++       if (iommu_default_passthrough())
                        iommu_identity_mapping |= IDENTMAP_ALL;
          
          #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
@@@@@@@@@@@ -3478,7 -3449,6 -3449,6 -3449,7 -3449,7 -3449,7 -3449,7 -3449,6 -3460,7 -3449,6 +3489,7 @@@@@@@@@@@ static bool iommu_need_mapping(struct d
                                        dmar_domain = to_dmar_domain(domain);
                                        dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
                                }
 ++    + +                      dmar_remove_one_dev_info(dev);
                                get_private_domain_for_dev(dev);
                        }
          
@@@@@@@@@@@ -3534,6 -3504,6 -3504,6 -3505,6 -3505,6 -3505,6 -3505,6 -3504,6 -3516,9 -3504,6 +3545,9 @@@@@@@@@@@ static dma_addr_t __intel_map_single(st
          
                start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
                start_paddr += paddr & ~PAGE_MASK;
++++++++ +
++++++++ +      trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT);
++++++++ +
                return start_paddr;
          
          error:
@@@@@@@@@@@ -3589,10 -3559,10 -3559,10 -3560,10 -3560,10 -3560,10 -3560,10 -3559,10 -3574,7 -3559,10 +3603,7 @@@@@@@@@@@ static void intel_unmap(struct device *
                if (dev_is_pci(dev))
                        pdev = to_pci_dev(dev);
          
-------- -      dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
-------- -
                freelist = domain_unmap(domain, start_pfn, last_pfn);
-------- -
                if (intel_iommu_strict || (pdev && pdev->untrusted) ||
                                !has_iova_flush_queue(&domain->iovad)) {
                        iommu_flush_iotlb_psi(iommu, domain, start_pfn,
                         * cpu used up by the iotlb flush operation...
                         */
                }
++++++++ +
++++++++ +      trace_unmap_single(dev, dev_addr, size);
          }
          
          static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
@@@@@@@@@@@ -3698,6 -3668,6 -3668,6 -3669,6 -3669,6 -3669,6 -3669,6 -3668,6 -3682,8 -3668,6 +3711,8 @@@@@@@@@@@ static void intel_unmap_sg(struct devic
                }
          
                intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
++++++++ +
++++++++ +      trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
          }
          
          static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
                        return 0;
                }
          
++++++++ +      trace_map_sg(dev, iova_pfn << PAGE_SHIFT,
++++++++ +                   sg_phys(sglist), size << VTD_PAGE_SHIFT);
++++++++ +
                return nelems;
          }
          
@@@@@@@@@@@ -3769,6 -3739,6 -3739,6 -3740,6 -3740,6 -3740,6 -3740,6 -3739,6 -3758,252 -3739,6 +3787,252 @@@@@@@@@@@ static const struct dma_map_ops intel_d
                .dma_supported = dma_direct_supported,
          };
          
++++++++ +static void
++++++++ +bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size,
++++++++ +                 enum dma_data_direction dir, enum dma_sync_target target)
++++++++ +{
++++++++ +      struct dmar_domain *domain;
++++++++ +      phys_addr_t tlb_addr;
++++++++ +
++++++++ +      domain = find_domain(dev);
++++++++ +      if (WARN_ON(!domain))
++++++++ +              return;
++++++++ +
++++++++ +      tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr);
++++++++ +      if (is_swiotlb_buffer(tlb_addr))
++++++++ +              swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target);
++++++++ +}
++++++++ +
++++++++ +static dma_addr_t
++++++++ +bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
++++++++ +                enum dma_data_direction dir, unsigned long attrs,
++++++++ +                u64 dma_mask)
++++++++ +{
++++++++ +      size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
++++++++ +      struct dmar_domain *domain;
++++++++ +      struct intel_iommu *iommu;
++++++++ +      unsigned long iova_pfn;
++++++++ +      unsigned long nrpages;
++++++++ +      phys_addr_t tlb_addr;
++++++++ +      int prot = 0;
++++++++ +      int ret;
++++++++ +
++++++++ +      domain = find_domain(dev);
++++++++ +      if (WARN_ON(dir == DMA_NONE || !domain))
++++++++ +              return DMA_MAPPING_ERROR;
++++++++ +
++++++++ +      iommu = domain_get_iommu(domain);
++++++++ +      if (WARN_ON(!iommu))
++++++++ +              return DMA_MAPPING_ERROR;
++++++++ +
++++++++ +      nrpages = aligned_nrpages(0, size);
++++++++ +      iova_pfn = intel_alloc_iova(dev, domain,
++++++++ +                                  dma_to_mm_pfn(nrpages), dma_mask);
++++++++ +      if (!iova_pfn)
++++++++ +              return DMA_MAPPING_ERROR;
++++++++ +
++++++++ +      /*
++++++++ +       * Check if DMAR supports zero-length reads on write only
++++++++ +       * mappings..
++++++++ +       */
++++++++ +      if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
++++++++ +                      !cap_zlr(iommu->cap))
++++++++ +              prot |= DMA_PTE_READ;
++++++++ +      if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
++++++++ +              prot |= DMA_PTE_WRITE;
++++++++ +
++++++++ +      /*
++++++++ +       * If both the physical buffer start address and size are
++++++++ +       * page aligned, we don't need to use a bounce page.
++++++++ +       */
++++++++ +      if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
++++++++ +              tlb_addr = swiotlb_tbl_map_single(dev,
++++++++ +                              __phys_to_dma(dev, io_tlb_start),
++++++++ +                              paddr, size, aligned_size, dir, attrs);
++++++++ +              if (tlb_addr == DMA_MAPPING_ERROR) {
++++++++ +                      goto swiotlb_error;
++++++++ +              } else {
++++++++ +                      /* Cleanup the padding area. */
++++++++ +                      void *padding_start = phys_to_virt(tlb_addr);
++++++++ +                      size_t padding_size = aligned_size;
++++++++ +
++++++++ +                      if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
++++++++ +                          (dir == DMA_TO_DEVICE ||
++++++++ +                           dir == DMA_BIDIRECTIONAL)) {
++++++++ +                              padding_start += size;
++++++++ +                              padding_size -= size;
++++++++ +                      }
++++++++ +
++++++++ +                      memset(padding_start, 0, padding_size);
++++++++ +              }
++++++++ +      } else {
++++++++ +              tlb_addr = paddr;
++++++++ +      }
++++++++ +
++++++++ +      ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
++++++++ +                               tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot);
++++++++ +      if (ret)
++++++++ +              goto mapping_error;
++++++++ +
++++++++ +      trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size);
++++++++ +
++++++++ +      return (phys_addr_t)iova_pfn << PAGE_SHIFT;
++++++++ +
++++++++ +mapping_error:
++++++++ +      if (is_swiotlb_buffer(tlb_addr))
++++++++ +              swiotlb_tbl_unmap_single(dev, tlb_addr, size,
++++++++ +                                       aligned_size, dir, attrs);
++++++++ +swiotlb_error:
++++++++ +      free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
++++++++ +      dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n",
++++++++ +              size, (unsigned long long)paddr, dir);
++++++++ +
++++++++ +      return DMA_MAPPING_ERROR;
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
++++++++ +                  enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);
++++++++ +      struct dmar_domain *domain;
++++++++ +      phys_addr_t tlb_addr;
++++++++ +
++++++++ +      domain = find_domain(dev);
++++++++ +      if (WARN_ON(!domain))
++++++++ +              return;
++++++++ +
++++++++ +      tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr);
++++++++ +      if (WARN_ON(!tlb_addr))
++++++++ +              return;
++++++++ +
++++++++ +      intel_unmap(dev, dev_addr, size);
++++++++ +      if (is_swiotlb_buffer(tlb_addr))
++++++++ +              swiotlb_tbl_unmap_single(dev, tlb_addr, size,
++++++++ +                                       aligned_size, dir, attrs);
++++++++ +
++++++++ +      trace_bounce_unmap_single(dev, dev_addr, size);
++++++++ +}
++++++++ +
++++++++ +static dma_addr_t
++++++++ +bounce_map_page(struct device *dev, struct page *page, unsigned long offset,
++++++++ +              size_t size, enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      return bounce_map_single(dev, page_to_phys(page) + offset,
++++++++ +                               size, dir, attrs, *dev->dma_mask);
++++++++ +}
++++++++ +
++++++++ +static dma_addr_t
++++++++ +bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
++++++++ +                  enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      return bounce_map_single(dev, phys_addr, size,
++++++++ +                               dir, attrs, *dev->dma_mask);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size,
++++++++ +                enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      bounce_unmap_single(dev, dev_addr, size, dir, attrs);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size,
++++++++ +                    enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      bounce_unmap_single(dev, dev_addr, size, dir, attrs);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems,
++++++++ +              enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      struct scatterlist *sg;
++++++++ +      int i;
++++++++ +
++++++++ +      for_each_sg(sglist, sg, nelems, i)
++++++++ +              bounce_unmap_page(dev, sg->dma_address,
++++++++ +                                sg_dma_len(sg), dir, attrs);
++++++++ +}
++++++++ +
++++++++ +static int
++++++++ +bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
++++++++ +            enum dma_data_direction dir, unsigned long attrs)
++++++++ +{
++++++++ +      int i;
++++++++ +      struct scatterlist *sg;
++++++++ +
++++++++ +      for_each_sg(sglist, sg, nelems, i) {
++++++++ +              sg->dma_address = bounce_map_page(dev, sg_page(sg),
++++++++ +                                                sg->offset, sg->length,
++++++++ +                                                dir, attrs);
++++++++ +              if (sg->dma_address == DMA_MAPPING_ERROR)
++++++++ +                      goto out_unmap;
++++++++ +              sg_dma_len(sg) = sg->length;
++++++++ +      }
++++++++ +
++++++++ +      return nelems;
++++++++ +
++++++++ +out_unmap:
++++++++ +      bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
++++++++ +      return 0;
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
++++++++ +                         size_t size, enum dma_data_direction dir)
++++++++ +{
++++++++ +      bounce_sync_single(dev, addr, size, dir, SYNC_FOR_CPU);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_single_for_device(struct device *dev, dma_addr_t addr,
++++++++ +                            size_t size, enum dma_data_direction dir)
++++++++ +{
++++++++ +      bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
++++++++ +                     int nelems, enum dma_data_direction dir)
++++++++ +{
++++++++ +      struct scatterlist *sg;
++++++++ +      int i;
++++++++ +
++++++++ +      for_each_sg(sglist, sg, nelems, i)
++++++++ +              bounce_sync_single(dev, sg_dma_address(sg),
++++++++ +                                 sg_dma_len(sg), dir, SYNC_FOR_CPU);
++++++++ +}
++++++++ +
++++++++ +static void
++++++++ +bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
++++++++ +                        int nelems, enum dma_data_direction dir)
++++++++ +{
++++++++ +      struct scatterlist *sg;
++++++++ +      int i;
++++++++ +
++++++++ +      for_each_sg(sglist, sg, nelems, i)
++++++++ +              bounce_sync_single(dev, sg_dma_address(sg),
++++++++ +                                 sg_dma_len(sg), dir, SYNC_FOR_DEVICE);
++++++++ +}
++++++++ +
++++++++ +static const struct dma_map_ops bounce_dma_ops = {
++++++++ +      .alloc                  = intel_alloc_coherent,
++++++++ +      .free                   = intel_free_coherent,
++++++++ +      .map_sg                 = bounce_map_sg,
++++++++ +      .unmap_sg               = bounce_unmap_sg,
++++++++ +      .map_page               = bounce_map_page,
++++++++ +      .unmap_page             = bounce_unmap_page,
++++++++ +      .sync_single_for_cpu    = bounce_sync_single_for_cpu,
++++++++ +      .sync_single_for_device = bounce_sync_single_for_device,
++++++++ +      .sync_sg_for_cpu        = bounce_sync_sg_for_cpu,
++++++++ +      .sync_sg_for_device     = bounce_sync_sg_for_device,
++++++++ +      .map_resource           = bounce_map_resource,
++++++++ +      .unmap_resource         = bounce_unmap_resource,
++++++++ +      .dma_supported          = dma_direct_supported,
++++++++ +};
++++++++ +
          static inline int iommu_domain_cache_init(void)
          {
                int ret = 0;
@@@@@@@@@@@ -4569,22 -4539,22 -4539,22 -4540,22 -4540,22 -4540,22 -4540,22 -4539,22 -4804,20 -4539,22 +4833,20 @@@@@@@@@@@ const struct attribute_group *intel_iom
                NULL,
          };
          
-------- -static int __init platform_optin_force_iommu(void)
++++++++ +static inline bool has_untrusted_dev(void)
          {
                struct pci_dev *pdev = NULL;
-------- -      bool has_untrusted_dev = false;
          
-------- -      if (!dmar_platform_optin() || no_platform_optin)
-------- -              return 0;
++++++++ +      for_each_pci_dev(pdev)
++++++++ +              if (pdev->untrusted)
++++++++ +                      return true;
          
-------- -      for_each_pci_dev(pdev) {
-------- -              if (pdev->untrusted) {
-------- -                      has_untrusted_dev = true;
-------- -                      break;
-------- -              }
-------- -      }
++++++++ +      return false;
++++++++ +}
          
-------- -      if (!has_untrusted_dev)
++++++++ +static int __init platform_optin_force_iommu(void)
++++++++ +{
++++++++ +      if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev())
                        return 0;
          
                if (no_iommu || dmar_disabled)
                        iommu_identity_mapping |= IDENTMAP_ALL;
          
                dmar_disabled = 0;
-------- -#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-------- -      swiotlb = 0;
-------- -#endif
                no_iommu = 0;
          
                return 1;
@@@@@@@@@@@ -4740,7 -4710,7 -4710,7 -4711,7 -4711,7 -4711,7 -4711,7 -4710,7 -4970,14 -4710,7 +4999,14 @@@@@@@@@@@ int __init intel_iommu_init(void
                up_write(&dmar_global_lock);
          
          #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
-------- -      swiotlb = 0;
++++++++ +      /*
++++++++ +       * If the system has no untrusted devices or the user has decided
++++++++ +       * to disable the bounce page mechanism, we don't need swiotlb.
++++++++ +       * Mark this so that the pre-allocated bounce pages will be
++++++++ +       * released later.
++++++++ +       */
++++++++ +      if (!has_untrusted_dev() || intel_no_bounce)
++++++++ +              swiotlb = 0;
          #endif
                dma_ops = &intel_dma_ops;
          
                return ret;
          }
          
 +++++++++static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
 +++++++++{
 +++++++++      struct intel_iommu *iommu = opaque;
 +++++++++
 +++++++++      domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
 +++++++++      return 0;
 +++++++++}
 +++++++++
 +++++++++/*
 +++++++++ * NB - intel-iommu lacks any sort of reference counting for the users of
 +++++++++ * dependent devices.  If multiple endpoints have intersecting dependent
 +++++++++ * devices, unbinding the driver from any one of them will possibly leave
 +++++++++ * the others unable to operate.
 +++++++++ */
 +++++++++static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
 +++++++++{
 +++++++++      if (!iommu || !dev || !dev_is_pci(dev))
 +++++++++              return;
 +++++++++
 +++++++++      pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
 +++++++++}
 +++++++++
          static void __dmar_remove_one_dev_info(struct device_domain_info *info)
          {
                struct dmar_domain *domain;
                                                PASID_RID2PASID);
          
                        iommu_disable_dev_iotlb(info);
 ---------              domain_context_clear_one(iommu, info->bus, info->devfn);
 +++++++++              domain_context_clear(iommu, info->dev);
                        intel_pasid_free_table(info->dev);
                }
          
          
                /* free the private domain */
                if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
 --    - -          !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY))
 ++    + +          !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
 ++    + +          list_empty(&domain->devices))
                        domain_exit(info->domain);
          
                free_devinfo_mem(info);
@@@@@@@@@@@ -4856,8 -4803,7 -4803,7 -4805,8 -4805,8 -4805,8 -4805,8 -4803,7 -5071,8 -4803,7 +5122,8 @@@@@@@@@@@ static void dmar_remove_one_dev_info(st
          
                spin_lock_irqsave(&device_domain_lock, flags);
                info = dev->archdata.iommu;
 --    - -      __dmar_remove_one_dev_info(info);
 ++    + +      if (info)
 ++    + +              __dmar_remove_one_dev_info(info);
                spin_unlock_irqrestore(&device_domain_lock, flags);
          }
          
@@@@@@@@@@@ -5204,7 -5150,7 -5150,7 -5153,8 -5153,8 -5153,7 -5153,7 -5150,7 -5419,7 -5150,8 +5470,8 @@@@@@@@@@@ static int intel_iommu_map(struct iommu
          }
          
          static size_t intel_iommu_unmap(struct iommu_domain *domain,
---  ----                               unsigned long iova, size_t size)
+++  ++++                               unsigned long iova, size_t size,
+++  ++++                               struct iommu_iotlb_gather *gather)
          {
                struct dmar_domain *dmar_domain = to_dmar_domain(domain);
                struct page *freelist = NULL;
@@@@@@@@@@@ -5335,7 -5281,6 -5281,6 -5285,7 -5285,7 -5284,7 -5284,7 -5281,6 -5550,7 -5282,6 +5602,7 @@@@@@@@@@@ static int intel_iommu_add_device(struc
                        if (device_def_domain_type(dev) == IOMMU_DOMAIN_IDENTITY) {
                                ret = iommu_request_dm_for_dev(dev);
                                if (ret) {
 ++    + +                              dmar_remove_one_dev_info(dev);
                                        dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
                                        domain_add_dev_info(si_domain, dev);
                                        dev_info(dev,
                        if (device_def_domain_type(dev) == IOMMU_DOMAIN_DMA) {
                                ret = iommu_request_dma_domain_for_dev(dev);
                                if (ret) {
 ++    + +                              dmar_remove_one_dev_info(dev);
                                        dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
                                        if (!get_private_domain_for_dev(dev)) {
                                                dev_warn(dev,
                        }
                }
          
++++++++ +      if (device_needs_bounce(dev)) {
++++++++ +              dev_info(dev, "Use Intel IOMMU bounce page dma_ops\n");
++++++++ +              set_dma_ops(dev, &bounce_dma_ops);
++++++++ +      }
++++++++ +
                return 0;
          }
          
@@@@@@@@@@@ -5372,11 -5316,9 -5316,9 -5322,11 -5322,11 -5321,11 -5321,11 -5316,9 -5592,14 -5317,9 +5644,14 @@@@@@@@@@@ static void intel_iommu_remove_device(s
                if (!iommu)
                        return;
          
 ++    + +      dmar_remove_one_dev_info(dev);
 ++    + +
                iommu_group_remove_device(dev);
          
                iommu_device_unlink(&iommu->iommu, dev);
++++++++ +
++++++++ +      if (device_needs_bounce(dev))
++++++++ +              set_dma_ops(dev, NULL);
          }
          
          static void intel_iommu_get_resv_regions(struct device *device,
@@@@@@@@@@@ -5690,20 -5632,20 -5632,20 -5640,20 -5640,20 -5639,20 -5639,20 -5632,20 -5913,46 -5633,20 +5965,46 @@@@@@@@@@@ const struct iommu_ops intel_iommu_ops 
                .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
          };
          
-------- -static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
++++++++ +static void quirk_iommu_igfx(struct pci_dev *dev)
          {
-------- -      /* G4x/GM45 integrated gfx dmar support is totally busted. */
                pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
                dmar_map_gfx = 0;
          }
          
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
-------- -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
++++++++ +/* G4x/GM45 integrated gfx dmar support is totally busted. */
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
++++++++ +
++++++++ +/* Broadwell igfx malfunctions with dmar */
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
++++++++ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
          
          static void quirk_iommu_rwbf(struct pci_dev *dev)
          {
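
          A minimal standalone sketch, not part of the patch above, of the page-alignment
          test that bounce_map_single() uses to decide whether a buffer must be routed
          through a swiotlb bounce page: only buffers whose physical start address or size
          is not VTD_PAGE_SIZE aligned are bounced, because mapping a partially used IOMMU
          page would expose the rest of that page to an untrusted device. The helper name
          needs_bounce_page() is illustrative.

          #include <stdbool.h>
          #include <stddef.h>
          #include <stdint.h>

          #define VTD_PAGE_SIZE    4096u
          #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

          /* Mirrors the check in bounce_map_single(): bounce only when the
           * physical start address or the size is not page aligned. */
          static bool needs_bounce_page(uint64_t paddr, size_t size)
          {
                  return !IS_ALIGNED(paddr | (uint64_t)size, VTD_PAGE_SIZE);
          }
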
index ad0098c0c87c7544f18b70c0bd9dc43913ca0478,ad0098c0c87c7544f18b70c0bd9dc43913ca0478,ad0098c0c87c7544f18b70c0bd9dc43913ca0478,76a8ec343d53252ea916261555100430d7837318,76a8ec343d53252ea916261555100430d7837318,ad0098c0c87c7544f18b70c0bd9dc43913ca0478,49db9d6825487c9a4a66e0e1595e378817509287,ad0098c0c87c7544f18b70c0bd9dc43913ca0478,ad0098c0c87c7544f18b70c0bd9dc43913ca0478,76a8ec343d53252ea916261555100430d7837318..9da8309f71708f213f91dbe99374681e0da3f652
@@@@@@@@@@@ -49,6 -49,6 -49,6 -49,6 -49,6 -49,6 -49,7 -49,6 -49,6 -49,6 +49,7 @@@@@@@@@@@ struct ipmmu_features 
                bool setup_imbuscr;
                bool twobit_imttbcr_sl0;
                bool reserved_context;
++++++ +++      bool cache_snoop;
          };
          
          struct ipmmu_vmsa_device {
@@@@@@@@@@@ -115,45 -115,45 -115,45 -115,45 -115,45 -115,45 -116,44 -115,45 -115,45 -115,45 +116,44 @@@@@@@@@@@ static struct ipmmu_vmsa_device *to_ipm
          #define IMTTBCR                               0x0008
          #define IMTTBCR_EAE                   (1 << 31)
          #define IMTTBCR_PMB                   (1 << 30)
------ ---#define IMTTBCR_SH1_NON_SHAREABLE     (0 << 28)
------ ---#define IMTTBCR_SH1_OUTER_SHAREABLE   (2 << 28)
------ ---#define IMTTBCR_SH1_INNER_SHAREABLE   (3 << 28)
------ ---#define IMTTBCR_SH1_MASK              (3 << 28)
------ ---#define IMTTBCR_ORGN1_NC              (0 << 26)
------ ---#define IMTTBCR_ORGN1_WB_WA           (1 << 26)
------ ---#define IMTTBCR_ORGN1_WT              (2 << 26)
------ ---#define IMTTBCR_ORGN1_WB              (3 << 26)
------ ---#define IMTTBCR_ORGN1_MASK            (3 << 26)
------ ---#define IMTTBCR_IRGN1_NC              (0 << 24)
------ ---#define IMTTBCR_IRGN1_WB_WA           (1 << 24)
------ ---#define IMTTBCR_IRGN1_WT              (2 << 24)
------ ---#define IMTTBCR_IRGN1_WB              (3 << 24)
------ ---#define IMTTBCR_IRGN1_MASK            (3 << 24)
++++++ +++#define IMTTBCR_SH1_NON_SHAREABLE     (0 << 28)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH1_OUTER_SHAREABLE   (2 << 28)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH1_INNER_SHAREABLE   (3 << 28)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH1_MASK              (3 << 28)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_NC              (0 << 26)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_WB_WA           (1 << 26)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_WT              (2 << 26)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_WB              (3 << 26)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN1_MASK            (3 << 26)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_NC              (0 << 24)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_WB_WA           (1 << 24)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_WT              (2 << 24)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_WB              (3 << 24)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN1_MASK            (3 << 24)       /* R-Car Gen2 only */
          #define IMTTBCR_TSZ1_MASK             (7 << 16)
          #define IMTTBCR_TSZ1_SHIFT            16
------ ---#define IMTTBCR_SH0_NON_SHAREABLE     (0 << 12)
------ ---#define IMTTBCR_SH0_OUTER_SHAREABLE   (2 << 12)
------ ---#define IMTTBCR_SH0_INNER_SHAREABLE   (3 << 12)
------ ---#define IMTTBCR_SH0_MASK              (3 << 12)
------ ---#define IMTTBCR_ORGN0_NC              (0 << 10)
------ ---#define IMTTBCR_ORGN0_WB_WA           (1 << 10)
------ ---#define IMTTBCR_ORGN0_WT              (2 << 10)
------ ---#define IMTTBCR_ORGN0_WB              (3 << 10)
------ ---#define IMTTBCR_ORGN0_MASK            (3 << 10)
------ ---#define IMTTBCR_IRGN0_NC              (0 << 8)
------ ---#define IMTTBCR_IRGN0_WB_WA           (1 << 8)
------ ---#define IMTTBCR_IRGN0_WT              (2 << 8)
------ ---#define IMTTBCR_IRGN0_WB              (3 << 8)
------ ---#define IMTTBCR_IRGN0_MASK            (3 << 8)
++++++ +++#define IMTTBCR_SH0_NON_SHAREABLE     (0 << 12)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH0_OUTER_SHAREABLE   (2 << 12)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH0_INNER_SHAREABLE   (3 << 12)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SH0_MASK              (3 << 12)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_NC              (0 << 10)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_WB_WA           (1 << 10)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_WT              (2 << 10)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_WB              (3 << 10)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_ORGN0_MASK            (3 << 10)       /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_NC              (0 << 8)        /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_WB_WA           (1 << 8)        /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_WT              (2 << 8)        /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_WB              (3 << 8)        /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_IRGN0_MASK            (3 << 8)        /* R-Car Gen2 only */
++++++ +++#define IMTTBCR_SL0_TWOBIT_LVL_3      (0 << 6)        /* R-Car Gen3 only */
++++++ +++#define IMTTBCR_SL0_TWOBIT_LVL_2      (1 << 6)        /* R-Car Gen3 only */
++++++ +++#define IMTTBCR_SL0_TWOBIT_LVL_1      (2 << 6)        /* R-Car Gen3 only */
          #define IMTTBCR_SL0_LVL_2             (0 << 4)
          #define IMTTBCR_SL0_LVL_1             (1 << 4)
          #define IMTTBCR_TSZ0_MASK             (7 << 0)
          #define IMTTBCR_TSZ0_SHIFT            0
          
------ ---#define IMTTBCR_SL0_TWOBIT_LVL_3      (0 << 6)
------ ---#define IMTTBCR_SL0_TWOBIT_LVL_2      (1 << 6)
------ ---#define IMTTBCR_SL0_TWOBIT_LVL_1      (2 << 6)
------ ---
          #define IMBUSCR                               0x000c
          #define IMBUSCR_DVM                   (1 << 2)
          #define IMBUSCR_BUSSEL_SYS            (0 << 0)
@@@@@@@@@@@ -361,16 -361,16 -361,16 -361,16 -361,16 -361,16 -361,16 -361,16 -361,16 -361,16 +361,16 @@@@@@@@@@@ static void ipmmu_tlb_flush_all(void *c
                ipmmu_tlb_invalidate(domain);
          }
          
---  ---- static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
---  ----                               size_t granule, bool leaf, void *cookie)
+++  ++++ static void ipmmu_tlb_flush(unsigned long iova, size_t size,
+++  ++++                               size_t granule, void *cookie)
          {
---  ----       /* The hardware doesn't support selective TLB flush. */
+++  ++++       ipmmu_tlb_flush_all(cookie);
          }
          
---  ---- static const struct iommu_gather_ops ipmmu_gather_ops = {
+++  ++++ static const struct iommu_flush_ops ipmmu_flush_ops = {
                .tlb_flush_all = ipmmu_tlb_flush_all,
---  ----       .tlb_add_flush = ipmmu_tlb_add_flush,
---  ----       .tlb_sync = ipmmu_tlb_flush_all,
+++  ++++       .tlb_flush_walk = ipmmu_tlb_flush,
+++  ++++       .tlb_flush_leaf = ipmmu_tlb_flush,
          };
          
          /* -----------------------------------------------------------------------------
@@@@@@@@@@@ -422,17 -422,17 -422,17 -422,17 -422,17 -422,17 -422,19 -422,17 -422,17 -422,17 +422,19 @@@@@@@@@@@ static void ipmmu_domain_setup_context(
          
                /*
                 * TTBCR
------ ---       * We use long descriptors with inner-shareable WBWA tables and allocate
------ ---       * the whole 32-bit VA space to TTBR0.
++++++ +++       * We use long descriptors and allocate the whole 32-bit VA space to
++++++ +++       * TTBR0.
                 */
                if (domain->mmu->features->twobit_imttbcr_sl0)
                        tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
                else
                        tmp = IMTTBCR_SL0_LVL_1;
          
------ ---      ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE |
------ ---                           IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
------ ---                           IMTTBCR_IRGN0_WB_WA | tmp);
++++++ +++      if (domain->mmu->features->cache_snoop)
++++++ +++              tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
++++++ +++                     IMTTBCR_IRGN0_WB_WA;
++++++ +++
++++++ +++      ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);
          
                /* MAIR0 */
                ipmmu_ctx_write_root(domain, IMMAIR0,
@@@@@@@@@@@ -480,7 -480,7 -480,7 -480,7 -480,7 -480,7 -482,7 -480,7 -480,7 -480,7 +482,7 @@@@@@@@@@@ static int ipmmu_domain_init_context(st
                domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
                domain->cfg.ias = 32;
                domain->cfg.oas = 40;
---  ----       domain->cfg.tlb = &ipmmu_gather_ops;
+++  ++++       domain->cfg.tlb = &ipmmu_flush_ops;
                domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
                domain->io_domain.geometry.force_aperture = true;
                /*
@@@@@@@@@@@ -733,14 -733,14 -733,14 -733,14 -733,14 -733,14 -735,14 -733,14 -733,14 -733,14 +735,14 @@@@@@@@@@@ static int ipmmu_map(struct iommu_domai
          }
          
          static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
---  ----                         size_t size)
+++  ++++                         size_t size, struct iommu_iotlb_gather *gather)
          {
                struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
          
---  ----       return domain->iop->unmap(domain->iop, iova, size);
+++  ++++       return domain->iop->unmap(domain->iop, iova, size, gather);
          }
          
---  ---- static void ipmmu_iotlb_sync(struct iommu_domain *io_domain)
+++  ++++ static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
          {
                struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
          
                        ipmmu_tlb_flush_all(domain);
          }
          
+++  ++++ static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
+++  ++++                            struct iommu_iotlb_gather *gather)
+++  ++++ {
+++  ++++       ipmmu_flush_iotlb_all(io_domain);
+++  ++++ }
+++  ++++ 
          static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
                                              dma_addr_t iova)
          {
@@@@@@@@@@@ -957,7 -957,7 -957,7 -963,7 -963,7 -957,7 -959,7 -957,7 -957,7 -963,7 +965,7 @@@@@@@@@@@ static const struct iommu_ops ipmmu_op
                .detach_dev = ipmmu_detach_device,
                .map = ipmmu_map,
                .unmap = ipmmu_unmap,
---  ----       .flush_iotlb_all = ipmmu_iotlb_sync,
+++  ++++       .flush_iotlb_all = ipmmu_flush_iotlb_all,
                .iotlb_sync = ipmmu_iotlb_sync,
                .iova_to_phys = ipmmu_iova_to_phys,
                .add_device = ipmmu_add_device,
@@@@@@@@@@@ -988,6 -988,6 -988,6 -994,6 -994,6 -988,6 -990,7 -988,6 -988,6 -994,6 +996,7 @@@@@@@@@@@ static const struct ipmmu_features ipmm
                .setup_imbuscr = true,
                .twobit_imttbcr_sl0 = false,
                .reserved_context = false,
++++++ +++      .cache_snoop = true,
          };
          
          static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
                .setup_imbuscr = false,
                .twobit_imttbcr_sl0 = true,
                .reserved_context = true,
++++++ +++      .cache_snoop = false,
          };
          
          static const struct of_device_id ipmmu_of_ids[] = {
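
          A small standalone sketch, not part of the patch, of how the IMTTBCR value is now
          composed in ipmmu_domain_setup_context(): the shareability and cacheability
          attribute bits exist only on R-Car Gen2, so they are ORed in only when the per-SoC
          cache_snoop feature flag is set, while R-Car Gen3 gets IMTTBCR_EAE plus the SL0
          start-level field alone. The helper name imttbcr_value() is illustrative; the bit
          values mirror the defines above.

          #include <stdbool.h>
          #include <stdint.h>

          #define IMTTBCR_EAE                 (1u << 31)
          #define IMTTBCR_SH0_INNER_SHAREABLE (3u << 12)  /* R-Car Gen2 only */
          #define IMTTBCR_ORGN0_WB_WA         (1u << 10)  /* R-Car Gen2 only */
          #define IMTTBCR_IRGN0_WB_WA         (1u << 8)   /* R-Car Gen2 only */
          #define IMTTBCR_SL0_TWOBIT_LVL_1    (2u << 6)   /* R-Car Gen3 only */
          #define IMTTBCR_SL0_LVL_1           (1u << 4)

          static uint32_t imttbcr_value(bool twobit_sl0, bool cache_snoop)
          {
                  /* Start-level field depends on the SoC generation. */
                  uint32_t tmp = twobit_sl0 ? IMTTBCR_SL0_TWOBIT_LVL_1
                                            : IMTTBCR_SL0_LVL_1;

                  /* Cache snooping attributes are only programmed on Gen2. */
                  if (cache_snoop)
                          tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
                                 IMTTBCR_IRGN0_WB_WA;

                  return IMTTBCR_EAE | tmp;
          }
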
index dfb961d8c21b7fd95feaa102a3235950950c0e5f,4879c8c4d19812969e7ed52fc6aab143d15854b9,dfb961d8c21b7fd95feaa102a3235950950c0e5f,8039bc5ee42575f02acc2f6872041c03ccf29ca3,8039bc5ee42575f02acc2f6872041c03ccf29ca3,dfb961d8c21b7fd95feaa102a3235950950c0e5f,dfb961d8c21b7fd95feaa102a3235950950c0e5f,dfb961d8c21b7fd95feaa102a3235950950c0e5f,dfb961d8c21b7fd95feaa102a3235950950c0e5f,8039bc5ee42575f02acc2f6872041c03ccf29ca3..09c6e1c680db980aeb67dae51986b2a84e88d99a
          
          static const struct iommu_ops omap_iommu_ops;
          
+ ++++++++struct orphan_dev {
+ ++++++++      struct device *dev;
+ ++++++++      struct list_head node;
+ ++++++++};
+ ++++++++
+ ++++++++static LIST_HEAD(orphan_dev_list);
+ ++++++++
+ ++++++++static DEFINE_SPINLOCK(orphan_lock);
+ ++++++++
          #define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
          
          /* bitmap of the page sizes currently supported */
          static struct platform_driver omap_iommu_driver;
          static struct kmem_cache *iopte_cachep;
          
+ ++++++++static int _omap_iommu_add_device(struct device *dev);
+ ++++++++
          /**
           * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
           * @dom:      generic iommu domain handle
@@@@@@@@@@@ -65,6 -76,9 -65,6 -65,6 -65,6 -65,6 -65,6 -65,6 -65,6 -65,6 +76,9 @@@@@@@@@@@ static struct omap_iommu_domain *to_oma
          /**
           * omap_iommu_save_ctx - Save registers for pm off-mode support
           * @dev:      client device
+ ++++++++ *
+ ++++++++ * This should be treated as a deprecated API. It is preserved only
+ ++++++++ * to maintain existing functionality for the OMAP3 ISP driver.
           **/
          void omap_iommu_save_ctx(struct device *dev)
          {
@@@@@@@@@@@ -92,6 -106,9 -92,6 -92,6 -92,6 -92,6 -92,6 -92,6 -92,6 -92,6 +106,9 @@@@@@@@@@@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx)
          /**
           * omap_iommu_restore_ctx - Restore registers for pm off-mode support
           * @dev:      client device
+ ++++++++ *
+ ++++++++ * This should be treated as a deprecated API. It is preserved only
+ ++++++++ * to maintain existing functionality for the OMAP3 ISP driver.
           **/
          void omap_iommu_restore_ctx(struct device *dev)
          {
@@@@@@@@@@@ -186,36 -203,18 -186,36 -186,36 -186,36 -186,36 -186,36 -186,36 -186,36 -186,36 +203,18 @@@@@@@@@@@ static void omap2_iommu_disable(struct 
          
          static int iommu_enable(struct omap_iommu *obj)
          {
- --------      int err;
- --------      struct platform_device *pdev = to_platform_device(obj->dev);
- --------      struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- --------
- --------      if (pdata && pdata->deassert_reset) {
- --------              err = pdata->deassert_reset(pdev, pdata->reset_name);
- --------              if (err) {
- --------                      dev_err(obj->dev, "deassert_reset failed: %d\n", err);
- --------                      return err;
- --------              }
- --------      }
- --------
- --------      pm_runtime_get_sync(obj->dev);
+ ++++++++      int ret;
          
- --------      err = omap2_iommu_enable(obj);
+ ++++++++      ret = pm_runtime_get_sync(obj->dev);
+ ++++++++      if (ret < 0)
+ ++++++++              pm_runtime_put_noidle(obj->dev);
          
- --------      return err;
+ ++++++++      return ret < 0 ? ret : 0;
          }
          
          static void iommu_disable(struct omap_iommu *obj)
          {
- --------      struct platform_device *pdev = to_platform_device(obj->dev);
- --------      struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
- --------
- --------      omap2_iommu_disable(obj);
- --------
                pm_runtime_put_sync(obj->dev);
- --------
- --------      if (pdata && pdata->assert_reset)
- --------              pdata->assert_reset(pdev, pdata->reset_name);
          }
          
          /*
@@@@@@@@@@@ -901,15 -900,219 -901,15 -901,15 -901,15 -901,15 -901,15 -901,15 -901,15 -901,15 +900,219 @@@@@@@@@@@ static void omap_iommu_detach(struct om
          
                dma_unmap_single(obj->dev, obj->pd_dma, IOPGD_TABLE_SIZE,
                                 DMA_TO_DEVICE);
- --------      iommu_disable(obj);
                obj->pd_dma = 0;
                obj->iopgd = NULL;
+ ++++++++      iommu_disable(obj);
          
                spin_unlock(&obj->iommu_lock);
          
                dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
          }
          
+ ++++++++static void omap_iommu_save_tlb_entries(struct omap_iommu *obj)
+ ++++++++{
+ ++++++++      struct iotlb_lock lock;
+ ++++++++      struct cr_regs cr;
+ ++++++++      struct cr_regs *tmp;
+ ++++++++      int i;
+ ++++++++
+ ++++++++      /* check if there are any locked tlbs to save */
+ ++++++++      iotlb_lock_get(obj, &lock);
+ ++++++++      obj->num_cr_ctx = lock.base;
+ ++++++++      if (!obj->num_cr_ctx)
+ ++++++++              return;
+ ++++++++
+ ++++++++      tmp = obj->cr_ctx;
+ ++++++++      for_each_iotlb_cr(obj, obj->num_cr_ctx, i, cr)
+ ++++++++              *tmp++ = cr;
+ ++++++++}
+ ++++++++
+ ++++++++static void omap_iommu_restore_tlb_entries(struct omap_iommu *obj)
+ ++++++++{
+ ++++++++      struct iotlb_lock l;
+ ++++++++      struct cr_regs *tmp;
+ ++++++++      int i;
+ ++++++++
+ ++++++++      /* no locked tlbs to restore */
+ ++++++++      if (!obj->num_cr_ctx)
+ ++++++++              return;
+ ++++++++
+ ++++++++      l.base = 0;
+ ++++++++      tmp = obj->cr_ctx;
+ ++++++++      for (i = 0; i < obj->num_cr_ctx; i++, tmp++) {
+ ++++++++              l.vict = i;
+ ++++++++              iotlb_lock_set(obj, &l);
+ ++++++++              iotlb_load_cr(obj, tmp);
+ ++++++++      }
+ ++++++++      l.base = obj->num_cr_ctx;
+ ++++++++      l.vict = i;
+ ++++++++      iotlb_lock_set(obj, &l);
+ ++++++++}
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_domain_deactivate - deactivate attached iommu devices
+ ++++++++ * @domain: iommu domain attached to the target iommu device
+ ++++++++ *
+ ++++++++ * This API allows the client devices of IOMMU devices to suspend
+ ++++++++ * the IOMMUs they control at runtime, after they have idled and
+ ++++++++ * suspended all activity. System Suspend will leverage the PM
+ ++++++++ * driver late callbacks.
+ ++++++++ **/
+ ++++++++int omap_iommu_domain_deactivate(struct iommu_domain *domain)
+ ++++++++{
+ ++++++++      struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ ++++++++      struct omap_iommu_device *iommu;
+ ++++++++      struct omap_iommu *oiommu;
+ ++++++++      int i;
+ ++++++++
+ ++++++++      if (!omap_domain->dev)
+ ++++++++              return 0;
+ ++++++++
+ ++++++++      iommu = omap_domain->iommus;
+ ++++++++      iommu += (omap_domain->num_iommus - 1);
+ ++++++++      for (i = 0; i < omap_domain->num_iommus; i++, iommu--) {
+ ++++++++              oiommu = iommu->iommu_dev;
+ ++++++++              pm_runtime_put_sync(oiommu->dev);
+ ++++++++      }
+ ++++++++
+ ++++++++      return 0;
+ ++++++++}
+ ++++++++EXPORT_SYMBOL_GPL(omap_iommu_domain_deactivate);
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_domain_activate - activate attached iommu devices
+ ++++++++ * @domain: iommu domain attached to the target iommu device
+ ++++++++ *
+ ++++++++ * This API allows the client devices of IOMMU devices to resume the
+ ++++++++ * IOMMUs they control at runtime, before they can resume operations.
+ ++++++++ * System Resume will leverage the PM driver late callbacks.
+ ++++++++ **/
+ ++++++++int omap_iommu_domain_activate(struct iommu_domain *domain)
+ ++++++++{
+ ++++++++      struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+ ++++++++      struct omap_iommu_device *iommu;
+ ++++++++      struct omap_iommu *oiommu;
+ ++++++++      int i;
+ ++++++++
+ ++++++++      if (!omap_domain->dev)
+ ++++++++              return 0;
+ ++++++++
+ ++++++++      iommu = omap_domain->iommus;
+ ++++++++      for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
+ ++++++++              oiommu = iommu->iommu_dev;
+ ++++++++              pm_runtime_get_sync(oiommu->dev);
+ ++++++++      }
+ ++++++++
+ ++++++++      return 0;
+ ++++++++}
+ ++++++++EXPORT_SYMBOL_GPL(omap_iommu_domain_activate);
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_runtime_suspend - disable an iommu device
+ ++++++++ * @dev:      iommu device
+ ++++++++ *
+ ++++++++ * This function performs all that is necessary to disable an
+ ++++++++ * IOMMU device, either during final detachment from a client
+ ++++++++ * device, or during system/runtime suspend of the device. This
+ ++++++++ * includes programming all the appropriate IOMMU registers, and
+ ++++++++ * managing the associated omap_hwmod's state and the device's
+ ++++++++ * reset line. This function also saves the context of any
+ ++++++++ * locked TLBs if suspending.
+ ++++++++ **/
+ ++++++++static __maybe_unused int omap_iommu_runtime_suspend(struct device *dev)
+ ++++++++{
+ ++++++++      struct platform_device *pdev = to_platform_device(dev);
+ ++++++++      struct iommu_platform_data *pdata = dev_get_platdata(dev);
+ ++++++++      struct omap_iommu *obj = to_iommu(dev);
+ ++++++++      int ret;
+ ++++++++
+ ++++++++      /* save the TLBs only during suspend, and not for power down */
+ ++++++++      if (obj->domain && obj->iopgd)
+ ++++++++              omap_iommu_save_tlb_entries(obj);
+ ++++++++
+ ++++++++      omap2_iommu_disable(obj);
+ ++++++++
+ ++++++++      if (pdata && pdata->device_idle)
+ ++++++++              pdata->device_idle(pdev);
+ ++++++++
+ ++++++++      if (pdata && pdata->assert_reset)
+ ++++++++              pdata->assert_reset(pdev, pdata->reset_name);
+ ++++++++
+ ++++++++      if (pdata && pdata->set_pwrdm_constraint) {
+ ++++++++              ret = pdata->set_pwrdm_constraint(pdev, false, &obj->pwrst);
+ ++++++++              if (ret) {
+ ++++++++                      dev_warn(obj->dev, "pwrdm_constraint failed to be reset, status = %d\n",
+ ++++++++                               ret);
+ ++++++++              }
+ ++++++++      }
+ ++++++++
+ ++++++++      return 0;
+ ++++++++}
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_runtime_resume - enable an iommu device
+ ++++++++ * @dev:      iommu device
+ ++++++++ *
+ ++++++++ * This function performs all that is necessary to enable an
+ ++++++++ * IOMMU device, either during initial attachment to a client
+ ++++++++ * device, or during system/runtime resume of the device. This
+ ++++++++ * includes programming all the appropriate IOMMU registers, and
+ ++++++++ * managing the associated omap_hwmod's state and the device's
+ ++++++++ * reset line. The function also restores any locked TLBs if
+ ++++++++ * resuming after a suspend.
+ ++++++++ **/
+ ++++++++static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
+ ++++++++{
+ ++++++++      struct platform_device *pdev = to_platform_device(dev);
+ ++++++++      struct iommu_platform_data *pdata = dev_get_platdata(dev);
+ ++++++++      struct omap_iommu *obj = to_iommu(dev);
+ ++++++++      int ret = 0;
+ ++++++++
+ ++++++++      if (pdata && pdata->set_pwrdm_constraint) {
+ ++++++++              ret = pdata->set_pwrdm_constraint(pdev, true, &obj->pwrst);
+ ++++++++              if (ret) {
+ ++++++++                      dev_warn(obj->dev, "pwrdm_constraint failed to be set, status = %d\n",
+ ++++++++                               ret);
+ ++++++++              }
+ ++++++++      }
+ ++++++++
+ ++++++++      if (pdata && pdata->deassert_reset) {
+ ++++++++              ret = pdata->deassert_reset(pdev, pdata->reset_name);
+ ++++++++              if (ret) {
+ ++++++++                      dev_err(dev, "deassert_reset failed: %d\n", ret);
+ ++++++++                      return ret;
+ ++++++++              }
+ ++++++++      }
+ ++++++++
+ ++++++++      if (pdata && pdata->device_enable)
+ ++++++++              pdata->device_enable(pdev);
+ ++++++++
+ ++++++++      /* restore the TLBs only during resume, and not for power up */
+ ++++++++      if (obj->domain)
+ ++++++++              omap_iommu_restore_tlb_entries(obj);
+ ++++++++
+ ++++++++      ret = omap2_iommu_enable(obj);
+ ++++++++
+ ++++++++      return ret;
+ ++++++++}
+ ++++++++
+ ++++++++/**
+ ++++++++ * omap_iommu_prepare - prepare() dev_pm_ops implementation
+ ++++++++ * @dev:      iommu device
+ ++++++++ *
+ ++++++++ * This function performs the necessary checks to determine if the IOMMU
+ ++++++++ * device needs suspending or not. The function checks if the runtime_pm
+ ++++++++ * status of the device is suspended, and returns 1 in that case. This
+ ++++++++ * results in the PM core skipping invocation of any of the Sleep PM
+ ++++++++ * callbacks (suspend, suspend_late, resume, resume_early, etc.).
+ ++++++++ */
+ ++++++++static int omap_iommu_prepare(struct device *dev)
+ ++++++++{
+ ++++++++      if (pm_runtime_status_suspended(dev))
+ ++++++++              return 1;
+ ++++++++      return 0;
+ ++++++++}
+ ++++++++
          static bool omap_iommu_can_register(struct platform_device *pdev)
          {
                struct device_node *np = pdev->dev.of_node;
@@@@@@@@@@@ -974,6 -1177,7 -974,6 -974,6 -974,6 -974,6 -974,6 -974,6 -974,6 -974,6 +1177,7 @@@@@@@@@@@ static int omap_iommu_probe(struct plat
                struct omap_iommu *obj;
                struct resource *res;
                struct device_node *of = pdev->dev.of_node;
+ ++++++++      struct orphan_dev *orphan_dev, *tmp;
          
                if (!of) {
                        pr_err("%s: only DT-based devices are supported\n", __func__);
                if (!obj)
                        return -ENOMEM;
          
+ ++++++++      /*
+ ++++++++       * self-manage the ordering dependencies between omap_device_enable/idle
+ ++++++++       * and omap_device_assert/deassert_hardreset APIs
+ ++++++++       */
+ ++++++++      if (pdev->dev.pm_domain) {
+ ++++++++              dev_dbg(&pdev->dev, "device pm_domain is being reset\n");
+ ++++++++              pdev->dev.pm_domain = NULL;
+ ++++++++      }
+ ++++++++
                obj->name = dev_name(&pdev->dev);
                obj->nr_tlb_entries = 32;
                err = of_property_read_u32(of, "ti,#tlb-entries", &obj->nr_tlb_entries);
          
                obj->dev = &pdev->dev;
                obj->ctx = (void *)obj + sizeof(*obj);
+ ++++++++      obj->cr_ctx = devm_kzalloc(&pdev->dev,
+ ++++++++                                 sizeof(*obj->cr_ctx) * obj->nr_tlb_entries,
+ ++++++++                                 GFP_KERNEL);
+ ++++++++      if (!obj->cr_ctx)
+ ++++++++              return -ENOMEM;
          
                spin_lock_init(&obj->iommu_lock);
                spin_lock_init(&obj->page_table_lock);
                                goto out_sysfs;
                }
          
- --------      pm_runtime_irq_safe(obj->dev);
                pm_runtime_enable(obj->dev);
          
                omap_iommu_debugfs_add(obj);
          
                dev_info(&pdev->dev, "%s registered\n", obj->name);
          
+ ++++++++      list_for_each_entry_safe(orphan_dev, tmp, &orphan_dev_list, node) {
+ ++++++++              err = _omap_iommu_add_device(orphan_dev->dev);
+ ++++++++              if (!err) {
+ ++++++++                      list_del(&orphan_dev->node);
+ ++++++++                      kfree(orphan_dev);
+ ++++++++              }
+ ++++++++      }
+ ++++++++
                return 0;
          
          out_sysfs:
@@@@@@@@@@@ -1072,6 -1297,14 -1072,6 -1072,6 -1072,6 -1072,6 -1072,6 -1072,6 -1072,6 -1072,6 +1297,14 @@@@@@@@@@@ static int omap_iommu_remove(struct pla
                return 0;
          }
          
+ ++++++++static const struct dev_pm_ops omap_iommu_pm_ops = {
+ ++++++++      .prepare = omap_iommu_prepare,
+ ++++++++      SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ ++++++++                                   pm_runtime_force_resume)
+ ++++++++      SET_RUNTIME_PM_OPS(omap_iommu_runtime_suspend,
+ ++++++++                         omap_iommu_runtime_resume, NULL)
+ ++++++++};
+ ++++++++
          static const struct of_device_id omap_iommu_of_match[] = {
                { .compatible = "ti,omap2-iommu" },
                { .compatible = "ti,omap4-iommu" },
@@@@@@@@@@@ -1085,6 -1318,7 -1085,6 -1085,6 -1085,6 -1085,6 -1085,6 -1085,6 -1085,6 -1085,6 +1318,7 @@@@@@@@@@@ static struct platform_driver omap_iomm
                .remove = omap_iommu_remove,
                .driver = {
                        .name   = "omap-iommu",
+ ++++++++              .pm     = &omap_iommu_pm_ops,
                        .of_match_table = of_match_ptr(omap_iommu_of_match),
                },
          };
@@@@@@@@@@@ -1149,7 -1383,7 -1149,7 -1149,7 -1149,7 -1149,7 -1149,7 -1149,7 -1149,7 -1149,7 +1383,7 @@@@@@@@@@@ static int omap_iommu_map(struct iommu_
          }
          
          static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
---  ----                              size_t size)
+++  ++++                              size_t size, struct iommu_iotlb_gather *gather)
          {
                struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
                struct device *dev = omap_domain->dev;
@@@@@@@@@@@ -1423,7 -1657,7 -1423,7 -1423,7 -1423,7 -1423,7 -1423,7 -1423,7 -1423,7 -1423,7 +1657,7 @@@@@@@@@@@ static phys_addr_t omap_iommu_iova_to_p
                return ret;
          }
          
- --------static int omap_iommu_add_device(struct device *dev)
+ ++++++++static int _omap_iommu_add_device(struct device *dev)
          {
                struct omap_iommu_arch_data *arch_data, *tmp;
                struct omap_iommu *oiommu;
                struct platform_device *pdev;
                int num_iommus, i;
                int ret;
+ ++++++++      struct orphan_dev *orphan_dev;
+ ++++++++      unsigned long flags;
          
                /*
                 * Allocate the archdata iommu structure for DT-based devices.
                        }
          
                        pdev = of_find_device_by_node(np);
- --------              if (WARN_ON(!pdev)) {
+ ++++++++              if (!pdev) {
                                of_node_put(np);
                                kfree(arch_data);
- --------                      return -EINVAL;
+ ++++++++                      spin_lock_irqsave(&orphan_lock, flags);
+ ++++++++                      list_for_each_entry(orphan_dev, &orphan_dev_list,
+ ++++++++                                          node) {
+ ++++++++                              if (orphan_dev->dev == dev)
+ ++++++++                                      break;
+ ++++++++                      }
+ ++++++++                      spin_unlock_irqrestore(&orphan_lock, flags);
+ ++++++++
+ ++++++++                      if (orphan_dev && orphan_dev->dev == dev)
+ ++++++++                              return -EPROBE_DEFER;
+ ++++++++
+ ++++++++                      orphan_dev = kzalloc(sizeof(*orphan_dev), GFP_KERNEL);
+ ++++++++                      if (!orphan_dev)
+ ++++++++                              return -ENOMEM;
+ ++++++++                      orphan_dev->dev = dev;
+ ++++++++                      spin_lock_irqsave(&orphan_lock, flags);
+ ++++++++                      list_add(&orphan_dev->node, &orphan_dev_list);
+ ++++++++                      spin_unlock_irqrestore(&orphan_lock, flags);
+ ++++++++                      return -EPROBE_DEFER;
                        }
          
                        oiommu = platform_get_drvdata(pdev);
                        }
          
                        tmp->iommu_dev = oiommu;
+ ++++++++              tmp->dev = &pdev->dev;
          
                        of_node_put(np);
                }
                return 0;
          }
          
+ ++++++++static int omap_iommu_add_device(struct device *dev)
+ ++++++++{
+ ++++++++      int ret;
+ ++++++++
+ ++++++++      ret = _omap_iommu_add_device(dev);
+ ++++++++      if (ret == -EPROBE_DEFER)
+ ++++++++              return 0;
+ ++++++++
+ ++++++++      return ret;
+ ++++++++}
+ ++++++++
          static void omap_iommu_remove_device(struct device *dev)
          {
                struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@@@@@@@@@@ -1554,7 -1818,7 -1554,7 -1554,7 -1554,7 -1554,7 -1554,7 -1554,7 -1554,7 -1554,7 +1818,7 @@@@@@@@@@@ static const struct iommu_ops omap_iomm
          static int __init omap_iommu_init(void)
          {
                struct kmem_cache *p;
- --------      const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ ++++++++      const slab_flags_t flags = SLAB_HWCACHE_ALIGN;
                size_t align = 1 << 10; /* L2 pagetable alignement */
                struct device_node *np;
                int ret;
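
          A self-contained sketch, with illustrative names rather than the kernel API, of the
          probe-deferral pattern the OMAP changes above introduce: a client device whose IOMMU
          has not probed yet is parked on an orphan list and _omap_iommu_add_device() returns
          -EPROBE_DEFER; when the IOMMU finally probes, omap_iommu_probe() walks the list and
          retries every parked device.

          #include <stdbool.h>
          #include <stdio.h>
          #include <stdlib.h>

          struct orphan {
                  char name[32];
                  struct orphan *next;
          };

          static struct orphan *orphan_list;

          /* Returns true when the device can be bound now, false when it was
           * parked on the orphan list because its IOMMU is not ready yet. */
          static bool add_device(const char *name, bool iommu_ready)
          {
                  struct orphan *o;

                  if (iommu_ready)
                          return true;

                  o = calloc(1, sizeof(*o));
                  if (!o)
                          return false;
                  snprintf(o->name, sizeof(o->name), "%s", name);
                  o->next = orphan_list;
                  orphan_list = o;
                  return false;
          }

          /* Called once the IOMMU has probed: retry every parked device. */
          static void iommu_probed(void)
          {
                  struct orphan *o = orphan_list;

                  orphan_list = NULL;
                  while (o) {
                          struct orphan *next = o->next;

                          printf("binding deferred device %s\n", o->name);
                          free(o);
                          o = next;
                  }
          }

          int main(void)
          {
                  add_device("omap3isp", false);  /* IOMMU not probed yet: parked */
                  iommu_probed();                 /* IOMMU probe retries orphans  */
                  return 0;
          }
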
index 34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116,34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116,34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116,fd33cf5981d79d29d6fceeabb027ed0525313542,fd33cf5981d79d29d6fceeabb027ed0525313542,41cf9bd5a600c25177cc539c9756df73623cb0a4,34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116,34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116,34d0b9783b3ed6f57c3fcbe9ec46d16db1e92116,3608f58f1ea8ae14355a4498c43e218a4f0e512a..c31e7bc4ccbec2f7083ba30541e2a2b70df7431a
@@@@@@@@@@@ -7,6 -7,6 -7,6 -7,7 -7,7 -7,6 -7,6 -7,6 -7,6 -7,6 +7,7 @@@@@@@@@@@
           */
          
          #include <linux/atomic.h>
+++  +++++#include <linux/bitfield.h>
          #include <linux/clk.h>
          #include <linux/delay.h>
          #include <linux/dma-iommu.h>
          #include <linux/slab.h>
          #include <linux/spinlock.h>
          
---  -----#include "arm-smmu-regs.h"
+++  +++++#include "arm-smmu.h"
          
          #define SMMU_INTR_SEL_NS     0x2000
          
@@@@@@@@@@@ -155,7 -155,7 -155,7 -156,7 -156,7 -155,7 -155,7 -155,7 -155,7 -155,7 +156,7 @@@@@@@@@@@ static void qcom_iommu_tlb_inv_range_no
                        struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
                        size_t s = size;
          
---  -----              iova &= ~12UL;
+++  +++++              iova = (iova >> 12) << 12;
                        iova |= ctx->asid;
                        do {
                                iommu_writel(ctx, reg, iova);
                }
          }
          
---  ---- static const struct iommu_gather_ops qcom_gather_ops = {
+++  ++++ static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+++  ++++                                     size_t granule, void *cookie)
+++  ++++ {
+++  ++++       qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
+++  ++++       qcom_iommu_tlb_sync(cookie);
+++  ++++ }
+++  ++++ 
+++  ++++ static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+++  ++++                                     size_t granule, void *cookie)
+++  ++++ {
+++  ++++       qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
+++  ++++       qcom_iommu_tlb_sync(cookie);
+++  ++++ }
+++  ++++ 
+++  ++++ static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+++  ++++                                   unsigned long iova, size_t granule,
+++  ++++                                   void *cookie)
+++  ++++ {
+++  ++++       qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
+++  ++++ }
+++  ++++ 
+++  ++++ static const struct iommu_flush_ops qcom_flush_ops = {
                .tlb_flush_all  = qcom_iommu_tlb_inv_context,
---  ----       .tlb_add_flush  = qcom_iommu_tlb_inv_range_nosync,
---  ----       .tlb_sync       = qcom_iommu_tlb_sync,
+++  ++++       .tlb_flush_walk = qcom_iommu_tlb_flush_walk,
+++  ++++       .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf,
+++  ++++       .tlb_add_page   = qcom_iommu_tlb_add_page,
          };
          
          static irqreturn_t qcom_iommu_fault(int irq, void *dev)
@@@@@@@@@@@ -215,7 -215,7 -215,7 -238,7 -238,7 -215,7 -215,7 -215,7 -215,7 -237,7 +238,7 @@@@@@@@@@@ static int qcom_iommu_init_domain(struc
                        .pgsize_bitmap  = qcom_iommu_ops.pgsize_bitmap,
                        .ias            = 32,
                        .oas            = 40,
---  ----               .tlb            = &qcom_gather_ops,
+++  ++++               .tlb            = &qcom_flush_ops,
                        .iommu_dev      = qcom_iommu->dev,
                };
          
                        /* TTBRs */
                        iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
                                        pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0] |
---  -----                              ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+++  +++++                              FIELD_PREP(TTBRn_ASID, ctx->asid));
                        iommu_writeq(ctx, ARM_SMMU_CB_TTBR1,
                                        pgtbl_cfg.arm_lpae_s1_cfg.ttbr[1] |
---  -----                              ((u64)ctx->asid << TTBRn_ASID_SHIFT));
+++  +++++                              FIELD_PREP(TTBRn_ASID, ctx->asid));
          
---  -----              /* TTBCR */
---  -----              iommu_writel(ctx, ARM_SMMU_CB_TTBCR2,
+++  +++++              /* TCR */
+++  +++++              iommu_writel(ctx, ARM_SMMU_CB_TCR2,
                                        (pgtbl_cfg.arm_lpae_s1_cfg.tcr >> 32) |
---  -----                              TTBCR2_SEP_UPSTREAM);
---  -----              iommu_writel(ctx, ARM_SMMU_CB_TTBCR,
+++  +++++                              FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM));
+++  +++++              iommu_writel(ctx, ARM_SMMU_CB_TCR,
                                        pgtbl_cfg.arm_lpae_s1_cfg.tcr);
          
                        /* MAIRs (stage-1 only) */
@@@@@@@@@@@ -417,7 -417,7 -417,7 -440,7 -440,7 -417,7 -417,7 -417,7 -417,7 -439,7 +440,7 @@@@@@@@@@@ static int qcom_iommu_map(struct iommu_
          }
          
          static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
---  ----                              size_t size)
+++  ++++                              size_t size, struct iommu_iotlb_gather *gather)
          {
                size_t ret;
                unsigned long flags;
                 */
                pm_runtime_get_sync(qcom_domain->iommu->dev);
                spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
---  ----       ret = ops->unmap(ops, iova, size);
+++  ++++       ret = ops->unmap(ops, iova, size, gather);
                spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
                pm_runtime_put_sync(qcom_domain->iommu->dev);
          
                return ret;
          }
          
---  ---- static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+++  ++++ static void qcom_iommu_flush_iotlb_all(struct iommu_domain *domain)
          {
                struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
                struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
                pm_runtime_put_sync(qcom_domain->iommu->dev);
          }
          
+++  ++++ static void qcom_iommu_iotlb_sync(struct iommu_domain *domain,
+++  ++++                                 struct iommu_iotlb_gather *gather)
+++  ++++ {
+++  ++++       qcom_iommu_flush_iotlb_all(domain);
+++  ++++ }
+++  ++++ 
          static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
                                                   dma_addr_t iova)
          {
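Both unmap and iotlb_sync now carry a struct iommu_iotlb_gather, which lets the core batch invalidations across several unmaps before issuing a single sync. A hedged sketch of the calling pattern, using the core helpers as they existed around this series (iommu_iotlb_gather_init/iommu_unmap_fast/iommu_tlb_sync; shown for orientation, not part of this commit):

#include <linux/iommu.h>

static size_t unmap_and_sync(struct iommu_domain *domain,
			     unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather gather;
	size_t unmapped;

	iommu_iotlb_gather_init(&gather);
	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
	iommu_tlb_sync(domain, &gather);	/* ends up in .iotlb_sync, here qcom_iommu_iotlb_sync() */
	return unmapped;
}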
@@@@@@@@@@@ -581,7 -581,7 -581,7 -610,7 -610,7 -581,7 -581,7 -581,7 -581,7 -609,7 +610,7 @@@@@@@@@@@ static const struct iommu_ops qcom_iomm
                .detach_dev     = qcom_iommu_detach_dev,
                .map            = qcom_iommu_map,
                .unmap          = qcom_iommu_unmap,
---  ----       .flush_iotlb_all = qcom_iommu_iotlb_sync,
+++  ++++       .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
                .iotlb_sync     = qcom_iommu_iotlb_sync,
                .iova_to_phys   = qcom_iommu_iova_to_phys,
                .add_device     = qcom_iommu_add_device,
@@@@@@@@@@@ -696,10 -696,10 -696,10 -725,10 -725,10 -696,10 -696,10 -696,10 -696,10 -724,8 +725,8 @@@@@@@@@@@ static int qcom_iommu_ctx_probe(struct 
                        return PTR_ERR(ctx->base);
          
                irq = platform_get_irq(pdev, 0);
---------       if (irq < 0) {
---------               dev_err(dev, "failed to get irq\n");
+++++++++       if (irq < 0)
                        return -ENODEV;
---------       }
          
                /* clear IRQs before registering fault handler, just in case the
                 * boot-loader left us a surprise:
@@@@@@@@@@@ -775,7 -775,7 -775,7 -804,7 -804,7 -775,7 -775,7 -775,7 -775,7 -801,7 +802,7 @@@@@@@@@@@ static int qcom_iommu_device_probe(stru
                struct qcom_iommu_dev *qcom_iommu;
                struct device *dev = &pdev->dev;
                struct resource *res;
----- ----      int ret, sz, max_asid = 0;
+++++ ++++      int ret, max_asid = 0;
          
                /* find the max asid (which is 1:1 to ctx bank idx), so we know how
                 * many child ctx devices we have:
                for_each_child_of_node(dev->of_node, child)
                        max_asid = max(max_asid, get_asid(child));
          
----- ----      sz = sizeof(*qcom_iommu) + (max_asid * sizeof(qcom_iommu->ctxs[0]));
----- ----
----- ----      qcom_iommu = devm_kzalloc(dev, sz, GFP_KERNEL);
+++++ ++++      qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
+++++ ++++                                GFP_KERNEL);
                if (!qcom_iommu)
                        return -ENOMEM;
                qcom_iommu->num_ctxs = max_asid;
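The probe path above replaces the open-coded size calculation with struct_size(), which accounts for the trailing flexible array and saturates instead of wrapping on overflow. A simplified sketch of the idiom (stand-in struct; the real qcom_iommu_dev has more members):

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_dev {			/* stand-in for qcom_iommu_dev */
	unsigned int num_ctxs;
	void *ctxs[];			/* trailing flexible array, one slot per context */
};

static struct example_dev *example_alloc(struct device *dev, unsigned int n)
{
	struct example_dev *ed;

	/* struct_size(ed, ctxs, n) == sizeof(*ed) + n * sizeof(ed->ctxs[0]), overflow-checked */
	ed = devm_kzalloc(dev, struct_size(ed, ctxs, n), GFP_KERNEL);
	if (ed)
		ed->num_ctxs = n;
	return ed;
}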
index 4fc6454f7ebb470a2ec20d0b2cab94b2df89ae38,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531,10e79a49af9d1cf47684fcd130558f5510cc3d6b,f2ae8a006ff8b279b9b86d68d49fcb8b62d70531..ed11ef594378d8e6ecec23071754ff8552ebe4f1
          #define dma_frcd_type(d) ((d >> 30) & 1)
          #define dma_frcd_fault_reason(c) (c & 0xff)
          #define dma_frcd_source_id(c) (c & 0xffff)
++++++++ +#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
++++++++ +#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
          /* low 64 bit */
          #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
          
          #define QI_PC_PASID_SEL               (QI_PC_TYPE | QI_PC_GRAN(1))
          
          #define QI_EIOTLB_ADDR(addr)  ((u64)(addr) & VTD_PAGE_MASK)
 ---------#define QI_EIOTLB_GL(gl)      (((u64)gl) << 7)
          #define QI_EIOTLB_IH(ih)      (((u64)ih) << 6)
          #define QI_EIOTLB_AM(am)      (((u64)am))
          #define QI_EIOTLB_PASID(pasid)        (((u64)pasid) << 32)
          #define QI_RESP_INVALID               0x1
          #define QI_RESP_FAILURE               0xf
          
 ---------#define QI_GRAN_ALL_ALL                       0
 ---------#define QI_GRAN_NONG_ALL              1
          #define QI_GRAN_NONG_PASID            2
          #define QI_GRAN_PSI_PASID             3
          
diff --combined kernel/dma/direct.c
index 706113c6bebc3d984d0e5abe4acf83519cbec13b,59bdceea3737a4a095555723f4a7b79fda15c048,59bdceea3737a4a095555723f4a7b79fda15c048,795c9b095d7573a79df1d4eeef00a10dca6a2ee4,706113c6bebc3d984d0e5abe4acf83519cbec13b,706113c6bebc3d984d0e5abe4acf83519cbec13b,706113c6bebc3d984d0e5abe4acf83519cbec13b,59bdceea3737a4a095555723f4a7b79fda15c048,a7f2a0163426ea2a28099de1d069d6cdbddd4bdd,59bdceea3737a4a095555723f4a7b79fda15c048..8402b29c280f560ae18420f9a99fc3f614c40293
@@@@@@@@@@@ -47,6 -47,9 -47,9 -47,6 -47,6 -47,6 -47,6 -47,9 -47,6 -47,9 +47,6 @@@@@@@@@@@ u64 dma_direct_get_required_mask(struc
          {
                u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
          
 --    - -      if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
 --    - -              max_dma = dev->bus_dma_mask;
 --    - -
                return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
          }
          
@@@@@@@@@@@ -85,8 -88,6 -88,6 -85,6 -85,8 -85,8 -85,8 -88,6 -85,6 -88,6 +85,8 @@@@@@@@@@@ static bool dma_coherent_ok(struct devi
          struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
          {
 +++   +++      size_t alloc_size = PAGE_ALIGN(size);
 +++   +++      int node = dev_to_node(dev);
                struct page *page = NULL;
                u64 phys_mask;
          
                gfp &= ~__GFP_ZERO;
                gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                &phys_mask);
 +++   +++      page = dma_alloc_contiguous(dev, alloc_size, gfp);
 +++   +++      if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 +++   +++              dma_free_contiguous(dev, page, alloc_size);
 +++   +++              page = NULL;
 +++   +++      }
          again:
 ---   ---      page = dma_alloc_contiguous(dev, size, gfp);
 +++   +++      if (!page)
 +++   +++              page = alloc_pages_node(node, gfp, get_order(alloc_size));
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        dma_free_contiguous(dev, page, size);
                        page = NULL;
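The combined hunk above interleaves several parents and is hard to read; the resulting allocation order in __dma_direct_alloc_pages() is roughly the following (simplified sketch; GFP-retry and zone-restriction logic omitted):

	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);

	/* 1) prefer the contiguous (CMA) allocator */
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}

	/* 2) otherwise fall back to the page allocator on the device's node */
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));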
@@@@@@@@@@@ -135,12 -130,10 -130,10 -127,12 -135,12 -135,12 -135,12 -130,10 -127,12 -130,10 +135,12 @@@@@@@@@@@ void *dma_direct_alloc_pages(struct dev
                if (!page)
                        return NULL;
          
 --    - -      if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
 ++    + +      if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 ++    + +          !force_dma_unencrypted(dev)) {
                        /* remove any dirty cache lines on the kernel alias */
                        if (!PageHighMem(page))
                                arch_dma_prep_coherent(page, size);
 ++    + +              *dma_handle = phys_to_dma(dev, page_to_phys(page));
                        /* return the page pointer as the opaque cookie */
                        return page;
                }
@@@@@@@@@@@ -185,8 -178,7 -178,7 -177,8 -185,8 -185,8 -185,8 -178,7 -177,8 -178,7 +185,8 @@@@@@@@@@@ void dma_direct_free_pages(struct devic
          {
                unsigned int page_order = get_order(size);
          
 --    - -      if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
 ++    + +      if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 ++    + +          !force_dma_unencrypted(dev)) {
                        /* cpu_addr is a struct page cookie, not a kernel address */
                        __dma_direct_free_pages(dev, size, cpu_addr);
                        return;
@@@@@@@@@@@ -305,7 -297,7 -297,7 -297,7 -305,7 -305,7 -305,7 -297,7 -297,7 -297,7 +305,7 @@@@@@@@@@@ void dma_direct_unmap_page(struct devic
                        dma_direct_sync_single_for_cpu(dev, addr, size, dir);
          
                if (unlikely(is_swiotlb_buffer(phys)))
-------- -              swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
++++++++ +              swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
          }
          EXPORT_SYMBOL(dma_direct_unmap_page);
          