Merge tag 'dma-mapping-5.10' of git://git.infradead.org/users/hch/dma-mapping
author Linus Torvalds <[email protected]>
Thu, 15 Oct 2020 21:43:29 +0000 (14:43 -0700)
committer Linus Torvalds <[email protected]>
Thu, 15 Oct 2020 21:43:29 +0000 (14:43 -0700)
Pull dma-mapping updates from Christoph Hellwig:

 - rework the non-coherent DMA allocator

 - move private definitions out of <linux/dma-mapping.h>

 - lower CMA_ALIGNMENT (Paul Cercueil)

 - remove the omap1 dma address translation in favor of the common code

 - make dma-direct aware of multiple dma offset ranges (Jim Quinlan)

 - support per-node DMA CMA areas (Barry Song)

 - increase the default seg boundary limit (Nicolin Chen)

 - misc fixes (Robin Murphy, Thomas Tai, Xu Wang)

 - various cleanups

* tag 'dma-mapping-5.10' of git://git.infradead.org/users/hch/dma-mapping: (63 commits)
  ARM/ixp4xx: add a missing include of dma-map-ops.h
  dma-direct: simplify the DMA_ATTR_NO_KERNEL_MAPPING handling
  dma-direct: factor out a dma_direct_alloc_from_pool helper
  dma-direct: check for highmem pages in dma_direct_alloc_pages
  dma-mapping: merge <linux/dma-noncoherent.h> into <linux/dma-map-ops.h>
  dma-mapping: move large parts of <linux/dma-direct.h> to kernel/dma
  dma-mapping: move dma-debug.h to kernel/dma/
  dma-mapping: remove <asm/dma-contiguous.h>
  dma-mapping: merge <linux/dma-contiguous.h> into <linux/dma-map-ops.h>
  dma-contiguous: remove dma_contiguous_set_default
  dma-contiguous: remove dev_set_cma_area
  dma-contiguous: remove dma_declare_contiguous
  dma-mapping: split <linux/dma-mapping.h>
  cma: decrease CMA_ALIGNMENT lower limit to 2
  firewire-ohci: use dma_alloc_pages
  dma-iommu: implement ->alloc_noncoherent
  dma-mapping: add new {alloc,free}_noncoherent dma_map_ops methods
  dma-mapping: add a new dma_alloc_pages API
  dma-mapping: remove dma_cache_sync
  53c700: convert to dma_alloc_noncoherent
  ...
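
The headline additions in this pull are the driver-visible dma_alloc_pages() and
dma_{alloc,free}_noncoherent() interfaces; with dma_cache_sync() removed, ownership
handoffs now go through the regular dma_sync_single_for_{cpu,device}() calls. A
minimal driver-side sketch of the noncoherent variant, assuming a hypothetical,
already-probed "struct device *dev" (error handling abbreviated):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>
    #include <linux/string.h>

    /* Sketch only: noncoherent allocation with the new 5.10 API. */
    static int example_noncoherent(struct device *dev)
    {
            dma_addr_t dma_handle;
            void *buf;

            buf = dma_alloc_noncoherent(dev, SZ_4K, &dma_handle,
                                        DMA_BIDIRECTIONAL, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            memset(buf, 0, SZ_4K);
            /* pass ownership to the device before it starts DMA */
            dma_sync_single_for_device(dev, dma_handle, SZ_4K,
                                       DMA_BIDIRECTIONAL);

            /* ... device DMA completes ... reclaim ownership for the CPU */
            dma_sync_single_for_cpu(dev, dma_handle, SZ_4K,
                                    DMA_BIDIRECTIONAL);

            dma_free_noncoherent(dev, SZ_4K, buf, dma_handle,
                                 DMA_BIDIRECTIONAL);
            return 0;
    }

dma_alloc_pages() follows the same pattern but returns a struct page * instead of a
kernel virtual address, which is what the firewire-ohci conversion listed above uses.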

38 files changed:
Documentation/admin-guide/kernel-parameters.txt
MAINTAINERS
arch/arm/mm/init.c
arch/arm/xen/mm.c
arch/arm64/mm/init.c
arch/ia64/Kconfig
arch/ia64/mm/init.c
arch/microblaze/mm/init.c
arch/mips/Kconfig
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/kernel/setup.c
arch/parisc/Kconfig
arch/s390/kernel/setup.c
arch/x86/kernel/setup.c
arch/x86/pci/sta2x11-fixup.c
arch/xtensa/mm/init.c
drivers/acpi/arm64/iort.c
drivers/acpi/scan.c
drivers/base/core.c
drivers/base/dd.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/iommu/amd/iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/media/platform/exynos4-is/fimc-is.c
drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
drivers/usb/core/message.c
include/linux/device.h
include/linux/gfp.h
kernel/dma/contiguous.c
kernel/dma/swiotlb.c
mm/Kconfig
mm/hugetlb.c
mm/memory.c

diff --combined Documentation/admin-guide/kernel-parameters.txt
index 0fa47ddf4c4662a24089fd9daba484d690b79237,7657e00e83ca382fb9a3fab2cab11ecf2db9a96a..f7ac0e663976e5b0a736a8ab40f3f60cca28ccba
                        loops can be debugged more effectively on production
                        systems.
  
 -      clearcpuid=BITNUM [X86]
 +      clearcpuid=BITNUM[,BITNUM...] [X86]
                        Disable CPUID feature X for the kernel. See
                        arch/x86/include/asm/cpufeatures.h for the valid bit
                        numbers. Note the Linux specific bits are not necessarily
                        some critical bits.
  
        cma=nn[MG]@[start[MG][-end[MG]]]
 -                      [ARM,X86,KNL]
 +                      [KNL,CMA]
                        Sets the size of kernel global memory area for
                        contiguous memory allocations and optionally the
                        placement constraint by the physical address range of
                        memory allocations. A value of 0 disables CMA
                        altogether. For more information, see
-                       include/linux/dma-contiguous.h
+                       kernel/dma/contiguous.c
+       cma_pernuma=nn[MG]
+                       [ARM64,KNL]
+                       Sets the size of kernel per-numa memory area for
+                       contiguous memory allocations. A value of 0 disables
+                       per-numa CMA altogether. If this option is not
+                       specified, the default value is 0.
+                       With per-numa CMA enabled, DMA users on node nid will
+                       first try to allocate a buffer from the per-numa area
+                       located on node nid; if that allocation fails, they
+                       fall back to the global default memory area.
  
        cmo_free_hint=  [PPC] Format: { yes | no }
                        Specify whether pages are marked as being inactive
                        Arch Perfmon v4 (Skylake and newer).
  
        disable_ddw     [PPC/PSERIES]
 -                      Disable Dynamic DMA Window support. Use this if
 +                      Disable Dynamic DMA Window support. Use this
                        to workaround buggy firmware.
  
        disable_ipv6=   [IPV6]
                        what data is available or for reverse-engineering.
  
        dyndbg[="val"]          [KNL,DYNAMIC_DEBUG]
 -      module.dyndbg[="val"]
 +      <module>.dyndbg[="val"]
                        Enable debug messages at boot time.  See
                        Documentation/admin-guide/dynamic-debug-howto.rst
                        for details.
        nopku           [X86] Disable Memory Protection Keys CPU feature found
                        in some Intel CPUs.
  
 -      module.async_probe [KNL]
 +      <module>.async_probe [KNL]
                        Enable asynchronous probe on this module.
  
        early_ioremap_debug [KNL]
                        1 - Bypass the IOMMU for DMA.
                        unset - Use value of CONFIG_IOMMU_DEFAULT_PASSTHROUGH.
  
 -      io7=            [HW] IO7 for Marvel based alpha systems
 +      io7=            [HW] IO7 for Marvel-based Alpha systems
                        See comment before marvel_specify_io7 in
                        arch/alpha/kernel/core_marvel.c.
  
        kgdbwait        [KGDB] Stop kernel execution and enter the
                        kernel debugger at the earliest opportunity.
  
 -      kmac=           [MIPS] korina ethernet MAC address.
 +      kmac=           [MIPS] Korina ethernet MAC address.
                        Configure the RouterBoard 532 series on-chip
                        Ethernet adapter MAC address.
  
                        [KVM,ARM] Allow use of GICv4 for direct injection of
                        LPIs.
  
 +      kvm_cma_resv_ratio=n [PPC]
 +                      Reserves given percentage from system memory area for
 +                      contiguous memory allocation for KVM hash pagetable
 +                      allocation.
 +                      By default it reserves 5% of total system memory.
 +                      Format: <integer>
 +                      Default: 5
 +
        kvm-intel.ept=  [KVM,Intel] Disable extended page tables
                        (virtualized MMU) support on capable Intel chips.
                        Default is 1 (enabled)
        lapic           [X86-32,APIC] Enable the local APIC even if BIOS
                        disabled it.
  
 -      lapic=          [X86,APIC] "notscdeadline" Do not use TSC deadline
 +      lapic=          [X86,APIC] Do not use TSC deadline
                        value for LAPIC timer one-shot implementation. Default
                        back to the programmable timer unit in the LAPIC.
 +                      Format: notscdeadline
  
        lapic_timer_c2_ok       [X86,APIC] trust the local apic timer
                        in C2 power state.
  
        memblock=debug  [KNL] Enable memblock debug messages.
  
 -      load_ramdisk=   [RAM] List of ramdisks to load from floppy
 -                      See Documentation/admin-guide/blockdev/ramdisk.rst.
 +      load_ramdisk=   [RAM] [Deprecated]
  
        lockd.nlm_grace_period=P  [NFS] Assign grace period.
                        Format: <integer>
                        (machvec) in a generic kernel.
                        Example: machvec=hpzx1
  
 -      machtype=       [Loongson] Share the same kernel image file between different
 -                       yeeloong laptop.
 +      machtype=       [Loongson] Share the same kernel image file between
 +                      different yeeloong laptops.
                        Example: machtype=lemote-yeeloong-2f-7inch
  
        max_addr=nn[KMG]        [KNL,BOOT,ia64] All physical memory greater
                        register save and restore. The kernel will only save
                        legacy floating-point registers on task switch.
  
 -      nohugeiomap     [KNL,X86,PPC] Disable kernel huge I/O mappings.
 +      nohugeiomap     [KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings.
  
        nosmt           [KNL,S390] Disable symmetric multithreading (SMT).
                        Equivalent to smt=1.
                        Param: <number> - step/bucket size as a power of 2 for
                                statistical time based profiling.
  
 -      prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
 -                      before loading.
 -                      See Documentation/admin-guide/blockdev/ramdisk.rst.
 +      prompt_ramdisk= [RAM] [Deprecated]
  
        prot_virt=      [S390] enable hosting protected virtual machines
                        isolated from the hypervisor (if hardware supports
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
                        See Documentation/admin-guide/blockdev/ramdisk.rst.
  
 +      ramdisk_start=  [RAM] RAM disk image start address
 +
        random.trust_cpu={on,off}
                        [KNL] Enable or disable trusting the use of the
                        CPU's random number generator (if available) to
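
For reference, the global and per-numa CMA knobs documented above compose on the
kernel command line. An illustrative (not prescriptive) example for an arm64 NUMA
system:

    cma=64M@0-4G cma_pernuma=16M

This reserves one 64 MiB global CMA area constrained below 4 GiB plus a 16 MiB CMA
area on each NUMA node; as described above, node-local DMA allocations try the
per-node area first and fall back to the global one.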
diff --combined MAINTAINERS
index 42fd3e3a252c28a3c9a95cee657e0723bd76a6eb,c0dbe6e9de6549271e004f5f015639a3d1ea5faa..0f39d1ca7770db0999116abbd7bf5522b1f54beb
@@@ -405,7 -405,7 +405,7 @@@ F: drivers/platform/x86/i2c-multi-insta
  ACPI PMIC DRIVERS
  M:    "Rafael J. Wysocki" <[email protected]>
  M:    Len Brown <[email protected]>
 -R:    Andy Shevchenko <and[email protected]>
 +R:    Andy Shevchenko <and[email protected]>
  R:    Mika Westerberg <[email protected]>
  L:    [email protected]
  S:    Supported
@@@ -802,13 -802,6 +802,13 @@@ S:       Maintaine
  F:    Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
  F:    drivers/irqchip/irq-al-fic.c
  
 +AMAZON ANNAPURNA LABS MEMORY CONTROLLER EDAC
 +M:    Talel Shenhar <[email protected]>
 +M:    Talel Shenhar <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/edac/amazon,al-mc-edac.yaml
 +F:    drivers/edac/al_mc_edac.c
 +
  AMAZON ANNAPURNA LABS THERMAL MMIO DRIVER
  M:    Talel Shenhar <[email protected]>
  S:    Maintained
@@@ -950,12 -943,37 +950,12 @@@ S:      Supporte
  F:    arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
  F:    drivers/net/ethernet/amd/xgbe/
  
 -ANALOG DEVICES INC AD5686 DRIVER
 -M:    Michael Hennerich <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    drivers/iio/dac/ad5686*
 -F:    drivers/iio/dac/ad5696*
 -
 -ANALOG DEVICES INC AD5758 DRIVER
 -M:    Michael Hennerich <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/dac/ad5758.txt
 -F:    drivers/iio/dac/ad5758.c
 -
 -ANALOG DEVICES INC AD7091R5 DRIVER
 -M:    Beniamin Bia <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml
 -F:    drivers/iio/adc/ad7091r5.c
 -
 -ANALOG DEVICES INC AD7124 DRIVER
 -M:    Michael Hennerich <[email protected]>
 +AMS AS73211 DRIVER
 +M:    Christian Eggers <[email protected]>
  L:    [email protected]
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
 -F:    drivers/iio/adc/ad7124.c
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/light/ams,as73211.yaml
 +F:    drivers/iio/light/as73211.c
  
  ANALOG DEVICES INC AD7192 DRIVER
  M:    Alexandru Tachici <[email protected]>
@@@ -973,6 -991,15 +973,6 @@@ W:        http://ez.analog.com/community/linux
  F:    Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
  F:    drivers/iio/adc/ad7292.c
  
 -ANALOG DEVICES INC AD7606 DRIVER
 -M:    Michael Hennerich <[email protected]>
 -M:    Beniamin Bia <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
 -F:    drivers/iio/adc/ad7606.c
 -
  ANALOG DEVICES INC AD7768-1 DRIVER
  M:    Michael Hennerich <[email protected]>
  L:    [email protected]
@@@ -1034,6 -1061,7 +1034,6 @@@ F:      drivers/iio/imu/adis16475.
  F:    Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml
  
  ANALOG DEVICES INC ADM1177 DRIVER
 -M:    Beniamin Bia <[email protected]>
  M:    Michael Hennerich <[email protected]>
  L:    [email protected]
  S:    Supported
  S:    Maintained
  F:    drivers/media/i2c/adv7842*
  
 +ANALOG DEVICES INC ADXRS290 DRIVER
 +M:    Nishant Malpani <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/iio/gyro/adxrs290.c
 +F:    Documentation/devicetree/bindings/iio/gyroscope/adi,adxrs290.yaml
 +
  ANALOG DEVICES INC ASOC CODEC DRIVERS
  M:    Lars-Peter Clausen <[email protected]>
  M:    Nuno Sá <[email protected]>
@@@ -1107,6 -1128,15 +1107,6 @@@ S:     Supporte
  W:    http://ez.analog.com/community/linux-device-drivers
  F:    drivers/dma/dma-axi-dmac.c
  
 -ANALOG DEVICES INC HMC425A DRIVER
 -M:    Beniamin Bia <[email protected]>
 -M:    Michael Hennerich <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -W:    http://ez.analog.com/community/linux-device-drivers
 -F:    Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml
 -F:    drivers/iio/amplifiers/hmc425a.c
 -
  ANALOG DEVICES INC IIO DRIVERS
  M:    Lars-Peter Clausen <[email protected]>
  M:    Michael Hennerich <[email protected]>
@@@ -1115,11 -1145,8 +1115,11 @@@ W:    http://wiki.analog.com
  W:    http://ez.analog.com/community/linux-device-drivers
  F:    Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
  F:    Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
 +F:    Documentation/devicetree/bindings/iio/*/adi,*
 +F:    Documentation/devicetree/bindings/iio/dac/ad5758.txt
  F:    drivers/iio/*/ad*
  F:    drivers/iio/adc/ltc249*
 +F:    drivers/iio/amplifiers/hmc425a.c
  F:    drivers/staging/iio/*/ad*
  X:    drivers/iio/*/adjd*
  
@@@ -1433,11 -1460,6 +1433,11 @@@ S:    Odd Fixe
  F:    drivers/amba/
  F:    include/linux/amba/bus.h
  
 +ARM PRIMECELL CLCD PL110 DRIVER
 +M:    Russell King <[email protected]>
 +S:    Odd Fixes
 +F:    drivers/video/fbdev/amba-clcd.*
 +
  ARM PRIMECELL KMI PL050 DRIVER
  M:    Russell King <[email protected]>
  S:    Odd Fixes
@@@ -1484,7 -1506,8 +1484,7 @@@ L:      [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/iommu/arm,smmu*
  F:    drivers/iommu/arm/
 -F:    drivers/iommu/io-pgtable-arm-v7s.c
 -F:    drivers/iommu/io-pgtable-arm.c
 +F:    drivers/iommu/io-pgtable-arm*
  
  ARM SUB-ARCHITECTURES
  L:    [email protected] (moderated for non-subscribers)
@@@ -1502,9 -1525,8 +1502,9 @@@ F:      Documentation/devicetree/bindings/ar
  F:    Documentation/devicetree/bindings/clock/actions,owl-cmu.txt
  F:    Documentation/devicetree/bindings/dma/owl-dma.txt
  F:    Documentation/devicetree/bindings/i2c/i2c-owl.txt
 +F:    Documentation/devicetree/bindings/interrupt-controller/actions,owl-sirq.yaml
  F:    Documentation/devicetree/bindings/mmc/owl-mmc.yaml
 -F:    Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
 +F:    Documentation/devicetree/bindings/pinctrl/actions,*
  F:    Documentation/devicetree/bindings/power/actions,owl-sps.txt
  F:    Documentation/devicetree/bindings/timer/actions,owl-timer.txt
  F:    arch/arm/boot/dts/owl-*
@@@ -1514,7 -1536,6 +1514,7 @@@ F:      drivers/clk/actions
  F:    drivers/clocksource/timer-owl*
  F:    drivers/dma/owl-dma.c
  F:    drivers/i2c/busses/i2c-owl.c
 +F:    drivers/irqchip/irq-owl-sirq.c
  F:    drivers/mmc/host/owl-mmc.c
  F:    drivers/pinctrl/actions/*
  F:    drivers/soc/actions/
@@@ -1602,7 -1623,7 +1602,7 @@@ N:      meso
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
  M:    Tsahee Zidenberg <[email protected]>
 -M:    Antoine Tenart <a[email protected]>
 +M:    Antoine Tenart <a[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/boot/dts/alpine*
@@@ -1725,7 -1746,6 +1725,7 @@@ ARM/CORESIGHT FRAMEWORK AND DRIVER
  M:    Mathieu Poirier <[email protected]>
  R:    Suzuki K Poulose <[email protected]>
  R:    Mike Leach <[email protected]>
 +L:    [email protected] (moderated for non-subscribers)
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-coresight-devices-*
@@@ -2425,7 -2445,7 +2425,7 @@@ L:      [email protected] (m
  L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/s5p-cec.txt
 -F:    drivers/media/platform/s5p-cec/
 +F:    drivers/media/cec/platform/s5p/
  
  ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT
  M:    Andrzej Pietrasiewicz <[email protected]>
@@@ -2484,7 -2504,7 +2484,7 @@@ S:      Maintaine
  F:    drivers/clk/socfpga/
  
  ARM/SOCFPGA EDAC SUPPORT
 -M:    Thor Thayer <[email protected]>
 +M:    Dinh Nguyen <[email protected]>
  S:    Maintained
  F:    drivers/edac/altera_edac.
  
@@@ -2570,7 -2590,7 +2570,7 @@@ L:      [email protected]
  L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/tegra-cec.txt
 -F:    drivers/media/platform/tegra-cec/
 +F:    drivers/media/cec/platform/tegra/
  
  ARM/TETON BGA MACHINE SUPPORT
  M:    "Mark F. Brown" <[email protected]>
@@@ -3455,14 -3475,6 +3455,14 @@@ F:    drivers/bus/brcmstb_gisb.
  F:    drivers/pci/controller/pcie-brcmstb.c
  N:    brcmstb
  
 +BROADCOM BDC DRIVER
 +M:    Al Cooper <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/usb/brcm,bdc.txt
 +F:    drivers/usb/gadget/udc/bdc/
 +
  BROADCOM BMIPS CPUFREQ DRIVER
  M:    Markus Mayer <[email protected]>
  M:    [email protected]
@@@ -3495,17 -3507,13 +3495,17 @@@ F:   drivers/net/ethernet/broadcom/bnx2.
  F:    drivers/net/ethernet/broadcom/bnx2_*
  
  BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
 -M:    [email protected]
 +M:    Saurav Kashyap <[email protected]>
 +M:    Javed Hasan <[email protected]>
 +M:    [email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/scsi/bnx2fc/
  
  BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
 -M:    [email protected]
 +M:    Nilesh Javali <[email protected]>
 +M:    Manish Rangankar <[email protected]>
 +M:    [email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/scsi/bnx2i/
@@@ -3840,16 -3848,6 +3840,16 @@@ S:    Orpha
  F:    Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
  F:    drivers/mtd/nand/raw/cadence-nand-controller.c
  
 +CADENCE USB3 DRD IP DRIVER
 +M:    Peter Chen <[email protected]>
 +M:    Pawel Laszczak <[email protected]>
 +M:    Roger Quadros <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
 +F:    Documentation/devicetree/bindings/usb/cdns-usb3.txt
 +F:    drivers/usb/cdns3/
 +
  CADET FM/AM RADIO RECEIVER DRIVER
  M:    Hans Verkuil <[email protected]>
  L:    [email protected]
@@@ -4032,7 -4030,7 +4032,7 @@@ S:      Supporte
  W:    http://linuxtv.org
  T:    git git://linuxtv.org/media_tree.git
  F:    Documentation/devicetree/bindings/media/cec-gpio.txt
 -F:    drivers/media/platform/cec-gpio/
 +F:    drivers/media/cec/platform/cec-gpio/
  
  CELL BROADBAND ENGINE ARCHITECTURE
  M:    Arnd Bergmann <[email protected]>
@@@ -4102,11 -4100,6 +4102,11 @@@ T:    git git://git.kernel.org/pub/scm/lin
  F:    drivers/char/
  F:    drivers/misc/
  F:    include/linux/miscdevice.h
 +X:    drivers/char/agp/
 +X:    drivers/char/hw_random/
 +X:    drivers/char/ipmi/
 +X:    drivers/char/random.c
 +X:    drivers/char/tpm/
  
  CHECKPATCH
  M:    Andy Whitcroft <[email protected]>
@@@ -4177,7 -4170,6 +4177,7 @@@ CIRRUS LOGIC AUDIO CODEC DRIVER
  M:    James Schulman <[email protected]>
  M:    David Rhodes <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 +L:    [email protected]
  S:    Maintained
  F:    sound/soc/codecs/cs*
  
@@@ -5014,12 -5006,6 +5014,12 @@@ S:    Maintaine
  F:    drivers/base/devcoredump.c
  F:    include/linux/devcoredump.h
  
 +DEVICE DEPENDENCY HELPER SCRIPT
 +M:    Saravana Kannan <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    scripts/dev-needs.sh
 +
  DEVICE DIRECT ACCESS (DAX)
  M:    Dan Williams <[email protected]>
  M:    Vishal Verma <[email protected]>
@@@ -5216,7 -5202,7 +5216,7 @@@ T:      git git://git.infradead.org/users/hc
  F:    include/asm-generic/dma-mapping.h
  F:    include/linux/dma-direct.h
  F:    include/linux/dma-mapping.h
- F:    include/linux/dma-noncoherent.h
+ F:    include/linux/dma-map-ops.h
  F:    kernel/dma/
  
  DMA-BUF HEAPS FRAMEWORK
@@@ -5388,6 -5374,7 +5388,6 @@@ F:      include/linux/kobj
  F:    lib/kobj*
  
  DRIVERS FOR ADAPTIVE VOLTAGE SCALING (AVS)
 -M:    Kevin Hilman <[email protected]>
  M:    Nishanth Menon <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -5418,7 -5405,7 +5418,7 @@@ F:      drivers/gpu/drm/panel/panel-arm-vers
  
  DRM DRIVER FOR ASPEED BMC GFX
  M:    Joel Stanley <[email protected]>
 -L:    [email protected]
 +L:    [email protected] (moderated for non-subscribers)
  S:    Supported
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
@@@ -5426,10 -5413,7 +5426,10 @@@ F:    drivers/gpu/drm/aspeed
  
  DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
  M:    Dave Airlie <[email protected]>
 -S:    Odd Fixes
 +R:    Thomas Zimmermann <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    drivers/gpu/drm/ast/
  
  DRM DRIVER FOR BOCHS VIRTUAL GPU
@@@ -5503,24 -5487,14 +5503,24 @@@ S:   Maintaine
  F:    drivers/gpu/drm/panel/panel-lvds.c
  F:    Documentation/devicetree/bindings/display/panel/lvds.yaml
  
 +DRM DRIVER FOR MANTIX MLAF057WE51 PANELS
 +M:    Guido Günther <[email protected]>
 +R:    Purism Kernel Team <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/display/panel/mantix,mlaf057we51-x.yaml
 +F:    drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
 +
  DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/mga/
  F:    include/uapi/drm/mga_drm.h
  
 -DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
 +DRM DRIVER FOR MGA G200 GRAPHICS CHIPS
  M:    Dave Airlie <[email protected]>
 -S:    Odd Fixes
 +R:    Thomas Zimmermann <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    drivers/gpu/drm/mgag200/
  
  DRM DRIVER FOR MI0283QT
@@@ -5601,13 -5575,12 +5601,13 @@@ S:   Maintaine
  F:    Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
  F:    drivers/gpu/drm/panel/panel-raydium-rm67191.c
  
 -DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
 +DRM DRIVER FOR SITRONIX ST7703 PANELS
  M:    Guido Günther <[email protected]>
  R:    Purism Kernel Team <[email protected]>
 +R:    Ondrej Jirman <[email protected]>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
 -F:    drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
 +F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
 +F:    drivers/gpu/drm/panel/panel-sitronix-st7703.c
  
  DRM DRIVER FOR SAVAGE VIDEO CARDS
  S:    Orphan / Obsolete
@@@ -5666,15 -5639,13 +5666,15 @@@ F:   drivers/gpu/drm/panel/panel-tpo-tpg1
  DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
  M:    Dave Airlie <[email protected]>
  R:    Sean Paul <[email protected]>
 +R:    Thomas Zimmermann <[email protected]>
  L:    [email protected]
 -S:    Odd Fixes
 +S:    Supported
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    drivers/gpu/drm/udl/
  
  DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
  M:    Rodrigo Siqueira <[email protected]>
 +M:    Melissa Wen <[email protected]>
  R:    Haneen Mohammed <[email protected]>
  R:    Daniel Vetter <[email protected]>
  L:    [email protected]
@@@ -5809,7 -5780,7 +5809,7 @@@ F:      drivers/gpu/drm/gma500
  
  DRM DRIVERS FOR HISILICON
  M:    Xinliang Liu <[email protected]>
 -M:    Rongrong Zou <zourongrong@gmail.com>
 +M:    Tian Tao  <tiantao6@hisilicon.com>
  R:    John Stultz <[email protected]>
  R:    Xinwei Kong <[email protected]>
  R:    Chen Feng <[email protected]>
@@@ -5835,7 -5806,6 +5835,7 @@@ L:      [email protected]
  S:    Supported
  F:    Documentation/devicetree/bindings/display/mediatek/
  F:    drivers/gpu/drm/mediatek/
 +F:    drivers/phy/mediatek/phy-mtk-hdmi*
  
  DRM DRIVERS FOR NVIDIA TEGRA
  M:    Thierry Reding <[email protected]>
@@@ -6209,7 -6179,7 +6209,7 @@@ S:      Supporte
  F:    drivers/edac/bluefield_edac.c
  
  EDAC-CALXEDA
 -M:    Robert Richter <[email protected]>
 +M:    Andre Przywara <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/edac/highbank*
@@@ -6852,17 -6822,14 +6852,17 @@@ F:   drivers/net/ethernet/nvidia/
  
  FPGA DFL DRIVERS
  M:    Wu Hao <[email protected]>
 +R:    Tom Rix <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-dfl
  F:    Documentation/fpga/dfl.rst
  F:    drivers/fpga/dfl*
  F:    include/uapi/linux/fpga-dfl.h
  
  FPGA MANAGER FRAMEWORK
  M:    Moritz Fischer <[email protected]>
 +R:    Tom Rix <[email protected]>
  L:    [email protected]
  S:    Maintained
  W:    http://www.rocketboards.org
@@@ -7268,7 -7235,7 +7268,7 @@@ F:      drivers/staging/gasket
  GCC PLUGINS
  M:    Kees Cook <[email protected]>
  R:    Emese Revfy <[email protected]>
 -L:    [email protected]
 +L:    [email protected]
  S:    Maintained
  F:    Documentation/kbuild/gcc-plugins.rst
  F:    scripts/Makefile.gcc-plugins
@@@ -7768,8 -7735,8 +7768,8 @@@ F:      Documentation/watchdog/hpwdt.rs
  F:    drivers/watchdog/hpwdt.c
  
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 -M:    Don Brace <don.brace@microsemi.com>
 -L:    esc.storagedev@microsemi.com
 +M:    Don Brace <don.brace@microchip.com>
 +L:    storagedev@microchip.com
  L:    [email protected]
  S:    Supported
  F:    Documentation/scsi/hpsa.rst
@@@ -7912,13 -7879,6 +7912,13 @@@ W:    http://www.hisilicon.co
  F:    Documentation/devicetree/bindings/net/hisilicon*.txt
  F:    drivers/net/ethernet/hisilicon/
  
 +HIKEY960 ONBOARD USB GPIO HUB DRIVER
 +M:    John Stultz <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/misc/hisi_hikey_usb.c
 +F:    Documentation/devicetree/bindings/misc/hisilicon-hikey-usb.yaml
 +
  HISILICON PMU DRIVER
  M:    Shaokun Zhang <[email protected]>
  S:    Supported
@@@ -7962,12 -7922,6 +7962,12 @@@ F:    drivers/crypto/hisilicon/sec2/sec_cr
  F:    drivers/crypto/hisilicon/sec2/sec_crypto.h
  F:    drivers/crypto/hisilicon/sec2/sec_main.c
  
 +HISILICON STAGING DRIVERS FOR HIKEY 960/970
 +M:    Mauro Carvalho Chehab <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/staging/hikey9xx/
 +
  HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
  M:    Zaibo Xu <[email protected]>
  S:    Maintained
@@@ -8545,6 -8499,7 +8545,6 @@@ F:      drivers/iio/multiplexer/iio-mux.
  
  IIO SUBSYSTEM AND DRIVERS
  M:    Jonathan Cameron <[email protected]>
 -R:    Hartmut Knaack <[email protected]>
  R:    Lars-Peter Clausen <[email protected]>
  R:    Peter Meerwald-Stadler <[email protected]>
  L:    [email protected]
@@@ -8718,7 -8673,7 +8718,7 @@@ F:      drivers/input/input-mt.
  K:    \b(ABS|SYN)_MT_
  
  INSIDE SECURE CRYPTO DRIVER
 -M:    Antoine Tenart <a[email protected]>
 +M:    Antoine Tenart <a[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/crypto/inside-secure/
@@@ -8797,8 -8752,7 +8797,8 @@@ F:      include/drm/i915
  F:    include/uapi/drm/i915_drm.h
  
  INTEL ETHERNET DRIVERS
 -M:    Jeff Kirsher <[email protected]>
 +M:    Jesse Brandeburg <[email protected]>
 +M:    Tony Nguyen <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Supported
  W:    http://www.intel.com/support/feedback.htm
@@@ -8897,7 -8851,7 +8897,7 @@@ INTEL IPU3 CSI-2 CIO2 DRIVE
  M:    Yong Zhi <[email protected]>
  M:    Sakari Ailus <[email protected]>
  M:    Bingbu Cao <[email protected]>
 -R:    Tian Shu Qiu <[email protected]>
 +R:    Tianshu Qiu <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    Documentation/userspace-api/media/v4l/pixfmt-srggb10-ipu3.rst
@@@ -8906,7 -8860,7 +8906,7 @@@ F:      drivers/media/pci/intel/ipu3
  INTEL IPU3 CSI-2 IMGU DRIVER
  M:    Sakari Ailus <[email protected]>
  R:    Bingbu Cao <[email protected]>
 -R:    Tian Shu Qiu <[email protected]>
 +R:    Tianshu Qiu <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    Documentation/admin-guide/media/ipu3.rst
@@@ -8971,8 -8925,8 +8971,8 @@@ F:      arch/x86/include/asm/intel_punit_ipc
  F:    drivers/platform/x86/intel_punit_ipc.c
  
  INTEL PMC CORE DRIVER
 -M:    Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 -M:    Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 +M:    Rajneesh Bhardwaj <irenic.rajneesh@gmail.com>
 +M:    David E Box <david.e.box@intel.com>
  L:    [email protected]
  S:    Maintained
  F:    drivers/platform/x86/intel_pmc_core*
@@@ -8985,7 -8939,7 +8985,7 @@@ F:      drivers/gpio/gpio-*cove.
  F:    drivers/gpio/gpio-msic.c
  
  INTEL PMIC MULTIFUNCTION DEVICE DRIVERS
 -R:    Andy Shevchenko <[email protected]>
 +M:    Andy Shevchenko <[email protected]>
  S:    Maintained
  F:    drivers/mfd/intel_msic.c
  F:    drivers/mfd/intel_soc_pmic*
@@@ -9166,7 -9120,6 +9166,7 @@@ L:      [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
  F:    Documentation/devicetree/bindings/iommu/
 +F:    Documentation/userspace-api/iommu.rst
  F:    drivers/iommu/
  F:    include/linux/iommu.h
  F:    include/linux/iova.h
@@@ -9759,8 -9712,8 +9759,8 @@@ M:      Catalin Marinas <catalin.marinas@arm
  S:    Maintained
  F:    Documentation/dev-tools/kmemleak.rst
  F:    include/linux/kmemleak.h
 -F:    mm/kmemleak-test.c
  F:    mm/kmemleak.c
 +F:    samples/kmemleak/kmemleak-test.c
  
  KMOD KERNEL MODULE LOADER - USERMODE HELPER
  M:    Luis Chamberlain <[email protected]>
@@@ -9789,12 -9742,6 +9789,12 @@@ F:    Documentation/admin-guide/auxdisplay
  F:    drivers/auxdisplay/ks0108.c
  F:    include/linux/ks0108.h
  
 +KTD253 BACKLIGHT DRIVER
 +M:    Linus Walleij <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/leds/backlight/kinetic,ktd253.yaml
 +F:    drivers/video/backlight/ktd253-backlight.c
 +
  L3MDEV
  M:    David Ahern <[email protected]>
  L:    [email protected]
@@@ -9849,7 -9796,7 +9849,7 @@@ F:      drivers/scsi/53c700
  LEAKING_ADDRESSES
  M:    Tobin C. Harding <[email protected]>
  M:    Tycho Andersen <[email protected]>
 -L:    [email protected]
 +L:    [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git
  F:    scripts/leaking_addresses.pl
@@@ -9920,6 -9867,15 +9920,6 @@@ T:     git git://git.kernel.org/pub/scm/lin
  F:    drivers/ata/pata_arasan_cf.c
  F:    include/linux/pata_arasan_cf_data.h
  
 -LIBATA PATA DRIVERS
 -M:    Bartlomiej Zolnierkiewicz <[email protected]>
 -M:    Jens Axboe <[email protected]>
 -L:    [email protected]
 -S:    Maintained
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 -F:    drivers/ata/ata_generic.c
 -F:    drivers/ata/pata_*.c
 -
  LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
  M:    Linus Walleij <[email protected]>
  L:    [email protected]
@@@ -11089,7 -11045,7 +11089,7 @@@ F:   net/dsa/tag_mtk.
  
  MEDIATEK USB3 DRD IP DRIVER
  M:    Chunfeng Yun <[email protected]>
 -L:    [email protected] (moderated for non-subscribers)
 +L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
@@@ -11357,8 -11313,8 +11357,8 @@@ S:   Supporte
  W:    http://linux-meson.com/
  T:    git git://linuxtv.org/media_tree.git
  F:    Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
 -F:    drivers/media/platform/meson/ao-cec-g12a.c
 -F:    drivers/media/platform/meson/ao-cec.c
 +F:    drivers/media/cec/platform/meson/ao-cec-g12a.c
 +F:    drivers/media/cec/platform/meson/ao-cec.c
  
  MESON NAND CONTROLLER DRIVER FOR AMLOGIC SOCS
  M:    Liang Yang <[email protected]>
@@@ -11368,6 -11324,7 +11368,6 @@@ F:   Documentation/devicetree/bindings/mt
  F:    drivers/mtd/nand/raw/meson_*
  
  MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS
 -M:    Maxime Jourdan <[email protected]>
  M:    Neil Armstrong <[email protected]>
  L:    [email protected]
  L:    [email protected]
@@@ -11387,7 -11344,6 +11387,7 @@@ M:   Hemant Kumar <[email protected]
  L:    [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
 +F:    Documentation/ABI/stable/sysfs-bus-mhi
  F:    Documentation/mhi/
  F:    drivers/bus/mhi/
  F:    include/linux/mhi.h
@@@ -11586,8 -11542,8 +11586,8 @@@ F:   arch/mips/configs/generic/board-ocel
  F:    arch/mips/generic/board-ocelot.c
  
  MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
 -M:    Don Brace <don.brace@microsemi.com>
 -L:    esc.storagedev@microsemi.com
 +M:    Don Brace <don.brace@microchip.com>
 +L:    storagedev@microchip.com
  L:    [email protected]
  S:    Supported
  F:    Documentation/scsi/smartpqi.rst
@@@ -11819,13 -11775,6 +11819,13 @@@ Q: http://patchwork.linuxtv.org/project
  T:    git git://linuxtv.org/anttip/media_tree.git
  F:    drivers/media/usb/msi2500/
  
 +MSTAR INTERRUPT CONTROLLER DRIVER
 +M:    Mark-PK Tsai <[email protected]>
 +M:    Daniel Palmer <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/interrupt-controller/mstar,mst-intc.yaml
 +F:    drivers/irqchip/irq-mst-intc.c
 +
  MSYSTEMS DISKONCHIP G3 MTD DRIVER
  M:    Robert Jarzmik <[email protected]>
  L:    [email protected]
@@@ -12128,7 -12077,6 +12128,7 @@@ NETWORKING [DSA
  M:    Andrew Lunn <[email protected]>
  M:    Vivien Didelot <[email protected]>
  M:    Florian Fainelli <[email protected]>
 +M:    Vladimir Oltean <[email protected]>
  S:    Maintained
  F:    Documentation/devicetree/bindings/net/dsa/
  F:    drivers/net/dsa/
@@@ -12342,19 -12290,6 +12342,19 @@@ S: Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
  F:    arch/nios2/
  
 +NITRO ENCLAVES (NE)
 +M:    Andra Paraschiv <[email protected]>
 +M:    Alexandru Vasile <[email protected]>
 +M:    Alexandru Ciobotaru <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +W:    https://aws.amazon.com/ec2/nitro/nitro-enclaves/
 +F:    Documentation/virt/ne_overview.rst
 +F:    drivers/virt/nitro_enclaves/
 +F:    include/linux/nitro_enclaves.h
 +F:    include/uapi/linux/nitro_enclaves.h
 +F:    samples/nitro_enclaves/
 +
  NOHZ, DYNTICKS SUPPORT
  M:    Frederic Weisbecker <[email protected]>
  M:    Thomas Gleixner <[email protected]>
@@@ -12517,21 -12452,6 +12517,21 @@@ F: drivers/iio/gyro/fxas21002c_core.
  F:    drivers/iio/gyro/fxas21002c_i2c.c
  F:    drivers/iio/gyro/fxas21002c_spi.c
  
 +NXP i.MX 8MQ DCSS DRIVER
 +M:    Laurentiu Palcu <[email protected]>
 +R:    Lucas Stach <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
 +F:    drivers/gpu/drm/imx/dcss/
 +
 +NXP PTN5150A CC LOGIC AND EXTCON DRIVER
 +M:    Krzysztof Kozlowski <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
 +F:    drivers/extcon/extcon-ptn5150.c
 +
  NXP SGTL5000 DRIVER
  M:    Fabio Estevam <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
@@@ -12582,7 -12502,6 +12582,7 @@@ M:   Josh Poimboeuf <[email protected]
  M:    Peter Zijlstra <[email protected]>
  S:    Supported
  F:    tools/objtool/
 +F:    include/linux/objtool.h
  
  OCELOT ETHERNET SWITCH DRIVER
  M:    Microchip Linux Driver Support <[email protected]>
@@@ -12844,7 -12763,7 +12844,7 @@@ T:   git git://linuxtv.org/media_tree.gi
  F:    drivers/media/i2c/ov2685.c
  
  OMNIVISION OV2740 SENSOR DRIVER
 -M:    Tianshu Qiu <tian.shu.qiua@intel.com>
 +M:    Tianshu Qiu <[email protected]>
  R:    Shawn Tu <[email protected]>
  R:    Bingbu Cao <[email protected]>
  L:    [email protected]
@@@ -12860,12 -12779,10 +12860,12 @@@ T:        git git://linuxtv.org/media_tree.gi
  F:    drivers/media/i2c/ov5640.c
  
  OMNIVISION OV5647 SENSOR DRIVER
 -M:    Luis Oliveira <[email protected]>
 +M:    Dave Stevenson <[email protected]>
 +M:    Jacopo Mondi <[email protected]>
  L:    [email protected]
  S:    Maintained
  T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/ov5647.yaml
  F:    drivers/media/i2c/ov5647.c
  
  OMNIVISION OV5670 SENSOR DRIVER
@@@ -13128,9 -13045,7 +13128,9 @@@ F:   lib/packing.
  
  PADATA PARALLEL EXECUTION MECHANISM
  M:    Steffen Klassert <[email protected]>
 +M:    Daniel Jordan <[email protected]>
  L:    [email protected]
 +L:    [email protected]
  S:    Maintained
  F:    Documentation/core-api/padata.rst
  F:    include/linux/padata.h
@@@ -13267,7 -13182,6 +13267,7 @@@ F:   drivers/firmware/pcdp.
  
  PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
  M:    Thomas Petazzoni <[email protected]>
 +M:    Pali Rohár <[email protected]>
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
@@@ -13802,9 -13716,10 +13802,9 @@@ PIN CONTROLLER - RENESA
  M:    Geert Uytterhoeven <[email protected]>
  L:    [email protected]
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git sh-pfc
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-drivers.git renesas-pinctrl
  F:    Documentation/devicetree/bindings/pinctrl/renesas,*
 -F:    drivers/pinctrl/pinctrl-rz*
 -F:    drivers/pinctrl/sh-pfc/
 +F:    drivers/pinctrl/renesas/
  
  PIN CONTROLLER - SAMSUNG
  M:    Tomasz Figa <[email protected]>
@@@ -14037,7 -13952,6 +14037,7 @@@ PRINT
  M:    Petr Mladek <[email protected]>
  M:    Sergey Senozhatsky <[email protected]>
  R:    Steven Rostedt <[email protected]>
 +R:    John Ogness <[email protected]>
  S:    Maintained
  F:    include/linux/printk.h
  F:    kernel/printk/
@@@ -14275,17 -14189,13 +14275,17 @@@ S:        Supporte
  F:    drivers/infiniband/hw/qib/
  
  QLOGIC QL41xxx FCOE DRIVER
 -M:    [email protected]
 +M:    Saurav Kashyap <[email protected]>
 +M:    Javed Hasan <[email protected]>
 +M:    [email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/scsi/qedf/
  
  QLOGIC QL41xxx ISCSI DRIVER
 -M:    [email protected]
 +M:    Nilesh Javali <[email protected]>
 +M:    Manish Rangankar <[email protected]>
 +M:    [email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/scsi/qedi/
@@@ -14318,20 -14228,21 +14318,20 @@@ M:        Nilesh Javali <[email protected]
  M:    [email protected]
  L:    [email protected]
  S:    Supported
 -F:    Documentation/scsi/LICENSE.qla2xxx
  F:    drivers/scsi/qla2xxx/
  
  QLOGIC QLA3XXX NETWORK DRIVER
  M:    [email protected]
  L:    [email protected]
  S:    Supported
 -F:    Documentation/networking/device_drivers/ethernet/qlogic/LICENSE.qla3xxx
  F:    drivers/net/ethernet/qlogic/qla3xxx.*
  
  QLOGIC QLA4XXX iSCSI DRIVER
 -M:    [email protected]
 +M:    Nilesh Javali <[email protected]>
 +M:    Manish Rangankar <[email protected]>
 +M:    [email protected]
  L:    [email protected]
  S:    Supported
 -F:    Documentation/scsi/LICENSE.qla4xxx
  F:    drivers/scsi/qla4xxx/
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
@@@ -14679,9 -14590,9 +14679,9 @@@ M:   Niklas Söderlund <niklas.soderlund+
  L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
 -F:    drivers/media/i2c/rdacm20.c
  F:    drivers/media/i2c/max9271.c
  F:    drivers/media/i2c/max9271.h
 +F:    drivers/media/i2c/rdacm20.c
  
  RDC R-321X SoC
  M:    Florian Fainelli <[email protected]>
@@@ -14975,11 -14886,8 +14975,11 @@@ F: include/linux/hid-roccat
  
  ROCKCHIP ISP V1 DRIVER
  M:    Helen Koike <[email protected]>
 +M:    Dafna Hirschfeld <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    Documentation/admin-guide/media/rkisp1.rst
 +F:    Documentation/userspace-api/media/v4l/pixfmt-meta-rkisp1.rst
  F:    drivers/staging/media/rkisp1/
  
  ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER
@@@ -15475,7 -15383,6 +15475,7 @@@ R:   Dietmar Eggemann <dietmar.eggemann@a
  R:    Steven Rostedt <[email protected]> (SCHED_FIFO/SCHED_RR)
  R:    Ben Segall <[email protected]> (CONFIG_CFS_BANDWIDTH)
  R:    Mel Gorman <[email protected]> (CONFIG_NUMA_BALANCING)
 +R:    Daniel Bristot de Oliveira <[email protected]> (SCHED_DEADLINE)
  L:    [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
@@@ -15597,8 -15504,8 +15597,8 @@@ F:   drivers/mmc/host/sdricoh_cs.
  SECO BOARDS CEC DRIVER
  M:    Ettore Chimenti <[email protected]>
  S:    Maintained
 -F:    drivers/media/platform/seco-cec/seco-cec.c
 -F:    drivers/media/platform/seco-cec/seco-cec.h
 +F:    drivers/media/cec/platform/seco/seco-cec.c
 +F:    drivers/media/cec/platform/seco/seco-cec.h
  
  SECURE COMPUTING
  M:    Kees Cook <[email protected]>
@@@ -15691,7 -15598,6 +15691,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    Documentation/ABI/obsolete/sysfs-selinux-checkreqprot
  F:    Documentation/ABI/obsolete/sysfs-selinux-disable
  F:    Documentation/admin-guide/LSM/SELinux.rst
 +F:    include/trace/events/avc.h
  F:    include/uapi/linux/selinux_netlink.h
  F:    scripts/selinux/
  F:    security/selinux/
@@@ -16166,6 -16072,7 +16166,6 @@@ F:   include/uapi/rdma/rdma_user_rxe.
  SOFTLOGIC 6x10 MPEG CODEC
  M:    Bluecherry Maintainers <[email protected]>
  M:    Anton Sviridenko <[email protected]>
 -M:    Andrey Utkin <[email protected]>
  M:    Andrey Utkin <[email protected]>
  M:    Ismael Luceno <[email protected]>
  L:    [email protected]
@@@ -16585,6 -16492,7 +16585,6 @@@ F:   drivers/staging/rtl8712
  
  STAGING - SEPS525 LCD CONTROLLER DRIVERS
  M:    Michael Hennerich <[email protected]>
 -M:    Beniamin Bia <[email protected]>
  L:    [email protected]
  S:    Supported
  F:    Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml
@@@ -16632,7 -16540,7 +16632,7 @@@ STI CEC DRIVE
  M:    Benjamin Gaignard <[email protected]>
  S:    Maintained
  F:    Documentation/devicetree/bindings/media/stih-cec.txt
 -F:    drivers/media/platform/sti/cec/
 +F:    drivers/media/cec/platform/sti/
  
  STK1160 USB VIDEO CAPTURE DRIVER
  M:    Ezequiel Garcia <[email protected]>
@@@ -16816,13 -16724,6 +16816,13 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/gpio/snps,dw-apb-gpio.yaml
  F:    drivers/gpio/gpio-dwapb.c
  
 +SYNOPSYS DESIGNWARE APB SSI DRIVER
 +M:    Serge Semin <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.yaml
 +F:    drivers/spi/spi-dw*
 +
  SYNOPSYS DESIGNWARE AXI DMAC DRIVER
  M:    Eugeniy Paltsev <[email protected]>
  S:    Maintained
  S:    Maintained
  W:    https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
  Q:    https://patchwork.kernel.org/project/linux-integrity/list/
 -T:    git git://git.infradead.org/users/jjs/linux-tpmdd.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
  F:    drivers/char/tpm/
  
  TRACING
@@@ -17826,7 -17727,6 +17826,7 @@@ S:   Supporte
  W:    http://www.linux-mtd.infradead.org/doc/ubifs.html
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git next
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs.git fixes
 +F:    Documentation/filesystems/ubifs-authentication.rst
  F:    Documentation/filesystems/ubifs.rst
  F:    fs/ubifs/
  
@@@ -18220,6 -18120,14 +18220,6 @@@ T:  git git://linuxtv.org/media_tree.gi
  F:    drivers/media/usb/uvc/
  F:    include/uapi/linux/uvcvideo.h
  
 -USB VISION DRIVER
 -M:    Hans Verkuil <[email protected]>
 -L:    [email protected]
 -S:    Odd Fixes
 -W:    https://linuxtv.org
 -T:    git git://linuxtv.org/media_tree.git
 -F:    drivers/staging/media/usbvision/
 -
  USB WEBCAM GADGET
  M:    Laurent Pinchart <[email protected]>
  L:    [email protected]
@@@ -18373,8 -18281,7 +18373,8 @@@ F:   drivers/gpu/vga/vga_switcheroo.
  F:    include/linux/vga_switcheroo.h
  
  VIA RHINE NETWORK DRIVER
 -S:    Orphan
 +S:    Maintained
 +M:    Kevin Brace <[email protected]>
  F:    drivers/net/ethernet/via/via-rhine.c
  
  VIA SD/MMC CARD CONTROLLER DRIVER
@@@ -18419,8 -18326,10 +18419,8 @@@ S:  Maintaine
  F:    drivers/media/platform/video-mux.c
  
  VIDEOBUF2 FRAMEWORK
 -M:    Pawel Osciak <[email protected]>
 +M:    Tomasz Figa <[email protected]>
  M:    Marek Szyprowski <[email protected]>
 -M:    Kyungmin Park <[email protected]>
 -R:    Tomasz Figa <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/media/common/videobuf2/*
@@@ -18610,14 -18519,6 +18610,14 @@@ W: https://linuxtv.or
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/test-drivers/vivid/*
  
 +VIDTV VIRTUAL DIGITAL TV DRIVER
 +M:    Daniel W. S. Almeida <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +W:    https://linuxtv.org
 +T:    git git://linuxtv.org/media_tree.git
 +F:    drivers/media/test-drivers/vidtv/*
 +
  VLYNQ BUS
  M:    Florian Fainelli <[email protected]>
  L:    [email protected] (subscribers-only)
@@@ -18985,11 -18886,11 +18985,11 @@@ T:        git git://git.kernel.org/pub/scm/lin
  F:    arch/x86/mm/
  
  X86 PLATFORM DRIVERS
 -M:    Darren Hart <[email protected]>
 -M:    Andy Shevchenko <[email protected]>
 +M:    Hans de Goede <[email protected]>
 +M:    Mark Gross <[email protected]>
  L:    [email protected]
 -S:    Odd Fixes
 -T:    git git://git.infradead.org/linux-platform-drivers-x86.git
 +S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
  F:    drivers/platform/olpc/
  F:    drivers/platform/x86/
  
@@@ -19338,16 -19239,6 +19338,16 @@@ T: git git://git.kernel.org/pub/scm/lin
  F:    Documentation/filesystems/zonefs.rst
  F:    fs/zonefs/
  
 +ZR36067 VIDEO FOR LINUX DRIVER
 +M:    Corentin Labbe <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +W:    http://mjpeg.sourceforge.net/driver-zoran/
 +Q:    https://patchwork.linuxtv.org/project/linux-media/list/
 +F:    Documentation/driver-api/media/drivers/zoran.rst
 +F:    drivers/staging/media/zoran/
 +
  ZPOOL COMPRESSED PAGE STORAGE API
  M:    Dan Streetman <[email protected]>
  L:    [email protected]
diff --combined arch/arm/mm/init.c
index 45f9d5ec23604ed6c87a247e48593aed4c8d0f53,ab1d1931a3525efcff52a6b79e82d65f1a17d126..d57112a276f5ac79da2100970e8ca8c6a78af54a
@@@ -18,7 -18,7 +18,7 @@@
  #include <linux/highmem.h>
  #include <linux/gfp.h>
  #include <linux/memblock.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/sizes.h>
  #include <linux/stop_machine.h>
  #include <linux/swiotlb.h>
@@@ -299,14 -299,16 +299,14 @@@ free_memmap(unsigned long start_pfn, un
   */
  static void __init free_unused_memmap(void)
  {
 -      unsigned long start, prev_end = 0;
 -      struct memblock_region *reg;
 +      unsigned long start, end, prev_end = 0;
 +      int i;
  
        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
 -      for_each_memblock(memory, reg) {
 -              start = memblock_region_memory_base_pfn(reg);
 -
 +      for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
  #ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
 -              prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
 -                               MAX_ORDER_NR_PAGES);
 +              prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
        }
  
  #ifdef CONFIG_SPARSEMEM
  #endif
  }
  
 -#ifdef CONFIG_HIGHMEM
 -static inline void free_area_high(unsigned long pfn, unsigned long end)
 -{
 -      for (; pfn < end; pfn++)
 -              free_highmem_page(pfn_to_page(pfn));
 -}
 -#endif
 -
  static void __init free_highpages(void)
  {
  #ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
 -      struct memblock_region *mem, *res;
 +      phys_addr_t range_start, range_end;
 +      u64 i;
  
        /* set highmem page free */
 -      for_each_memblock(memory, mem) {
 -              unsigned long start = memblock_region_memory_base_pfn(mem);
 -              unsigned long end = memblock_region_memory_end_pfn(mem);
 +      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 +                              &range_start, &range_end, NULL) {
 +              unsigned long start = PHYS_PFN(range_start);
 +              unsigned long end = PHYS_PFN(range_end);
  
                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;
  
 -              if (memblock_is_nomap(mem))
 -                      continue;
 -
                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;
  
 -              /* Find and exclude any reserved regions */
 -              for_each_memblock(reserved, res) {
 -                      unsigned long res_start, res_end;
 -
 -                      res_start = memblock_region_reserved_base_pfn(res);
 -                      res_end = memblock_region_reserved_end_pfn(res);
 -
 -                      if (res_end < start)
 -                              continue;
 -                      if (res_start < start)
 -                              res_start = start;
 -                      if (res_start > end)
 -                              res_start = end;
 -                      if (res_end > end)
 -                              res_end = end;
 -                      if (res_start != start)
 -                              free_area_high(start, res_start);
 -                      start = res_end;
 -                      if (start == end)
 -                              break;
 -              }
 -
 -              /* And now free anything which remains */
 -              if (start < end)
 -                      free_area_high(start, end);
 +              for (; start < end; start++)
 +                      free_highmem_page(pfn_to_page(start));
        }
  #endif
  }
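
The arm conversion above (and the arm64 and microblaze ones below) replaces
open-coded for_each_memblock() walks with the dedicated memblock iterators. A
compact, self-contained sketch of the two idioms, with pr_info() standing in for
real work:

    #include <linux/memblock.h>
    #include <linux/printk.h>

    /* Sketch only: the 5.10 iterator idioms used in the hunks above. */
    static void __init walk_memory_example(void)
    {
            unsigned long start_pfn, end_pfn;
            phys_addr_t start, end;
            u64 j;
            int i;

            /* PFN-granular walk over all nodes, as in free_unused_memmap() */
            for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
                    pr_info("memory pfns: %lx-%lx\n", start_pfn, end_pfn);

            /* free (unreserved, non-nomap) ranges, as in free_highpages() */
            for_each_free_mem_range(j, NUMA_NO_NODE, MEMBLOCK_NONE,
                                    &start, &end, NULL)
                    pr_info("free range: %pa-%pa\n", &start, &end);
    }

Because for_each_free_mem_range() already skips reserved and nomap memory, the
manual reserved-region exclusion loop in free_highpages() could be deleted outright.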
diff --combined arch/arm/xen/mm.c
index d3ef975a0965ad5964689ffe2d2c32569efad57f,5c80088db13b59aa5fd4232c45d34bf75f230f8d..467fa225c3d0ed5c9793107f583c2be74d7a88c9
@@@ -1,7 -1,7 +1,7 @@@
  // SPDX-License-Identifier: GPL-2.0-only
  #include <linux/cpu.h>
  #include <linux/dma-direct.h>
- #include <linux/dma-noncoherent.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/gfp.h>
  #include <linux/highmem.h>
  #include <linux/export.h>
  
  unsigned long xen_get_swiotlb_free_pages(unsigned int order)
  {
 -      struct memblock_region *reg;
 +      phys_addr_t base;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
 +      u64 i;
  
 -      for_each_memblock(memory, reg) {
 -              if (reg->base < (phys_addr_t)0xffffffff) {
 +      for_each_mem_range(i, &base, NULL) {
 +              if (base < (phys_addr_t)0xffffffff) {
                        if (IS_ENABLED(CONFIG_ZONE_DMA32))
                                flags |= __GFP_DMA32;
                        else
diff --combined arch/arm64/mm/init.c
index f0bf86d8162272e89f40be3da49489c4a8ec176b,1b591ddb28b01b39ec30724f78a854486d98b5fd..a53c1e0fb0179e5c8151e0a4393ac2d736accd8d
@@@ -21,8 -21,7 +21,7 @@@
  #include <linux/of.h>
  #include <linux/of_fdt.h>
  #include <linux/dma-direct.h>
- #include <linux/dma-mapping.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/efi.h>
  #include <linux/swiotlb.h>
  #include <linux/vmalloc.h>
@@@ -429,6 -428,8 +428,8 @@@ void __init bootmem_init(void
        arm64_hugetlb_cma_reserve();
  #endif
  
+       dma_pernuma_cma_reserve();
        /*
         * sparse_init() tries to allocate memory from memblock, so must be
         * done after the fixed reservations
@@@ -471,10 -472,12 +472,10 @@@ static inline void free_memmap(unsigne
   */
  static void __init free_unused_memmap(void)
  {
 -      unsigned long start, prev_end = 0;
 -      struct memblock_region *reg;
 -
 -      for_each_memblock(memory, reg) {
 -              start = __phys_to_pfn(reg->base);
 +      unsigned long start, end, prev_end = 0;
 +      int i;
  
 +      for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
  #ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist due
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
 -              prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
 -                               MAX_ORDER_NR_PAGES);
 +              prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
        }
  
  #ifdef CONFIG_SPARSEMEM
diff --combined arch/ia64/Kconfig
index 996c410f21521b8eb66d1aaa79cb2e1315be33d5,513ba0c5d33610ee3c46b9add7993cdd4530565b..f11a8ebfe5c226424e1c74d33e791865c45a2eca
@@@ -8,6 -8,7 +8,7 @@@ menu "Processor type and features
  
  config IA64
        bool
+       select ARCH_HAS_DMA_MARK_CLEAN
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select ACPI
@@@ -32,8 -33,6 +33,6 @@@
        select TTY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_VIRT_CPU_ACCOUNTING
-       select DMA_NONCOHERENT_MMAP
-       select ARCH_HAS_SYNC_DMA_FOR_CPU
        select VIRT_TO_BUS
        select GENERIC_IRQ_PROBE
        select GENERIC_PENDING_IRQ if SMP
@@@ -56,7 -55,6 +55,7 @@@
        select NEED_DMA_MAP_STATE
        select NEED_SG_DMA_LENGTH
        select NUMA if !FLATMEM
 +      select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
@@@ -363,6 -361,15 +362,6 @@@ config ARCH_PROC_KCORE_TEX
  config IA64_MCA_RECOVERY
        tristate "MCA recovery from errors other than TLB."
  
 -config PERFMON
 -      bool "Performance monitor support"
 -      depends on BROKEN
 -      help
 -        Selects whether support for the IA-64 performance monitor hardware
 -        is included in the kernel.  This makes some kernel data-structures a
 -        little bigger and slows down execution a bit, but it is generally
 -        a good idea to turn this on.  If you're unsure, say Y.
 -
  config IA64_PALINFO
        tristate "/proc/pal support"
        help
diff --combined arch/ia64/mm/init.c
index 8e7b8c6c576eeea47aa2fe72accb50aa73f7ab67,ccba04d12671829a9312584eee26fe9b00ce4b58..d8686bf3ae2f685deb37fbdac22289ecbc6933cd
@@@ -8,7 -8,7 +8,7 @@@
  #include <linux/kernel.h>
  #include <linux/init.h>
  
- #include <linux/dma-noncoherent.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/dmar.h>
  #include <linux/efi.h>
  #include <linux/elf.h>
@@@ -73,8 -73,7 +73,7 @@@ __ia64_sync_icache_dcache (pte_t pte
   * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
   * flush them when they get mapped into an executable vm-area.
   */
- void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
-               enum dma_data_direction dir)
+ void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
  {
        unsigned long pfn = PHYS_PFN(paddr);
  
@@@ -538,7 -537,7 +537,7 @@@ virtual_memmap_init(u64 start, u64 end
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
 -                               MEMMAP_EARLY, NULL);
 +                               MEMINIT_EARLY, NULL);
        return 0;
  }
  
@@@ -547,8 -546,8 +546,8 @@@ memmap_init (unsigned long size, int ni
             unsigned long start_pfn)
  {
        if (!vmem_map) {
 -              memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
 -                              NULL);
 +              memmap_init_zone(size, nid, zone, start_pfn,
 +                               MEMINIT_EARLY, NULL);
        } else {
                struct page *start;
                struct memmap_init_callback_data args;
diff --combined arch/microblaze/mm/init.c
index 0902c459c385247cfe38341cc787ab25d01fab68,7659a8b86580fd43f23882ccfc990cebb53eb7af..45da639bd22ca6fd344ee249a9a3cf7aa4924c85
@@@ -7,7 -7,7 +7,7 @@@
   * for more details.
   */
  
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/memblock.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
@@@ -108,15 -108,15 +108,15 @@@ static void __init paging_init(void
  
  void __init setup_memory(void)
  {
 -      struct memblock_region *reg;
 -
  #ifndef CONFIG_MMU
        u32 kernel_align_start, kernel_align_size;
 +      phys_addr_t start, end;
 +      u64 i;
  
        /* Find main memory where is the kernel */
 -      for_each_memblock(memory, reg) {
 -              memory_start = (u32)reg->base;
 -              lowmem_size = reg->size;
 +      for_each_mem_range(i, &start, &end) {
 +              memory_start = start;
 +              lowmem_size = end - start;
                if ((memory_start <= (u32)_text) &&
                        ((u32)_text <= (memory_start + lowmem_size - 1))) {
                        memory_size = lowmem_size;
        pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
        pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
  
 -      /* Add active regions with valid PFNs */
 -      for_each_memblock(memory, reg) {
 -              unsigned long start_pfn, end_pfn;
 -
 -              start_pfn = memblock_region_memory_base_pfn(reg);
 -              end_pfn = memblock_region_memory_end_pfn(reg);
 -              memblock_set_node(start_pfn << PAGE_SHIFT,
 -                                (end_pfn - start_pfn) << PAGE_SHIFT,
 -                                &memblock.memory, 0);
 -      }
 -
        paging_init();
  }
  
diff --combined arch/mips/Kconfig
index 440614dc9de296372f10d0c9e7bc566dc2d8a6fb,92b0fde3d882e51e643a1bc04b8333b361b11e1d..c695d103bf6d481749136e997f239f1c0e8bd8fa
@@@ -86,7 -86,6 +86,7 @@@ config MIP
        select MODULES_USE_ELF_REL if MODULES
        select MODULES_USE_ELF_RELA if MODULES && 64BIT
        select PERF_USE_VMALLOC
 +      select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
        select RTC_LIB
        select SYSCTL_EXCEPTION_TRACE
        select VIRT_TO_BUS
@@@ -1136,7 -1135,6 +1136,6 @@@ config DMA_NONCOHEREN
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_HAS_DMA_SET_UNCACHED
        select DMA_NONCOHERENT_MMAP
-       select DMA_NONCOHERENT_CACHE_SYNC
        select NEED_DMA_MAP_STATE
  
  config SYS_HAS_EARLY_PRINTK
@@@ -3006,6 -3004,23 +3005,6 @@@ config PHYSICAL_STAR
          specified in the "crashkernel=YM@XM" command line boot parameter
          passed to the panic-ed kernel).
  
 -config SECCOMP
 -      bool "Enable seccomp to safely compute untrusted bytecode"
 -      depends on PROC_FS
 -      default y
 -      help
 -        This kernel feature is useful for number crunching applications
 -        that may need to compute untrusted bytecode during their
 -        execution. By using pipes or other transports made available to
 -        the process as file descriptors supporting the read/write
 -        syscalls, it's possible to isolate those applications in
 -        their own address space using seccomp. Once seccomp is
 -        enabled via /proc/<pid>/seccomp, it cannot be disabled
 -        and the task is only allowed to execute a few safe syscalls
 -        defined by each seccomp mode.
 -
 -        If unsure, say Y. Only embedded should say N here.
 -
  config MIPS_O32_FP64_SUPPORT
        bool "Support for O32 binaries using 64-bit FP" if !CPU_MIPSR6
        depends on 32BIT || MIPS32_O32
diff --combined arch/mips/cavium-octeon/dma-octeon.c
index ad1aecc4b4018232c8b489922901611d1909e999,232fa1017b1ec91fd0a4a9d2716fb1ae089e2181..df70308db0e697103bf172e920cd4e49f3280451
@@@ -168,7 -168,7 +168,7 @@@ void __init octeon_pci_dma_init(void
  }
  #endif /* CONFIG_PCI */
  
- dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
  {
  #ifdef CONFIG_PCI
        if (dev && dev_is_pci(dev))
        return paddr;
  }
  
- phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
  {
  #ifdef CONFIG_PCI
        if (dev && dev_is_pci(dev))
@@@ -190,25 -190,25 +190,25 @@@ char *octeon_swiotlb
  
  void __init plat_swiotlb_setup(void)
  {
 -      struct memblock_region *mem;
 +      phys_addr_t start, end;
        phys_addr_t max_addr;
        phys_addr_t addr_size;
        size_t swiotlbsize;
        unsigned long swiotlb_nslabs;
 +      u64 i;
  
        max_addr = 0;
        addr_size = 0;
  
 -      for_each_memblock(memory, mem) {
 +      for_each_mem_range(i, &start, &end) {
                /* These addresses map low for PCI. */
 -              if (mem->base > 0x410000000ull && !OCTEON_IS_OCTEON2())
 +              if (start > 0x410000000ull && !OCTEON_IS_OCTEON2())
                        continue;
  
 -              addr_size += mem->size;
 -
 -              if (max_addr < mem->base + mem->size)
 -                      max_addr = mem->base + mem->size;
 +              addr_size += (end - start);
  
 +              if (max_addr < end)
 +                      max_addr = end;
        }
  
        swiotlbsize = PAGE_SIZE;
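
The octeon hunk above drops the double-underscore prefix: with the common offset handling moved into the per-device dma_range_map, an architecture that selects ARCH_HAS_PHYS_TO_DMA supplies phys_to_dma()/dma_to_phys() directly instead of the old internal __-prefixed hooks. For illustration, a trivial identity implementation (hypothetical, no translation at all) would be:

        dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
        {
                return paddr;   /* bus address equals CPU physical address */
        }

        phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
        {
                return daddr;
        }
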
diff --combined arch/mips/kernel/setup.c
index 335bd188b8b459a56864252d4e58f535dadc42c1,464bfd3957ae9682ce81acf828ad1078b9136d1e..ee8636ccded2950b87084500f7665e01a33f732d
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/kexec.h>
  #include <linux/sizes.h>
  #include <linux/device.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/decompress/generic.h>
  #include <linux/of_fdt.h>
  #include <linux/of_reserved_mem.h>
@@@ -300,9 -300,8 +300,9 @@@ static void __init bootmem_init(void
  
  static void __init bootmem_init(void)
  {
 -      struct memblock_region *mem;
        phys_addr_t ramstart, ramend;
 +      phys_addr_t start, end;
 +      u64 i;
  
        ramstart = memblock_start_of_DRAM();
        ramend = memblock_end_of_DRAM();
  
        min_low_pfn = ARCH_PFN_OFFSET;
        max_pfn = PFN_DOWN(ramend);
 -      for_each_memblock(memory, mem) {
 -              unsigned long start = memblock_region_memory_base_pfn(mem);
 -              unsigned long end = memblock_region_memory_end_pfn(mem);
 -
 +      for_each_mem_range(i, &start, &end) {
                /*
                 * Skip highmem here so we get an accurate max_low_pfn if low
                 * memory stops short of high memory.
                 * If the region overlaps HIGHMEM_START, end is clipped so
                 * max_pfn excludes the highmem portion.
                 */
 -              if (memblock_is_nomap(mem))
 -                      continue;
                if (start >= PFN_DOWN(HIGHMEM_START))
                        continue;
                if (end > PFN_DOWN(HIGHMEM_START))
@@@ -446,12 -450,13 +446,12 @@@ early_param("memmap", early_parse_memma
  unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
  static int __init early_parse_elfcorehdr(char *p)
  {
 -      struct memblock_region *mem;
 +      phys_addr_t start, end;
 +      u64 i;
  
        setup_elfcorehdr = memparse(p, &p);
  
 -       for_each_memblock(memory, mem) {
 -              unsigned long start = mem->base;
 -              unsigned long end = start + mem->size;
 +      for_each_mem_range(i, &start, &end) {
                if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
                        /*
                         * Reserve from the elf core header to the end of
@@@ -715,8 -720,7 +715,8 @@@ static void __init arch_mem_init(char *
  
  static void __init resource_init(void)
  {
 -      struct memblock_region *region;
 +      phys_addr_t start, end;
 +      u64 i;
  
        if (UNCAC_BASE != IO_BASE)
                return;
        bss_resource.start = __pa_symbol(&__bss_start);
        bss_resource.end = __pa_symbol(&__bss_stop) - 1;
  
 -      for_each_memblock(memory, region) {
 -              phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
 -              phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
 +      for_each_mem_range(i, &start, &end) {
                struct resource *res;
  
                res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
                              sizeof(struct resource));
  
                res->start = start;
 -              res->end = end;
 +              /*
 +               * In memblock, end points to the first byte after the
 +               * range while in resources, end points to the last byte in
 +               * the range.
 +               */
 +              res->end = end - 1;
                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                res->name = "System RAM";
  
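
The comment introduced above (and repeated in the s390 hunk below) guards against an easy off-by-one: memblock ranges are half-open, [start, end), while struct resource is inclusive, [start, end]. A condensed sketch of the conversion, assuming the usual System RAM registration:

        static void __init add_ram_resource(phys_addr_t start, phys_addr_t end)
        {
                struct resource *res;

                res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
                if (!res)
                        return;

                res->name  = "System RAM";
                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                res->start = start;     /* memblock start, inclusive */
                res->end   = end - 1;   /* memblock end is exclusive */
                request_resource(&iomem_resource, res);
        }
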
diff --combined arch/parisc/Kconfig
index cd4afe1e7a6c770dd1f0b13df74768d319017858,ed15da1da174e08152a9244b72254f81f009b9c2..d9a50f7a666c02df76063e03594ae07eb093ea80
@@@ -195,7 -195,6 +195,6 @@@ config PA1
        depends on PA7000 || PA7100LC || PA7200 || PA7300LC
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       select DMA_NONCOHERENT_CACHE_SYNC
  
  config PREFETCH
        def_bool y
@@@ -378,3 -377,19 +377,3 @@@ endmen
  
  
  source "drivers/parisc/Kconfig"
 -
 -config SECCOMP
 -      def_bool y
 -      prompt "Enable seccomp to safely compute untrusted bytecode"
 -      help
 -        This kernel feature is useful for number crunching applications
 -        that may need to compute untrusted bytecode during their
 -        execution. By using pipes or other transports made available to
 -        the process as file descriptors supporting the read/write
 -        syscalls, it's possible to isolate those applications in
 -        their own address space using seccomp. Once seccomp is
 -        enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
 -        and the task is only allowed to execute a few safe syscalls
 -        defined by each seccomp mode.
 -
 -        If unsure, say Y. Only embedded should say N here.
diff --combined arch/s390/kernel/setup.c
index d44e522c569baf7bb2ffb369906952203e7431ff,151092565a27041be558ccf882448be8aa4581db..c64a95ae830f81179d8ef93e65f57777c2a753ff
@@@ -37,7 -37,7 +37,7 @@@
  #include <linux/root_dev.h>
  #include <linux/console.h>
  #include <linux/kernel_stat.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/device.h>
  #include <linux/notifier.h>
  #include <linux/pfn.h>
@@@ -484,9 -484,8 +484,9 @@@ static struct resource __initdata *stan
  static void __init setup_resources(void)
  {
        struct resource *res, *std_res, *sub_res;
 -      struct memblock_region *reg;
 +      phys_addr_t start, end;
        int j;
 +      u64 i;
  
        code_resource.start = (unsigned long) _text;
        code_resource.end = (unsigned long) _etext - 1;
        bss_resource.start = (unsigned long) __bss_start;
        bss_resource.end = (unsigned long) __bss_stop - 1;
  
 -      for_each_memblock(memory, reg) {
 +      for_each_mem_range(i, &start, &end) {
                res = memblock_alloc(sizeof(*res), 8);
                if (!res)
                        panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
  
                res->name = "System RAM";
 -              res->start = reg->base;
 -              res->end = reg->base + reg->size - 1;
 +              res->start = start;
 +              /*
 +               * In memblock, end points to the first byte after the
 +               * range while in resources, end points to the last byte in
 +               * the range.
 +               */
 +              res->end = end - 1;
                request_resource(&iomem_resource, res);
  
                for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
@@@ -782,8 -776,8 +782,8 @@@ static void __init memblock_add_mem_det
        unsigned long start, end;
        int i;
  
 -      memblock_dbg("physmem info source: %s (%hhd)\n",
 -                   get_mem_info_source(), mem_detect.info_source);
 +      pr_debug("physmem info source: %s (%hhd)\n",
 +               get_mem_info_source(), mem_detect.info_source);
        /* keep memblock lists close to the kernel */
        memblock_set_bottom_up(true);
        for_each_mem_detect_block(i, &start, &end) {
@@@ -825,15 -819,14 +825,15 @@@ static void __init reserve_kernel(void
  
  static void __init setup_memory(void)
  {
 -      struct memblock_region *reg;
 +      phys_addr_t start, end;
 +      u64 i;
  
        /*
         * Init storage key for present memory
         */
 -      for_each_memblock(memory, reg) {
 -              storage_key_init_range(reg->base, reg->base + reg->size);
 -      }
 +      for_each_mem_range(i, &start, &end)
 +              storage_key_init_range(start, end);
 +
        psw_set_key(PAGE_DEFAULT_KEY);
  
        /* Only cosmetics */
diff --combined arch/x86/kernel/setup.c
index b16caee53bea14bd1d4ee151a4716f1383bb5764,e8155e85bd8f3444cf4d48fefb828bb98bcd67ce..84f581c91db4575ae5fcb5d91f6fc47c1c8934b9
@@@ -7,6 -7,7 +7,7 @@@
   */
  #include <linux/console.h>
  #include <linux/crash_dump.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/dmi.h>
  #include <linux/efi.h>
  #include <linux/init_ohci1394_dma.h>
@@@ -19,7 -20,7 +20,8 @@@
  #include <linux/hugetlb.h>
  #include <linux/tboot.h>
  #include <linux/usb/xhci-dbgp.h>
 +#include <linux/static_call.h>
+ #include <linux/swiotlb.h>
  
  #include <uapi/linux/mount.h>
  
@@@ -264,12 -265,16 +266,12 @@@ static void __init relocate_initrd(void
        u64 area_size     = PAGE_ALIGN(ramdisk_size);
  
        /* We need to move the initrd down into directly mapped mem */
 -      relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 -                                                 area_size, PAGE_SIZE);
 -
 +      relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
 +                                                    PFN_PHYS(max_pfn_mapped));
        if (!relocated_ramdisk)
                panic("Cannot find place for new RAMDISK of size %lld\n",
                      ramdisk_size);
  
 -      /* Note: this includes all the mem currently occupied by
 -         the initrd, we rely on that fact to keep the data intact. */
 -      memblock_reserve(relocated_ramdisk, area_size);
        initrd_start = relocated_ramdisk + PAGE_OFFSET;
        initrd_end   = initrd_start + ramdisk_size;
        printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
@@@ -296,13 -301,13 +298,13 @@@ static void __init early_reserve_initrd
  
        memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
  }
 +
  static void __init reserve_initrd(void)
  {
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
 -      u64 mapped_size;
  
        if (!boot_params.hdr.type_of_loader ||
            !ramdisk_image || !ramdisk_size)
  
        initrd_start = 0;
  
 -      mapped_size = memblock_mem_size(max_pfn_mapped);
 -      if (ramdisk_size >= (mapped_size>>1))
 -              panic("initrd too large to handle, "
 -                     "disabling initrd (%lld needed, %lld available)\n",
 -                     ramdisk_size, mapped_size>>1);
 -
        printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
                        ramdisk_end - 1);
  
@@@ -421,13 -432,13 +423,13 @@@ static int __init reserve_crashkernel_l
  {
  #ifdef CONFIG_X86_64
        unsigned long long base, low_base = 0, low_size = 0;
 -      unsigned long total_low_mem;
 +      unsigned long low_mem_limit;
        int ret;
  
 -      total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
 +      low_mem_limit = min(memblock_phys_mem_size(), CRASH_ADDR_LOW_MAX);
  
        /* crashkernel=Y,low */
 -      ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
 +      ret = parse_crashkernel_low(boot_command_line, low_mem_limit, &low_size, &base);
        if (ret) {
                /*
                 * two parts from kernel/dma/swiotlb.c:
                        return 0;
        }
  
 -      low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
 +      low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
        if (!low_base) {
                pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
                       (unsigned long)(low_size >> 20));
                return -ENOMEM;
        }
  
 -      ret = memblock_reserve(low_base, low_size);
 -      if (ret) {
 -              pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
 -              return ret;
 -      }
 -
 -      pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
 +      pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (low RAM limit: %ldMB)\n",
                (unsigned long)(low_size >> 20),
                (unsigned long)(low_base >> 20),
 -              (unsigned long)(total_low_mem >> 20));
 +              (unsigned long)(low_mem_limit >> 20));
  
        crashk_low_res.start = low_base;
        crashk_low_res.end   = low_base + low_size - 1;
@@@ -499,13 -516,13 +501,13 @@@ static void __init reserve_crashkernel(
                 * unless "crashkernel=size[KMG],high" is specified.
                 */
                if (!high)
 -                      crash_base = memblock_find_in_range(CRASH_ALIGN,
 -                                              CRASH_ADDR_LOW_MAX,
 -                                              crash_size, CRASH_ALIGN);
 +                      crash_base = memblock_phys_alloc_range(crash_size,
 +                                              CRASH_ALIGN, CRASH_ALIGN,
 +                                              CRASH_ADDR_LOW_MAX);
                if (!crash_base)
 -                      crash_base = memblock_find_in_range(CRASH_ALIGN,
 -                                              CRASH_ADDR_HIGH_MAX,
 -                                              crash_size, CRASH_ALIGN);
 +                      crash_base = memblock_phys_alloc_range(crash_size,
 +                                              CRASH_ALIGN, CRASH_ALIGN,
 +                                              CRASH_ADDR_HIGH_MAX);
                if (!crash_base) {
                        pr_info("crashkernel reservation failed - No suitable area found.\n");
                        return;
        } else {
                unsigned long long start;
  
 -              start = memblock_find_in_range(crash_base,
 -                                             crash_base + crash_size,
 -                                             crash_size, 1 << 20);
 +              start = memblock_phys_alloc_range(crash_size, SZ_1M, crash_base,
 +                                                crash_base + crash_size);
                if (start != crash_base) {
                        pr_info("crashkernel reservation failed - memory is in use.\n");
                        return;
                }
        }
 -      ret = memblock_reserve(crash_base, crash_size);
 -      if (ret) {
 -              pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
 -              return;
 -      }
  
        if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
                memblock_free(crash_base, crash_size);
@@@ -828,7 -851,6 +830,7 @@@ void __init setup_arch(char **cmdline_p
        early_cpu_init();
        arch_init_ideal_nops();
        jump_label_init();
 +      static_call_init();
        early_ioremap_init();
  
        setup_olpc_ofw_pgd();
        efi_fake_memmap();
        efi_find_mirror();
        efi_esrt_init();
 +      efi_mokvar_table_init();
  
        /*
         * The EFI specification says that boot service code won't be
        prefill_possible_map();
  
        init_cpu_to_node();
 +      init_gi_nodes();
  
        io_apic_init_mappings();
  
diff --combined arch/x86/pci/sta2x11-fixup.c
index 11c0e80b9ed4e103ec0fccf8f32aec613e7cc07d,324a207f99956be3f6f227d56d020361096f04ef..5701d5ba3df4ba2b29c525b5aba576e717d36912
@@@ -15,6 -15,7 +15,6 @@@
  #include <asm/iommu.h>
  
  #define STA2X11_SWIOTLB_SIZE (4*1024*1024)
 -extern int swiotlb_late_init_with_default_size(size_t default_size);
  
  /*
   * We build a list of bus numbers that are under the ConneXt. The
@@@ -132,7 -133,7 +132,7 @@@ static void sta2x11_map_ep(struct pci_d
        struct sta2x11_instance *instance = sta2x11_pdev_to_instance(pdev);
        struct device *dev = &pdev->dev;
        u32 amba_base, max_amba_addr;
-       int i;
+       int i, ret;
  
        if (!instance)
                return;
        pci_read_config_dword(pdev, AHB_BASE(0), &amba_base);
        max_amba_addr = amba_base + STA2X11_AMBA_SIZE - 1;
  
-       dev->dma_pfn_offset = PFN_DOWN(-amba_base);
+       ret = dma_direct_set_offset(dev, 0, amba_base, STA2X11_AMBA_SIZE);
+       if (ret)
+               dev_err(dev, "sta2x11: could not set DMA offset\n");
  
        dev->bus_dma_limit = max_amba_addr;
        pci_set_consistent_dma_mask(pdev, max_amba_addr);
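
dma_direct_set_offset(), used above, is the replacement for poking dev->dma_pfn_offset by hand: it allocates a dev->dma_range_map describing the (cpu_start, dma_start, size) triple, which dma-direct consults on every address translation and which device_release() now frees (see the drivers/base/core.c hunk below). A usage sketch with made-up addresses:

        /*
         * Hypothetical probe path: the CPU sees RAM at 0x40000000 while
         * the device addresses the same bytes starting at bus address 0.
         */
        ret = dma_direct_set_offset(dev, 0x40000000, 0, SZ_1G);
        if (ret)
                dev_warn(dev, "failed to install DMA offset: %d\n", ret);
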
diff --combined arch/xtensa/mm/init.c
index ad9d59d93f39a186ea46597a21dd760571ab6b96,8079007842ac8e43eae8e1dd1122aca8c03176b4..c6fc83efee0c94148a0da0608fec7d6e02ee140d
@@@ -26,7 -26,7 +26,7 @@@
  #include <linux/nodemask.h>
  #include <linux/mm.h>
  #include <linux/of_fdt.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  
  #include <asm/bootparam.h>
  #include <asm/page.h>
@@@ -79,32 -79,67 +79,32 @@@ void __init zones_init(void
        free_area_init(max_zone_pfn);
  }
  
 -#ifdef CONFIG_HIGHMEM
 -static void __init free_area_high(unsigned long pfn, unsigned long end)
 -{
 -      for (; pfn < end; pfn++)
 -              free_highmem_page(pfn_to_page(pfn));
 -}
 -
  static void __init free_highpages(void)
  {
 +#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
 -      struct memblock_region *mem, *res;
 +      phys_addr_t range_start, range_end;
 +      u64 i;
  
 -      reset_all_zones_managed_pages();
        /* set highmem page free */
 -      for_each_memblock(memory, mem) {
 -              unsigned long start = memblock_region_memory_base_pfn(mem);
 -              unsigned long end = memblock_region_memory_end_pfn(mem);
 +      for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 +                              &range_start, &range_end, NULL) {
 +              unsigned long start = PHYS_PFN(range_start);
 +              unsigned long end = PHYS_PFN(range_end);
  
                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;
  
 -              if (memblock_is_nomap(mem))
 -                      continue;
 -
                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;
  
 -              /* Find and exclude any reserved regions */
 -              for_each_memblock(reserved, res) {
 -                      unsigned long res_start, res_end;
 -
 -                      res_start = memblock_region_reserved_base_pfn(res);
 -                      res_end = memblock_region_reserved_end_pfn(res);
 -
 -                      if (res_end < start)
 -                              continue;
 -                      if (res_start < start)
 -                              res_start = start;
 -                      if (res_start > end)
 -                              res_start = end;
 -                      if (res_end > end)
 -                              res_end = end;
 -                      if (res_start != start)
 -                              free_area_high(start, res_start);
 -                      start = res_end;
 -                      if (start == end)
 -                              break;
 -              }
 -
 -              /* And now free anything which remains */
 -              if (start < end)
 -                      free_area_high(start, end);
 +              for (; start < end; start++)
 +                      free_highmem_page(pfn_to_page(start));
        }
 -}
 -#else
 -static void __init free_highpages(void)
 -{
 -}
  #endif
 +}
  
  /*
   * Initialize memory pages.
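
The xtensa rewrite above is more than a mechanical iterator swap: for_each_free_mem_range() walks memory with the reserved regions already subtracted, which is exactly what lets the old manual exclusion loop over memblock.reserved disappear. Condensed, the surviving logic is:

        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
                                &range_start, &range_end, NULL) {
                /* clip to highmem and hand each remaining page back */
                unsigned long pfn = max(PHYS_PFN(range_start), max_low);

                for (; pfn < PHYS_PFN(range_end); pfn++)
                        free_highmem_page(pfn_to_page(pfn));
        }
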
diff --combined drivers/acpi/arm64/iort.c
index 6f89c16f45f3a19f51dd93bbd5fdd14216adc1e1,6446b2572f075f7015a6be7d252ce7721d512689..9929ff50c0c095f465d7a66272c8b2e35e9fd26a
@@@ -18,6 -18,7 +18,7 @@@
  #include <linux/pci.h>
  #include <linux/platform_device.h>
  #include <linux/slab.h>
+ #include <linux/dma-map-ops.h>
  
  #define IORT_TYPE_MASK(type)  (1 << (type))
  #define IORT_MSI_TYPE         (1 << ACPI_IORT_NODE_ITS_GROUP)
@@@ -811,7 -812,8 +812,7 @@@ static inline const struct iommu_ops *i
        return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
  }
  
 -static inline int iort_add_device_replay(const struct iommu_ops *ops,
 -                                       struct device *dev)
 +static inline int iort_add_device_replay(struct device *dev)
  {
        int err = 0;
  
@@@ -1071,7 -1073,7 +1072,7 @@@ const struct iommu_ops *iort_iommu_conf
         */
        if (!err) {
                ops = iort_fwspec_iommu_ops(dev);
 -              err = iort_add_device_replay(ops, dev);
 +              err = iort_add_device_replay(dev);
        }
  
        /* Ignore all other errors apart from EPROBE_DEFER */
  }
  
  #else
 -static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
 -{ return NULL; }
 -static inline int iort_add_device_replay(const struct iommu_ops *ops,
 -                                       struct device *dev)
 -{ return 0; }
  int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
  { return 0; }
  const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
@@@ -1178,8 -1185,9 +1179,9 @@@ void iort_dma_setup(struct device *dev
        *dma_addr = dmaaddr;
        *dma_size = size;
  
-       dev->dma_pfn_offset = PFN_DOWN(offset);
-       dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
+       ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
+       dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
  }
  
  static void __init acpi_iort_register_irq(int hwirq, const char *name,
@@@ -1329,7 -1337,7 +1331,7 @@@ static int  __init arm_smmu_v3_set_prox
  
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
        if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
 -              int dev_node = acpi_map_pxm_to_node(smmu->pxm);
 +              int dev_node = pxm_to_node(smmu->pxm);
  
                if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
                        return -EINVAL;
diff --combined drivers/acpi/scan.c
index 684c726828e1cb2af9cff027c857e58c1c885084,e0b7d7a605b5a20d0364b9c5f236a051f9f68e91..a896e5e87c935d923c1cb9bc93b7bce363dfaff3
@@@ -13,7 -13,7 +13,7 @@@
  #include <linux/kthread.h>
  #include <linux/dmi.h>
  #include <linux/nls.h>
- #include <linux/dma-mapping.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/platform_data/x86/apple.h>
  #include <linux/pgtable.h>
  
@@@ -898,7 -898,8 +898,7 @@@ static void acpi_bus_get_wakeup_device_
         */
        err = acpi_device_sleep_wake(device, 0, 0, 0);
        if (err)
 -              ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 -                              "error in _DSW or _PSW evaluation\n"));
 +              pr_debug("error in _DSW or _PSW evaluation\n");
  }
  
  static void acpi_bus_init_power_state(struct acpi_device *device, int state)
diff --combined drivers/base/core.c
index 3b9404921da908359d04e304ec10c994ec1c6b63,d00ff3ec8f0fa06f97a8beabecc064f616de9c33..b919e6d01d9a01198ebc0ea8c9d5ae7545859200
@@@ -26,7 -26,6 +26,7 @@@
  #include <linux/pm_runtime.h>
  #include <linux/netdevice.h>
  #include <linux/sched/signal.h>
 +#include <linux/sched/mm.h>
  #include <linux/sysfs.h>
  
  #include "base.h"
@@@ -240,35 -239,27 +240,35 @@@ void device_pm_move_to_tail(struct devi
  #define to_devlink(dev)       container_of((dev), struct device_link, link_dev)
  
  static ssize_t status_show(struct device *dev,
 -                        struct device_attribute *attr, char *buf)
 +                         struct device_attribute *attr, char *buf)
  {
 -      char *status;
 +      const char *output;
  
        switch (to_devlink(dev)->status) {
        case DL_STATE_NONE:
 -              status = "not tracked"; break;
 +              output = "not tracked";
 +              break;
        case DL_STATE_DORMANT:
 -              status = "dormant"; break;
 +              output = "dormant";
 +              break;
        case DL_STATE_AVAILABLE:
 -              status = "available"; break;
 +              output = "available";
 +              break;
        case DL_STATE_CONSUMER_PROBE:
 -              status = "consumer probing"; break;
 +              output = "consumer probing";
 +              break;
        case DL_STATE_ACTIVE:
 -              status = "active"; break;
 +              output = "active";
 +              break;
        case DL_STATE_SUPPLIER_UNBIND:
 -              status = "supplier unbinding"; break;
 +              output = "supplier unbinding";
 +              break;
        default:
 -              status = "unknown"; break;
 +              output = "unknown";
 +              break;
        }
 -      return sprintf(buf, "%s\n", status);
 +
 +      return sysfs_emit(buf, "%s\n", output);
  }
  static DEVICE_ATTR_RO(status);
  
@@@ -276,16 -267,16 +276,16 @@@ static ssize_t auto_remove_on_show(stru
                                   struct device_attribute *attr, char *buf)
  {
        struct device_link *link = to_devlink(dev);
 -      char *str;
 +      const char *output;
  
        if (link->flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
 -              str = "supplier unbind";
 +              output = "supplier unbind";
        else if (link->flags & DL_FLAG_AUTOREMOVE_CONSUMER)
 -              str = "consumer unbind";
 +              output = "consumer unbind";
        else
 -              str = "never";
 +              output = "never";
  
 -      return sprintf(buf, "%s\n", str);
 +      return sysfs_emit(buf, "%s\n", output);
  }
  static DEVICE_ATTR_RO(auto_remove_on);
  
@@@ -294,7 -285,7 +294,7 @@@ static ssize_t runtime_pm_show(struct d
  {
        struct device_link *link = to_devlink(dev);
  
 -      return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
 +      return sysfs_emit(buf, "%d\n", !!(link->flags & DL_FLAG_PM_RUNTIME));
  }
  static DEVICE_ATTR_RO(runtime_pm);
  
@@@ -303,8 -294,7 +303,8 @@@ static ssize_t sync_state_only_show(str
  {
        struct device_link *link = to_devlink(dev);
  
 -      return sprintf(buf, "%d\n", !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
 +      return sysfs_emit(buf, "%d\n",
 +                        !!(link->flags & DL_FLAG_SYNC_STATE_ONLY));
  }
  static DEVICE_ATTR_RO(sync_state_only);
  
@@@ -1069,7 -1059,7 +1069,7 @@@ static ssize_t waiting_for_supplier_sho
              && dev->links.need_for_probe;
        mutex_unlock(&wfs_lock);
        device_unlock(dev);
 -      return sprintf(buf, "%u\n", val);
 +      return sysfs_emit(buf, "%u\n", val);
  }
  static DEVICE_ATTR_RO(waiting_for_supplier);
  
@@@ -1719,7 -1709,7 +1719,7 @@@ ssize_t device_show_ulong(struct devic
                          char *buf)
  {
        struct dev_ext_attribute *ea = to_ext_attr(attr);
 -      return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
 +      return sysfs_emit(buf, "%lx\n", *(unsigned long *)(ea->var));
  }
  EXPORT_SYMBOL_GPL(device_show_ulong);
  
@@@ -1749,7 -1739,7 +1749,7 @@@ ssize_t device_show_int(struct device *
  {
        struct dev_ext_attribute *ea = to_ext_attr(attr);
  
 -      return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
 +      return sysfs_emit(buf, "%d\n", *(int *)(ea->var));
  }
  EXPORT_SYMBOL_GPL(device_show_int);
  
@@@ -1770,7 -1760,7 +1770,7 @@@ ssize_t device_show_bool(struct device 
  {
        struct dev_ext_attribute *ea = to_ext_attr(attr);
  
 -      return snprintf(buf, PAGE_SIZE, "%d\n", *(bool *)(ea->var));
 +      return sysfs_emit(buf, "%d\n", *(bool *)(ea->var));
  }
  EXPORT_SYMBOL_GPL(device_show_bool);
  
@@@ -1798,6 -1788,8 +1798,8 @@@ static void device_release(struct kobje
         */
        devres_release_all(dev);
  
+       kfree(dev->dma_range_map);
        if (dev->release)
                dev->release(dev);
        else if (dev->type && dev->type->release)
@@@ -1942,7 -1934,7 +1944,7 @@@ static ssize_t uevent_show(struct devic
        struct kset *kset;
        struct kobj_uevent_env *env = NULL;
        int i;
 -      size_t count = 0;
 +      int len = 0;
        int retval;
  
        /* search the kset, the device belongs to */
  
        /* copy keys to file */
        for (i = 0; i < env->envp_idx; i++)
 -              count += sprintf(&buf[count], "%s\n", env->envp[i]);
 +              len += sysfs_emit_at(buf, len, "%s\n", env->envp[i]);
  out:
        kfree(env);
 -      return count;
 +      return len;
  }
  
  static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
@@@ -2002,7 -1994,7 +2004,7 @@@ static ssize_t online_show(struct devic
        device_lock(dev);
        val = !dev->offline;
        device_unlock(dev);
 -      return sprintf(buf, "%u\n", val);
 +      return sysfs_emit(buf, "%u\n", val);
  }
  
  static ssize_t online_store(struct device *dev, struct device_attribute *attr,
@@@ -3072,7 -3064,6 +3074,7 @@@ void device_del(struct device *dev
        struct device *parent = dev->parent;
        struct kobject *glue_dir = NULL;
        struct class_interface *class_intf;
 +      unsigned int noio_flag;
  
        device_lock(dev);
        kill_device(dev);
        /* Notify clients of device removal.  This call must come
         * before dpm_sysfs_remove().
         */
 +      noio_flag = memalloc_noio_save();
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DEL_DEVICE, dev);
        glue_dir = get_glue_dir(dev);
        kobject_del(&dev->kobj);
        cleanup_glue_dir(dev, glue_dir);
 +      memalloc_noio_restore(noio_flag);
        put_device(parent);
  }
  EXPORT_SYMBOL_GPL(device_del);
@@@ -3337,7 -3326,7 +3339,7 @@@ struct device *device_find_child_by_nam
  
        klist_iter_init(&parent->p->klist_children, &i);
        while ((child = next_device(&i)))
 -              if (!strcmp(dev_name(child), name) && get_device(child))
 +              if (sysfs_streq(dev_name(child), name) && get_device(child))
                        break;
        klist_iter_exit(&i);
        return child;
@@@ -4074,21 -4063,22 +4076,21 @@@ void device_shutdown(void
   */
  
  #ifdef CONFIG_PRINTK
 -static int
 -create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
 +static void
 +set_dev_info(const struct device *dev, struct dev_printk_info *dev_info)
  {
        const char *subsys;
 -      size_t pos = 0;
 +
 +      memset(dev_info, 0, sizeof(*dev_info));
  
        if (dev->class)
                subsys = dev->class->name;
        else if (dev->bus)
                subsys = dev->bus->name;
        else
 -              return 0;
 +              return;
  
 -      pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
 -      if (pos >= hdrlen)
 -              goto overflow;
 +      strscpy(dev_info->subsystem, subsys, sizeof(dev_info->subsystem));
  
        /*
         * Add device identifier DEVICE=:
                        c = 'b';
                else
                        c = 'c';
 -              pos++;
 -              pos += snprintf(hdr + pos, hdrlen - pos,
 -                              "DEVICE=%c%u:%u",
 -                              c, MAJOR(dev->devt), MINOR(dev->devt));
 +
 +              snprintf(dev_info->device, sizeof(dev_info->device),
 +                       "%c%u:%u", c, MAJOR(dev->devt), MINOR(dev->devt));
        } else if (strcmp(subsys, "net") == 0) {
                struct net_device *net = to_net_dev(dev);
  
 -              pos++;
 -              pos += snprintf(hdr + pos, hdrlen - pos,
 -                              "DEVICE=n%u", net->ifindex);
 +              snprintf(dev_info->device, sizeof(dev_info->device),
 +                       "n%u", net->ifindex);
        } else {
 -              pos++;
 -              pos += snprintf(hdr + pos, hdrlen - pos,
 -                              "DEVICE=+%s:%s", subsys, dev_name(dev));
 +              snprintf(dev_info->device, sizeof(dev_info->device),
 +                       "+%s:%s", subsys, dev_name(dev));
        }
 -
 -      if (pos >= hdrlen)
 -              goto overflow;
 -
 -      return pos;
 -
 -overflow:
 -      dev_WARN(dev, "device/subsystem name too long");
 -      return 0;
  }
  
  int dev_vprintk_emit(int level, const struct device *dev,
                     const char *fmt, va_list args)
  {
 -      char hdr[128];
 -      size_t hdrlen;
 +      struct dev_printk_info dev_info;
  
 -      hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
 +      set_dev_info(dev, &dev_info);
  
 -      return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
 +      return vprintk_emit(0, level, &dev_info, fmt, args);
  }
  EXPORT_SYMBOL(dev_vprintk_emit);
  
diff --combined drivers/base/dd.c
index b52d69eb4e71b30b2c3b0f3010af144fc14fb737,b3d43ace5c2b94ed7bb8c5fa63d76050871f8c15..b42229b74fd69688b1af2ea0de19008c8e169ba2
@@@ -19,7 -19,7 +19,7 @@@
  #include <linux/debugfs.h>
  #include <linux/device.h>
  #include <linux/delay.h>
- #include <linux/dma-mapping.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/init.h>
  #include <linux/module.h>
  #include <linux/kthread.h>
@@@ -486,8 -486,7 +486,8 @@@ static ssize_t state_synced_show(struc
        device_lock(dev);
        val = dev->state_synced;
        device_unlock(dev);
 -      return sprintf(buf, "%u\n", val);
 +
 +      return sysfs_emit(buf, "%u\n", val);
  }
  static DEVICE_ATTR_RO(state_synced);
  
@@@ -659,14 -658,15 +659,14 @@@ done
   */
  static int really_probe_debug(struct device *dev, struct device_driver *drv)
  {
 -      ktime_t calltime, delta, rettime;
 +      ktime_t calltime, rettime;
        int ret;
  
        calltime = ktime_get();
        ret = really_probe(dev, drv);
        rettime = ktime_get();
 -      delta = ktime_sub(rettime, calltime);
        pr_debug("probe of %s returned %d after %lld usecs\n",
 -               dev_name(dev), ret, (s64) ktime_to_us(delta));
 +               dev_name(dev), ret, ktime_us_delta(rettime, calltime));
        return ret;
  }
  
diff --combined drivers/gpu/drm/exynos/exynos_drm_dma.c
index 5887f7f52f96e35123c6fce43d0dcb067b20341e,78b8f3403c3039ea2b8d471ce2e4ccb2400f5cb4..0644936afee26ac2e47dbf3aa3e97791e76098e9
@@@ -5,7 -5,7 +5,7 @@@
  // Author: Andrzej Hajda <[email protected]>
  
  #include <linux/dma-iommu.h>
- #include <linux/dma-mapping.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/iommu.h>
  #include <linux/platform_device.h>
  
  #define EXYNOS_DEV_ADDR_START 0x20000000
  #define EXYNOS_DEV_ADDR_SIZE  0x40000000
  
 -static inline int configure_dma_max_seg_size(struct device *dev)
 -{
 -      if (!dev->dma_parms)
 -              dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
 -      if (!dev->dma_parms)
 -              return -ENOMEM;
 -
 -      dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
 -      return 0;
 -}
 -
 -static inline void clear_dma_max_seg_size(struct device *dev)
 -{
 -      kfree(dev->dma_parms);
 -      dev->dma_parms = NULL;
 -}
 -
  /*
   * drm_iommu_attach_device- attach device to iommu mapping
   *
@@@ -52,7 -69,10 +52,7 @@@ static int drm_iommu_attach_device(stru
                return -EINVAL;
        }
  
 -      ret = configure_dma_max_seg_size(subdrv_dev);
 -      if (ret)
 -              return ret;
 -
 +      dma_set_max_seg_size(subdrv_dev, DMA_BIT_MASK(32));
        if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
                /*
                 * Keep the original DMA mapping of the sub-device and
@@@ -69,6 -89,9 +69,6 @@@
                ret = iommu_attach_device(priv->mapping, subdrv_dev);
        }
  
 -      if (ret)
 -              clear_dma_max_seg_size(subdrv_dev);
 -
        return ret;
  }
  
@@@ -91,6 -114,8 +91,6 @@@ static void drm_iommu_detach_device(str
                arm_iommu_attach_device(subdrv_dev, *dma_priv);
        } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
                iommu_detach_device(priv->mapping, subdrv_dev);
 -
 -      clear_dma_max_seg_size(subdrv_dev);
  }
  
  int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
diff --combined drivers/gpu/drm/exynos/exynos_drm_gem.c
index 1716a023bca0326681f46a0474fa84e22044b413,07073222b8f6912f2a6659d5c89ecbf6ec3a5919..7777f19c9d38ff879434140ff44ce44f45c448db
@@@ -42,8 -42,6 +42,6 @@@ static int exynos_drm_alloc_buf(struct 
        if (exynos_gem->flags & EXYNOS_BO_WC ||
                        !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
                attr |= DMA_ATTR_WRITE_COMBINE;
-       else
-               attr |= DMA_ATTR_NON_CONSISTENT;
  
        /* FBDev emulation requires kernel mapping */
        if (!kvmap)
@@@ -431,10 -429,27 +429,10 @@@ exynos_drm_gem_prime_import_sg_table(st
  {
        struct exynos_drm_gem *exynos_gem;
  
 -      if (sgt->nents < 1)
 +      /* check if the entries in the sg_table are contiguous */
 +      if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
 +              DRM_ERROR("buffer chunks must be mapped contiguously");
                return ERR_PTR(-EINVAL);
 -
 -      /*
 -       * Check if the provided buffer has been mapped as contiguous
 -       * into DMA address space.
 -       */
 -      if (sgt->nents > 1) {
 -              dma_addr_t next_addr = sg_dma_address(sgt->sgl);
 -              struct scatterlist *s;
 -              unsigned int i;
 -
 -              for_each_sg(sgt->sgl, s, sgt->nents, i) {
 -                      if (!sg_dma_len(s))
 -                              break;
 -                      if (sg_dma_address(s) != next_addr) {
 -                              DRM_ERROR("buffer chunks must be mapped contiguously");
 -                              return ERR_PTR(-EINVAL);
 -                      }
 -                      next_addr = sg_dma_address(s) + sg_dma_len(s);
 -              }
        }
  
        exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
diff --combined drivers/gpu/drm/msm/msm_gem.c
index ec602113be78cd5647b02d93f1f95368c78db476,6787ab0d0e431dfe0f1743d3007119d5aa115664..04be4cfcccc1851f0349b0d1eebe9a30dde3d870
@@@ -4,6 -4,7 +4,7 @@@
   * Author: Rob Clark <[email protected]>
   */
  
+ #include <linux/dma-map-ops.h>
  #include <linux/spinlock.h>
  #include <linux/shmem_fs.h>
  #include <linux/dma-buf.h>
@@@ -52,14 -53,26 +53,14 @@@ static void sync_for_device(struct msm_
  {
        struct device *dev = msm_obj->base.dev->dev;
  
 -      if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 -              dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      } else {
 -              dma_map_sg(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      }
 +      dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void sync_for_cpu(struct msm_gem_object *msm_obj)
  {
        struct device *dev = msm_obj->base.dev->dev;
  
 -      if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
 -              dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      } else {
 -              dma_unmap_sg(dev, msm_obj->sgt->sgl,
 -                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 -      }
 +      dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* allocate pages from VRAM carveout, used when no IOMMU: */
@@@ -114,7 -127,7 +115,7 @@@ static struct page **get_pages(struct d
  
                msm_obj->pages = p;
  
 -              msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
 +              msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);
  
@@@ -741,31 -754,31 +742,31 @@@ int msm_gem_sync_object(struct drm_gem_
        return 0;
  }
  
 -void msm_gem_move_to_active(struct drm_gem_object *obj,
 -              struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 +void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
  {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 +      WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 -      msm_obj->gpu = gpu;
 -      if (exclusive)
 -              dma_resv_add_excl_fence(obj->resv, fence);
 -      else
 -              dma_resv_add_shared_fence(obj->resv, fence);
 -      list_del_init(&msm_obj->mm_list);
 -      list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 +
 +      if (!atomic_fetch_inc(&msm_obj->active_count)) {
 +              msm_obj->gpu = gpu;
 +              list_del_init(&msm_obj->mm_list);
 +              list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 +      }
  }
  
 -void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 +void msm_gem_active_put(struct drm_gem_object *obj)
  {
 -      struct drm_device *dev = obj->dev;
 -      struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 +      struct msm_drm_private *priv = obj->dev->dev_private;
  
 -      WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 +      WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
  
 -      msm_obj->gpu = NULL;
 -      list_del_init(&msm_obj->mm_list);
 -      list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 +      if (!atomic_dec_return(&msm_obj->active_count)) {
 +              msm_obj->gpu = NULL;
 +              list_del_init(&msm_obj->mm_list);
 +              list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 +      }
  }
  
  int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@@ -840,28 -853,11 +841,28 @@@ void msm_gem_describe(struct drm_gem_ob
  
                seq_puts(m, "      vmas:");
  
 -              list_for_each_entry(vma, &msm_obj->vmas, list)
 -                      seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
 -                              vma->aspace != NULL ? vma->aspace->name : NULL,
 -                              vma->iova, vma->mapped ? "mapped" : "unmapped",
 +              list_for_each_entry(vma, &msm_obj->vmas, list) {
 +                      const char *name, *comm;
 +                      if (vma->aspace) {
 +                              struct msm_gem_address_space *aspace = vma->aspace;
 +                              struct task_struct *task =
 +                                      get_pid_task(aspace->pid, PIDTYPE_PID);
 +                              if (task) {
 +                                      comm = kstrdup(task->comm, GFP_KERNEL);
 +                              } else {
 +                                      comm = NULL;
 +                              }
 +                              name = aspace->name;
 +                      } else {
 +                              name = comm = NULL;
 +                      }
 +                      seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
 +                              name, comm ? ":" : "", comm ? comm : "",
 +                              vma->aspace, vma->iova,
 +                              vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);
 +                      kfree(comm);
 +              }
  
                seq_puts(m, "\n");
        }
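
The msm hunk above shows the point of the new sg_table-centric helpers: dma_map_sgtable() records the mapped entry count inside the struct sg_table itself (sgt->nents, distinct from sgt->orig_nents), so callers stop passing, and mixing up, the two counts. A sketch of the pairing, assuming sgt was built earlier, e.g. with sg_alloc_table_from_pages():

        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                return ret;     /* nothing was mapped */

        /* ... device I/O; walk entries with for_each_sgtable_dma_sg() ... */

        dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
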
diff --combined drivers/gpu/drm/sun4i/sun4i_backend.c
index 2f26f85ef538439c867dca2a21e23883ff1b07ce,9cf6473032f7d02ec70eb31ffeddcfff627445a8..77497b45f9a28d25b54860de57bc3b0431286f60
@@@ -11,6 -11,7 +11,7 @@@
  #include <linux/module.h>
  #include <linux/of_device.h>
  #include <linux/of_graph.h>
+ #include <linux/dma-mapping.h>
  #include <linux/platform_device.h>
  #include <linux/reset.h>
  
@@@ -768,7 -769,7 +769,7 @@@ static const struct sunxi_engine_ops su
        .vblank_quirk                   = sun4i_backend_vblank_quirk,
  };
  
 -static struct regmap_config sun4i_backend_regmap_config = {
 +static const struct regmap_config sun4i_backend_regmap_config = {
        .reg_bits       = 32,
        .val_bits       = 32,
        .reg_stride     = 4,
@@@ -810,8 -811,13 +811,13 @@@ static int sun4i_backend_bind(struct de
                 * because of an old DT, we need to set the DMA offset by hand
                 * on our device since the RAM mapping is at 0 for the DMA bus,
                 * unlike the CPU.
+                *
+                * XXX(hch): this has no business in a driver and needs to move
+                * to the device tree.
                 */
-               drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+               ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
+               if (ret)
+                       return ret;
        }
  
        backend->engine.node = dev->of_node;
diff --combined drivers/iommu/amd/iommu.c
index 4b1b02c80f556d1a84fbed9bd8e53e7cd375d56a,5396eb8d730bca5f2e04fd6bf24a9059e892795b..b9cf59443843b560075769a0d746fda250a2ab9b
@@@ -18,7 -18,7 +18,7 @@@
  #include <linux/slab.h>
  #include <linux/debugfs.h>
  #include <linux/scatterlist.h>
- #include <linux/dma-mapping.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/dma-direct.h>
  #include <linux/dma-iommu.h>
  #include <linux/iommu-helper.h>
@@@ -28,7 -28,6 +28,6 @@@
  #include <linux/export.h>
  #include <linux/irq.h>
  #include <linux/msi.h>
- #include <linux/dma-contiguous.h>
  #include <linux/irqdomain.h>
  #include <linux/percpu.h>
  #include <linux/iova.h>
@@@ -486,67 -485,6 +485,67 @@@ static void dump_command(unsigned long 
                pr_err("CMD[%d]: %08x\n", i, cmd->data[i]);
  }
  
 +static void amd_iommu_report_rmp_hw_error(volatile u32 *event)
 +{
 +      struct iommu_dev_data *dev_data = NULL;
 +      int devid, vmg_tag, flags;
 +      struct pci_dev *pdev;
 +      u64 spa;
 +
 +      devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 +      vmg_tag = (event[1]) & 0xFFFF;
 +      flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 +      spa     = ((u64)event[3] << 32) | (event[2] & 0xFFFFFFF8);
 +
 +      pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
 +                                         devid & 0xff);
 +      if (pdev)
 +              dev_data = dev_iommu_priv_get(&pdev->dev);
 +
 +      if (dev_data && __ratelimit(&dev_data->rs)) {
 +              pci_err(pdev, "Event logged [RMP_HW_ERROR vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
 +                      vmg_tag, spa, flags);
 +      } else {
 +              pr_err_ratelimited("Event logged [RMP_HW_ERROR device=%02x:%02x.%x, vmg_tag=0x%04x, spa=0x%llx, flags=0x%04x]\n",
 +                      PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 +                      vmg_tag, spa, flags);
 +      }
 +
 +      if (pdev)
 +              pci_dev_put(pdev);
 +}
 +
 +static void amd_iommu_report_rmp_fault(volatile u32 *event)
 +{
 +      struct iommu_dev_data *dev_data = NULL;
 +      int devid, flags_rmp, vmg_tag, flags;
 +      struct pci_dev *pdev;
 +      u64 gpa;
 +
 +      devid     = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
 +      flags_rmp = (event[0] >> EVENT_FLAGS_SHIFT) & 0xFF;
 +      vmg_tag   = (event[1]) & 0xFFFF;
 +      flags     = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 +      gpa       = ((u64)event[3] << 32) | event[2];
 +
 +      pdev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(devid),
 +                                         devid & 0xff);
 +      if (pdev)
 +              dev_data = dev_iommu_priv_get(&pdev->dev);
 +
 +      if (dev_data && __ratelimit(&dev_data->rs)) {
 +              pci_err(pdev, "Event logged [RMP_PAGE_FAULT vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
 +                      vmg_tag, gpa, flags_rmp, flags);
 +      } else {
 +              pr_err_ratelimited("Event logged [RMP_PAGE_FAULT device=%02x:%02x.%x, vmg_tag=0x%04x, gpa=0x%llx, flags_rmp=0x%04x, flags=0x%04x]\n",
 +                      PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 +                      vmg_tag, gpa, flags_rmp, flags);
 +      }
 +
 +      if (pdev)
 +              pci_dev_put(pdev);
 +}
 +
  static void amd_iommu_report_page_fault(u16 devid, u16 domain_id,
                                        u64 address, int flags)
  {
  static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
  {
        struct device *dev = iommu->iommu.dev;
 -      int type, devid, pasid, flags, tag;
 +      int type, devid, flags, tag;
        volatile u32 *event = __evt;
        int count = 0;
        u64 address;
 +      u32 pasid;
  
  retry:
        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
 +      case EVENT_TYPE_RMP_FAULT:
 +              amd_iommu_report_rmp_fault(event);
 +              break;
 +      case EVENT_TYPE_RMP_HW_ERR:
 +              amd_iommu_report_rmp_hw_error(event);
 +              break;
        case EVENT_TYPE_INV_PPR_REQ:
                pasid = PPR_PASID(*((u64 *)__evt));
                tag = event[1] & 0x03FF;
@@@ -797,21 -728,7 +796,21 @@@ static void iommu_poll_ga_log(struct am
                }
        }
  }
 -#endif /* CONFIG_IRQ_REMAP */
 +
 +static void
 +amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu)
 +{
 +      if (!irq_remapping_enabled || !dev_is_pci(dev) ||
 +          pci_dev_has_special_msi_domain(to_pci_dev(dev)))
 +              return;
 +
 +      dev_set_msi_domain(dev, iommu->msi_domain);
 +}
 +
 +#else /* CONFIG_IRQ_REMAP */
 +static inline void
 +amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
 +#endif /* !CONFIG_IRQ_REMAP */
  
  #define AMD_IOMMU_INT_MASK    \
        (MMIO_STATUS_EVT_INT_MASK | \
@@@ -874,11 -791,11 +873,11 @@@ irqreturn_t amd_iommu_int_handler(int i
   *
   ****************************************************************************/
  
 -static int wait_on_sem(volatile u64 *sem)
 +static int wait_on_sem(struct amd_iommu *iommu, u64 data)
  {
        int i = 0;
  
 -      while (*sem == 0 && i < LOOP_TIMEOUT) {
 +      while (*iommu->cmd_sem != data && i < LOOP_TIMEOUT) {
                udelay(1);
                i += 1;
        }
@@@ -909,16 -826,16 +908,16 @@@ static void copy_cmd_to_buffer(struct a
        writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
  }
  
 -static void build_completion_wait(struct iommu_cmd *cmd, u64 address)
 +static void build_completion_wait(struct iommu_cmd *cmd,
 +                                struct amd_iommu *iommu,
 +                                u64 data)
  {
 -      u64 paddr = iommu_virt_to_phys((void *)address);
 -
 -      WARN_ON(address & 0x7ULL);
 +      u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
  
        memset(cmd, 0, sizeof(*cmd));
        cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
        cmd->data[1] = upper_32_bits(paddr);
 -      cmd->data[2] = 1;
 +      cmd->data[2] = data;
        CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
  }
  
@@@ -991,7 -908,7 +990,7 @@@ static void build_inv_iotlb_pages(struc
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
  }
  
 -static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
 +static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
                                  u64 address, bool size)
  {
        memset(cmd, 0, sizeof(*cmd));
        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
  }
  
 -static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
 +static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
                                  int qdep, u64 address, bool size)
  {
        memset(cmd, 0, sizeof(*cmd));
        CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
  }
  
 -static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
 +static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
                               int status, int tag, bool gn)
  {
        memset(cmd, 0, sizeof(*cmd));
@@@ -1127,21 -1044,22 +1126,21 @@@ static int iommu_completion_wait(struc
        struct iommu_cmd cmd;
        unsigned long flags;
        int ret;
 +      u64 data;
  
        if (!iommu->need_sync)
                return 0;
  
 -
 -      build_completion_wait(&cmd, (u64)&iommu->cmd_sem);
 -
        raw_spin_lock_irqsave(&iommu->lock, flags);
  
 -      iommu->cmd_sem = 0;
 +      data = ++iommu->cmd_sem_val;
 +      build_completion_wait(&cmd, iommu, data);
  
        ret = __iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                goto out_unlock;
  
 -      ret = wait_on_sem(&iommu->cmd_sem);
 +      ret = wait_on_sem(iommu, data);
  
  out_unlock:
        raw_spin_unlock_irqrestore(&iommu->lock, flags);
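
The hunk above replaces the reset-to-zero completion flag with a monotonically increasing sequence value: every waiter queues a COMPLETION_WAIT command carrying its own number and spins until the IOMMU has stored exactly that value, so a stale store from an earlier wait can never satisfy a later one. A minimal standalone sketch of the pattern, with hypothetical names and the hardware store emulated in plain C:

#include <stdbool.h>
#include <stdint.h>

struct cmd_queue {
	volatile uint64_t sem;	/* written by the device (emulated below) */
	uint64_t sem_val;	/* next sequence number, held under a lock */
};

/* stand-in for the IOMMU completing a COMPLETION_WAIT with value 'data' */
static void device_store(struct cmd_queue *q, uint64_t data)
{
	q->sem = data;
}

static bool wait_on_sem(struct cmd_queue *q, uint64_t data)
{
	for (int i = 0; i < 100000; i++) {
		if (q->sem == data)
			return true;
		/* the real driver does udelay(1) here */
	}
	return false;
}

static bool completion_wait(struct cmd_queue *q)
{
	uint64_t data = ++q->sem_val;	/* taken under iommu->lock upstream */

	device_store(q, data);		/* queue + completion, collapsed */
	return wait_on_sem(q, data);
}
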
@@@ -2238,7 -2156,6 +2237,7 @@@ static struct iommu_device *amd_iommu_p
                iommu_dev = ERR_PTR(ret);
                iommu_ignore_device(dev);
        } else {
 +              amd_iommu_set_pci_msi_domain(dev, iommu);
                iommu_dev = &iommu->iommu;
        }
  
@@@ -2868,7 -2785,7 +2867,7 @@@ out
  }
  EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
  
 -static int __flush_pasid(struct protection_domain *domain, int pasid,
 +static int __flush_pasid(struct protection_domain *domain, u32 pasid,
                         u64 address, bool size)
  {
        struct iommu_dev_data *dev_data;
        return ret;
  }
  
 -static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
 +static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
                                  u64 address)
  {
        return __flush_pasid(domain, pasid, address, false);
  }
  
 -int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
 +int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
                         u64 address)
  {
        struct protection_domain *domain = to_pdomain(dom);
  }
  EXPORT_SYMBOL(amd_iommu_flush_page);
  
 -static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
 +static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
  {
        return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
                             true);
  }
  
 -int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
 +int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
  {
        struct protection_domain *domain = to_pdomain(dom);
        unsigned long flags;
  }
  EXPORT_SYMBOL(amd_iommu_flush_tlb);
  
 -static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
 +static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
  {
        int index;
        u64 *pte;
        return pte;
  }
  
 -static int __set_gcr3(struct protection_domain *domain, int pasid,
 +static int __set_gcr3(struct protection_domain *domain, u32 pasid,
                      unsigned long cr3)
  {
        struct domain_pgtable pgtable;
        return __amd_iommu_flush_tlb(domain, pasid);
  }
  
 -static int __clear_gcr3(struct protection_domain *domain, int pasid)
 +static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
  {
        struct domain_pgtable pgtable;
        u64 *pte;
        return __amd_iommu_flush_tlb(domain, pasid);
  }
  
 -int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
 +int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
                              unsigned long cr3)
  {
        struct protection_domain *domain = to_pdomain(dom);
  }
  EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
  
 -int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
 +int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
  {
        struct protection_domain *domain = to_pdomain(dom);
        unsigned long flags;
  }
  EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
  
 -int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
 +int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
                           int status, int tag)
  {
        struct iommu_dev_data *dev_data;
@@@ -3601,51 -3518,69 +3600,51 @@@ static void irte_ga_clear_allocated(str
  
  static int get_devid(struct irq_alloc_info *info)
  {
 -      int devid = -1;
 -
        switch (info->type) {
        case X86_IRQ_ALLOC_TYPE_IOAPIC:
 -              devid     = get_ioapic_devid(info->ioapic_id);
 -              break;
 +      case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
 +              return get_ioapic_devid(info->devid);
        case X86_IRQ_ALLOC_TYPE_HPET:
 -              devid     = get_hpet_devid(info->hpet_id);
 -              break;
 -      case X86_IRQ_ALLOC_TYPE_MSI:
 -      case X86_IRQ_ALLOC_TYPE_MSIX:
 -              devid = get_device_id(&info->msi_dev->dev);
 -              break;
 +      case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
 +              return get_hpet_devid(info->devid);
 +      case X86_IRQ_ALLOC_TYPE_PCI_MSI:
 +      case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
 +              return get_device_id(msi_desc_to_dev(info->desc));
        default:
 -              BUG_ON(1);
 -              break;
 +              WARN_ON_ONCE(1);
 +              return -1;
        }
 -
 -      return devid;
  }
  
 -static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
 +static struct irq_domain *get_irq_domain_for_devid(struct irq_alloc_info *info,
 +                                                 int devid)
  {
 -      struct amd_iommu *iommu;
 -      int devid;
 +      struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
  
 -      if (!info)
 +      if (!iommu)
                return NULL;
  
 -      devid = get_devid(info);
 -      if (devid >= 0) {
 -              iommu = amd_iommu_rlookup_table[devid];
 -              if (iommu)
 -                      return iommu->ir_domain;
 +      switch (info->type) {
 +      case X86_IRQ_ALLOC_TYPE_IOAPIC_GET_PARENT:
 +      case X86_IRQ_ALLOC_TYPE_HPET_GET_PARENT:
 +              return iommu->ir_domain;
 +      default:
 +              WARN_ON_ONCE(1);
 +              return NULL;
        }
 -
 -      return NULL;
  }
  
  static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
  {
 -      struct amd_iommu *iommu;
        int devid;
  
        if (!info)
                return NULL;
  
 -      switch (info->type) {
 -      case X86_IRQ_ALLOC_TYPE_MSI:
 -      case X86_IRQ_ALLOC_TYPE_MSIX:
 -              devid = get_device_id(&info->msi_dev->dev);
 -              if (devid < 0)
 -                      return NULL;
 -
 -              iommu = amd_iommu_rlookup_table[devid];
 -              if (iommu)
 -                      return iommu->msi_domain;
 -              break;
 -      default:
 -              break;
 -      }
 -
 -      return NULL;
 +      devid = get_devid(info);
 +      if (devid < 0)
 +              return NULL;
 +      return get_irq_domain_for_devid(info, devid);
  }
  
  struct irq_remap_ops amd_iommu_irq_ops = {
        .disable                = amd_iommu_disable,
        .reenable               = amd_iommu_reenable,
        .enable_faulting        = amd_iommu_enable_faulting,
 -      .get_ir_irq_domain      = get_ir_irq_domain,
        .get_irq_domain         = get_irq_domain,
  };
  
@@@ -3679,21 -3615,21 +3678,21 @@@ static void irq_remapping_prepare_irte(
        switch (info->type) {
        case X86_IRQ_ALLOC_TYPE_IOAPIC:
                /* Setup IOAPIC entry */
 -              entry = info->ioapic_entry;
 -              info->ioapic_entry = NULL;
 +              entry = info->ioapic.entry;
 +              info->ioapic.entry = NULL;
                memset(entry, 0, sizeof(*entry));
                entry->vector        = index;
                entry->mask          = 0;
 -              entry->trigger       = info->ioapic_trigger;
 -              entry->polarity      = info->ioapic_polarity;
 +              entry->trigger       = info->ioapic.trigger;
 +              entry->polarity      = info->ioapic.polarity;
                /* Mask level triggered irqs. */
 -              if (info->ioapic_trigger)
 +              if (info->ioapic.trigger)
                        entry->mask = 1;
                break;
  
        case X86_IRQ_ALLOC_TYPE_HPET:
 -      case X86_IRQ_ALLOC_TYPE_MSI:
 -      case X86_IRQ_ALLOC_TYPE_MSIX:
 +      case X86_IRQ_ALLOC_TYPE_PCI_MSI:
 +      case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
                msg->address_hi = MSI_ADDR_BASE_HI;
                msg->address_lo = MSI_ADDR_BASE_LO;
                msg->data = irte_info->index;
@@@ -3737,15 -3673,15 +3736,15 @@@ static int irq_remapping_alloc(struct i
  
        if (!info)
                return -EINVAL;
 -      if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
 -          info->type != X86_IRQ_ALLOC_TYPE_MSIX)
 +      if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_PCI_MSI &&
 +          info->type != X86_IRQ_ALLOC_TYPE_PCI_MSIX)
                return -EINVAL;
  
        /*
         * With IRQ remapping enabled, we don't need contiguous CPU vectors
         * to support multiple MSI interrupts.
         */
 -      if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
 +      if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI)
                info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
  
        devid = get_devid(info);
                                        iommu->irte_ops->set_allocated(table, i);
                        }
                        WARN_ON(table->min_index != 32);
 -                      index = info->ioapic_pin;
 +                      index = info->ioapic.pin;
                } else {
                        index = -ENOMEM;
                }
 -      } else if (info->type == X86_IRQ_ALLOC_TYPE_MSI ||
 -                 info->type == X86_IRQ_ALLOC_TYPE_MSIX) {
 -              bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
 +      } else if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI ||
 +                 info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX) {
 +              bool align = (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI);
  
 -              index = alloc_irq_index(devid, nr_irqs, align, info->msi_dev);
 +              index = alloc_irq_index(devid, nr_irqs, align,
 +                                      msi_desc_to_pci_dev(info->desc));
        } else {
                index = alloc_irq_index(devid, nr_irqs, false, NULL);
        }
  
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(domain, virq + i);
 -              cfg = irqd_cfg(irq_data);
 -              if (!irq_data || !cfg) {
 +              cfg = irq_data ? irqd_cfg(irq_data) : NULL;
 +              if (!cfg) {
                        ret = -EINVAL;
                        goto out_free_data;
                }
index cd6e3c70ebb31c69804512b75edc77bad15038a4,3a00fb64477b0c9d9ee01cf8dc66de596b880236..0cbcd3fc3e7e8d9c7c5633753eccff5718d3fade
@@@ -10,9 -10,8 +10,8 @@@
  
  #include <linux/acpi_iort.h>
  #include <linux/device.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/dma-iommu.h>
- #include <linux/dma-noncoherent.h>
  #include <linux/gfp.h>
  #include <linux/huge_mm.h>
  #include <linux/iommu.h>
@@@ -343,11 -342,8 +342,11 @@@ static int iommu_dma_init_domain(struc
  
        if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
                        DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
 -              cookie->fq_domain = domain;
 -              init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
 +              if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
 +                                      NULL))
 +                      pr_warn("iova flush queue initialization failed\n");
 +              else
 +                      cookie->fq_domain = domain;
        }
  
        if (!dev)
@@@ -474,7 -470,7 +473,7 @@@ static void __iommu_dma_unmap(struct de
        WARN_ON(unmapped != size);
  
        if (!cookie->fq_domain)
 -              iommu_tlb_sync(domain, &iotlb_gather);
 +              iommu_iotlb_sync(domain, &iotlb_gather);
        iommu_dma_free_iova(cookie, dma_addr, size);
  }
  
@@@ -527,9 -523,6 +526,9 @@@ static struct page **__iommu_dma_alloc_
        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
  
 +      /* It makes no sense to muck about with huge pages */
 +      gfp &= ~__GFP_COMP;
 +
        while (count) {
                struct page *page = NULL;
                unsigned int order_size;
                        page = alloc_pages_node(nid, alloc_flags, order);
                        if (!page)
                                continue;
 -                      if (!order)
 -                              break;
 -                      if (!PageCompound(page)) {
 +                      if (order)
                                split_page(page, order);
 -                              break;
 -                      } else if (!split_huge_page(page)) {
 -                              break;
 -                      }
 -                      __free_pages(page, order);
 +                      break;
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
   * @size: Size of buffer in bytes
   * @dma_handle: Out argument for allocated DMA handle
   * @gfp: Allocation flags
+  * @prot: pgprot_t to use for the remapped mapping
   * @attrs: DMA attributes for this allocation
   *
   * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
   * Return: Mapped virtual address, or NULL on failure.
   */
  static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+               dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
+               unsigned long attrs)
  {
        struct iommu_domain *domain = iommu_get_dma_domain(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-       pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
        struct page **pages;
        struct sg_table sgt;
@@@ -1030,8 -1030,10 +1030,10 @@@ static void *iommu_dma_alloc(struct dev
        gfp |= __GFP_ZERO;
  
        if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
-           !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
-               return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
+           !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
+               return iommu_dma_alloc_remap(dev, size, handle, gfp,
+                               dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
+       }
  
        if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            !gfpflags_allow_blocking(gfp) && !coherent)
        return cpu_addr;
  }
  
+ #ifdef CONFIG_DMA_REMAP
+ static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
+               dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
+ {
+       if (!gfpflags_allow_blocking(gfp)) {
+               struct page *page;
+               page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
+               if (!page)
+                       return NULL;
+               return page_address(page);
+       }
+       return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
+                                    PAGE_KERNEL, 0);
+ }
+ static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
+               void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
+ {
+       __iommu_dma_unmap(dev, handle, size);
+       __iommu_dma_free(dev, size, cpu_addr);
+ }
+ #else
+ #define iommu_dma_alloc_noncoherent           NULL
+ #define iommu_dma_free_noncoherent            NULL
+ #endif /* CONFIG_DMA_REMAP */
  static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
@@@ -1120,6 -1150,10 +1150,10 @@@ static unsigned long iommu_dma_get_merg
  static const struct dma_map_ops iommu_dma_ops = {
        .alloc                  = iommu_dma_alloc,
        .free                   = iommu_dma_free,
+       .alloc_pages            = dma_common_alloc_pages,
+       .free_pages             = dma_common_free_pages,
+       .alloc_noncoherent      = iommu_dma_alloc_noncoherent,
+       .free_noncoherent       = iommu_dma_free_noncoherent,
        .mmap                   = iommu_dma_mmap,
        .get_sgtable            = iommu_dma_get_sgtable,
        .map_page               = iommu_dma_map_page,
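
With .alloc_noncoherent and .free_noncoherent wired into iommu_dma_ops above, a driver behind an IOMMU can use the dma_alloc_noncoherent() API added earlier in this series. A hedged usage sketch, assuming the v5.10 signatures; the device pointer, buffer size and function names are illustrative only:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int my_alloc_ring(struct device *dev, void **buf, dma_addr_t *dma)
{
	*buf = dma_alloc_noncoherent(dev, SZ_64K, dma, DMA_BIDIRECTIONAL,
				     GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;

	/* noncoherent memory: CPU writes need an explicit sync */
	dma_sync_single_for_device(dev, *dma, SZ_64K, DMA_TO_DEVICE);
	return 0;
}

static void my_free_ring(struct device *dev, void *buf, dma_addr_t dma)
{
	dma_free_noncoherent(dev, SZ_64K, buf, dma, DMA_BIDIRECTIONAL);
}
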
index 2e2ba294bb152c1c08aeebd1731eae96d2553e55,0c5b4500ae83d3be1c5ca87f6307c3adecd2498e..8651f6d4dfa032c00f9c2149ae643a3614960fb7
@@@ -23,7 -23,7 +23,7 @@@
  #include <linux/spinlock.h>
  #include <linux/pci.h>
  #include <linux/dmar.h>
- #include <linux/dma-mapping.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/mempool.h>
  #include <linux/memory.h>
  #include <linux/cpu.h>
@@@ -37,7 -37,7 +37,7 @@@
  #include <linux/dmi.h>
  #include <linux/pci-ats.h>
  #include <linux/memblock.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/dma-direct.h>
  #include <linux/crash_dump.h>
  #include <linux/numa.h>
@@@ -698,47 -698,12 +698,47 @@@ static int domain_update_iommu_superpag
        return fls(mask);
  }
  
 +static int domain_update_device_node(struct dmar_domain *domain)
 +{
 +      struct device_domain_info *info;
 +      int nid = NUMA_NO_NODE;
 +
 +      assert_spin_locked(&device_domain_lock);
 +
 +      if (list_empty(&domain->devices))
 +              return NUMA_NO_NODE;
 +
 +      list_for_each_entry(info, &domain->devices, link) {
 +              if (!info->dev)
 +                      continue;
 +
 +              /*
 +               * There could possibly be multiple device NUMA nodes, as
 +               * devices within the same domain may sit behind different
 +               * IOMMUs. There is no perfect answer in such a situation,
 +               * so we select a first-come, first-served policy.
 +               */
 +              nid = dev_to_node(info->dev);
 +              if (nid != NUMA_NO_NODE)
 +                      break;
 +      }
 +
 +      return nid;
 +}
 +
  /* Some capabilities may be different across iommus */
  static void domain_update_iommu_cap(struct dmar_domain *domain)
  {
        domain_update_iommu_coherency(domain);
        domain->iommu_snooping = domain_update_iommu_snooping(NULL);
        domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL);
 +
 +      /*
 +       * If RHSA is missing, we should default to the device NUMA domain
 +       * as a fallback.
 +       */
 +      if (domain->nid == NUMA_NO_NODE)
 +              domain->nid = domain_update_device_node(domain);
  }
  
  struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@@ -2562,7 -2527,7 +2562,7 @@@ dmar_search_domain_by_dev_info(int segm
  static int domain_setup_first_level(struct intel_iommu *iommu,
                                    struct dmar_domain *domain,
                                    struct device *dev,
 -                                  int pasid)
 +                                  u32 pasid)
  {
        int flags = PASID_FLAG_SUPERVISOR_MODE;
        struct dma_pte *pgd = domain->pgd;
@@@ -2699,7 -2664,7 +2699,7 @@@ static struct dmar_domain *dmar_insert_
                }
  
                /* Setup the PASID entry for requests without PASID: */
 -              spin_lock(&iommu->lock);
 +              spin_lock_irqsave(&iommu->lock, flags);
                if (hw_pass_through && domain_type_is_si(domain))
                        ret = intel_pasid_setup_pass_through(iommu, domain,
                                        dev, PASID_RID2PASID);
                else
                        ret = intel_pasid_setup_second_level(iommu, domain,
                                        dev, PASID_RID2PASID);
 -              spin_unlock(&iommu->lock);
 +              spin_unlock_irqrestore(&iommu->lock, flags);
                if (ret) {
                        dev_err(dev, "Setup RID2PASID failed\n");
                        dmar_remove_one_dev_info(dev);
@@@ -3747,6 -3712,8 +3747,8 @@@ static const struct dma_map_ops intel_d
        .dma_supported = dma_direct_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
+       .alloc_pages = dma_common_alloc_pages,
+       .free_pages = dma_common_free_pages,
        .get_required_mask = intel_get_required_mask,
  };
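
Both intel_dma_ops here and bounce_dma_ops below now service the new dma_alloc_pages() API through dma_common_alloc_pages(). A short caller sketch, assuming the v5.10 prototype; per this series the returned pages are never highmem, so page_address() is valid. The device pointer and size are placeholders:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/string.h>

static int my_map_buffer(struct device *dev)
{
	dma_addr_t dma;
	struct page *page;

	page = dma_alloc_pages(dev, SZ_16K, &dma, DMA_FROM_DEVICE, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset(page_address(page), 0, SZ_16K);	/* direct-map access is fine */

	dma_free_pages(dev, SZ_16K, page, dma, DMA_FROM_DEVICE);
	return 0;
}
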
  
@@@ -3814,7 -3781,7 +3816,7 @@@ bounce_map_single(struct device *dev, p
         */
        if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
                tlb_addr = swiotlb_tbl_map_single(dev,
-                               __phys_to_dma(dev, io_tlb_start),
+                               phys_to_dma_unencrypted(dev, io_tlb_start),
                                paddr, size, aligned_size, dir, attrs);
                if (tlb_addr == DMA_MAPPING_ERROR) {
                        goto swiotlb_error;
@@@ -4000,6 -3967,8 +4002,8 @@@ static const struct dma_map_ops bounce_
        .sync_sg_for_device     = bounce_sync_sg_for_device,
        .map_resource           = bounce_map_resource,
        .unmap_resource         = bounce_unmap_resource,
+       .alloc_pages            = dma_common_alloc_pages,
+       .free_pages             = dma_common_free_pages,
        .dma_supported          = dma_direct_supported,
  };
  
@@@ -5130,6 -5099,8 +5134,6 @@@ static struct iommu_domain *intel_iommu
                if (type == IOMMU_DOMAIN_DMA)
                        intel_init_iova_domain(dmar_domain);
  
 -              domain_update_iommu_cap(dmar_domain);
 -
                domain = &dmar_domain->domain;
                domain->geometry.aperture_start = 0;
                domain->geometry.aperture_end   =
@@@ -5206,7 -5177,7 +5210,7 @@@ static int aux_domain_add_dev(struct dm
                return -ENODEV;
  
        if (domain->default_pasid <= 0) {
 -              int pasid;
 +              u32 pasid;
  
                /* No private data needed for the default pasid */
                pasid = ioasid_alloc(NULL, PASID_MIN,
@@@ -5441,7 -5412,8 +5445,7 @@@ intel_iommu_sva_invalidate(struct iommu
        int ret = 0;
        u64 size = 0;
  
 -      if (!inv_info || !dmar_domain ||
 -          inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
 +      if (!inv_info || !dmar_domain)
                return -EINVAL;
  
        if (!dev || !dev_is_pci(dev))
  
        /* Size is only valid in address selective invalidation */
        if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
 -              size = to_vtd_size(inv_info->addr_info.granule_size,
 -                                 inv_info->addr_info.nb_granules);
 +              size = to_vtd_size(inv_info->granu.addr_info.granule_size,
 +                                 inv_info->granu.addr_info.nb_granules);
  
        for_each_set_bit(cache_type,
                         (unsigned long *)&inv_info->cache,
                 * granularity.
                 */
                if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
 -                  (inv_info->pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
 -                      pasid = inv_info->pasid_info.pasid;
 +                  (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
 +                      pasid = inv_info->granu.pasid_info.pasid;
                else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
 -                       (inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
 -                      pasid = inv_info->addr_info.pasid;
 +                       (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
 +                      pasid = inv_info->granu.addr_info.pasid;
  
                switch (BIT(cache_type)) {
                case IOMMU_CACHE_INV_TYPE_IOTLB:
                        /* HW will ignore LSB bits based on address mask */
                        if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
                            size &&
 -                          (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
 +                          (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
                                pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
 -                                                 inv_info->addr_info.addr, size);
 +                                                 inv_info->granu.addr_info.addr, size);
                        }
  
                        /*
                         * We use npages = -1 to indicate that.
                         */
                        qi_flush_piotlb(iommu, did, pasid,
 -                                      mm_to_dma_pfn(inv_info->addr_info.addr),
 +                                      mm_to_dma_pfn(inv_info->granu.addr_info.addr),
                                        (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
 -                                      inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
 +                                      inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
  
                        if (!info->ats_enabled)
                                break;
                                size = 64 - VTD_PAGE_SHIFT;
                                addr = 0;
                        } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
 -                              addr = inv_info->addr_info.addr;
 +                              addr = inv_info->granu.addr_info.addr;
                        }
  
                        if (info->ats_enabled)
index 534810b6be770ec591517623eb029fcb182d0d19,f87cbb822a3d44e5cc2f2efeac6838636e2fc751..a7a9bc08dcd115c39633f7661303e4a21e0d23b2
@@@ -20,8 -20,6 +20,8 @@@
  
  #include <asm/barrier.h>
  
 +#include "io-pgtable-arm.h"
 +
  #define ARM_LPAE_MAX_ADDR_BITS                52
  #define ARM_LPAE_S2_MAX_CONCAT_PAGES  16
  #define ARM_LPAE_MAX_LEVELS           4
  #define ARM_LPAE_PTE_MEMATTR_DEV      (((arm_lpae_iopte)0x1) << 2)
  
  /* Register bits */
 -#define ARM_LPAE_TCR_TG0_4K           0
 -#define ARM_LPAE_TCR_TG0_64K          1
 -#define ARM_LPAE_TCR_TG0_16K          2
 -
 -#define ARM_LPAE_TCR_TG1_16K          1
 -#define ARM_LPAE_TCR_TG1_4K           2
 -#define ARM_LPAE_TCR_TG1_64K          3
 -
 -#define ARM_LPAE_TCR_SH_NS            0
 -#define ARM_LPAE_TCR_SH_OS            2
 -#define ARM_LPAE_TCR_SH_IS            3
 -
 -#define ARM_LPAE_TCR_RGN_NC           0
 -#define ARM_LPAE_TCR_RGN_WBWA         1
 -#define ARM_LPAE_TCR_RGN_WT           2
 -#define ARM_LPAE_TCR_RGN_WB           3
 -
  #define ARM_LPAE_VTCR_SL0_MASK                0x3
  
  #define ARM_LPAE_TCR_T0SZ_SHIFT               0
  #define ARM_LPAE_VTCR_PS_SHIFT                16
  #define ARM_LPAE_VTCR_PS_MASK         0x7
  
 -#define ARM_LPAE_TCR_PS_32_BIT                0x0ULL
 -#define ARM_LPAE_TCR_PS_36_BIT                0x1ULL
 -#define ARM_LPAE_TCR_PS_40_BIT                0x2ULL
 -#define ARM_LPAE_TCR_PS_42_BIT                0x3ULL
 -#define ARM_LPAE_TCR_PS_44_BIT                0x4ULL
 -#define ARM_LPAE_TCR_PS_48_BIT                0x5ULL
 -#define ARM_LPAE_TCR_PS_52_BIT                0x6ULL
 -
  #define ARM_LPAE_MAIR_ATTR_SHIFT(n)   ((n) << 3)
  #define ARM_LPAE_MAIR_ATTR_MASK               0xff
  #define ARM_LPAE_MAIR_ATTR_DEVICE     0x04
@@@ -728,11 -751,6 +728,6 @@@ arm_lpae_alloc_pgtable(struct io_pgtabl
        if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
                return NULL;
  
-       if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
-               dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
-               return NULL;
-       }
        data = kmalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;
index 019bb47df915e79aead597a71aa2682e63057a85,32f5982afa1ab2a11cb9ba9ffb998741003cdf2e..32ab01e89196d2c49c3ca5bc215355528be769b6
@@@ -12,7 -12,6 +12,6 @@@
  #include <linux/device.h>
  #include <linux/debugfs.h>
  #include <linux/delay.h>
- #include <linux/dma-contiguous.h>
  #include <linux/errno.h>
  #include <linux/firmware.h>
  #include <linux/interrupt.h>
@@@ -756,12 -755,18 +755,12 @@@ static void fimc_is_debugfs_remove(stru
        is->debugfs_entry = NULL;
  }
  
 -static int fimc_is_debugfs_create(struct fimc_is *is)
 +static void fimc_is_debugfs_create(struct fimc_is *is)
  {
 -      struct dentry *dentry;
 -
        is->debugfs_entry = debugfs_create_dir("fimc_is", NULL);
  
 -      dentry = debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry,
 -                                   is, &fimc_is_fops);
 -      if (!dentry)
 -              fimc_is_debugfs_remove(is);
 -
 -      return is->debugfs_entry == NULL ? -EIO : 0;
 +      debugfs_create_file("fw_log", S_IRUGO, is->debugfs_entry, is,
 +                          &fimc_is_fops);
  }
  
  static int fimc_is_runtime_resume(struct device *dev);
@@@ -847,7 -852,9 +846,7 @@@ static int fimc_is_probe(struct platfor
        if (ret < 0)
                goto err_pm;
  
 -      ret = fimc_is_debugfs_create(is);
 -      if (ret < 0)
 -              goto err_sd;
 +      fimc_is_debugfs_create(is);
  
        ret = fimc_is_request_firmware(is, FIMC_IS_FW_FILENAME);
        if (ret < 0)
  
  err_dfs:
        fimc_is_debugfs_remove(is);
 -err_sd:
        fimc_is_unregister_subdevs(is);
  err_pm:
        pm_runtime_put_noidle(dev);
index d226ecadff8e52a4a107fe25d237e413e6abbff0,307997ee7f96a2c484ca986fa345578053711cf2..eb15c8c725ca0bfd2b205c36508c5c517568a2cb
@@@ -7,6 -7,7 +7,7 @@@
   */
  
  #include <linux/clk.h>
+ #include <linux/dma-mapping.h>
  #include <linux/interrupt.h>
  #include <linux/module.h>
  #include <linux/mutex.h>
@@@ -182,8 -183,14 +183,14 @@@ static int sun4i_csi_probe(struct platf
                if (ret)
                        return ret;
        } else {
+               /*
+                * XXX(hch): this has no business in a driver and needs to move
+                * to the device tree.
+                */
  #ifdef PHYS_PFN_OFFSET
-               csi->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+               ret = dma_direct_set_offset(csi->dev, PHYS_OFFSET, 0, SZ_4G);
+               if (ret)
+                       return ret;
  #endif
        }
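
dma_direct_set_offset() replaces the old single dma_pfn_offset with a dev->dma_range_map table of bus_dma_region entries; for the call above, CPU physical PHYS_OFFSET maps to DMA address 0 over a 4 GiB window. A simplified sketch of the lookup this establishes, adapted from the v5.10 translate_phys_to_dma() helper (each entry's offset is cpu_start - dma_start); the function name is hypothetical:

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>

static dma_addr_t my_phys_to_dma(const struct bus_dma_region *map,
				 phys_addr_t paddr)
{
	/* the map is terminated by an entry with size == 0 */
	for (; map->size; map++)
		if (paddr >= map->cpu_start &&
		    paddr - map->cpu_start < map->size)
			return (dma_addr_t)paddr - map->offset;
	return DMA_MAPPING_ERROR;
}
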
  
@@@ -287,7 -294,6 +294,7 @@@ static int sun4i_csi_remove(struct plat
  
        v4l2_async_notifier_unregister(&csi->notifier);
        v4l2_async_notifier_cleanup(&csi->notifier);
 +      vb2_video_unregister_device(&csi->vdev);
        media_device_unregister(&csi->mdev);
        sun4i_csi_dma_unregister(csi);
        media_device_cleanup(&csi->mdev);
index f4107b9e8c38620b7e8facf8934c660bb76b4480,7d6bbbf4a916aa505b866b14b7951fb18a081671..19ebb542befcb5846f4de69d257c6f263d10637b
@@@ -162,143 -162,6 +162,143 @@@ int usb_control_msg(struct usb_device *
  }
  EXPORT_SYMBOL_GPL(usb_control_msg);
  
 +/**
 + * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion
 + * @dev: pointer to the usb device to send the message to
 + * @endpoint: endpoint to send the message to
 + * @request: USB message request value
 + * @requesttype: USB message request type value
 + * @value: USB message value
 + * @index: USB message index value
 + * @driver_data: pointer to the data to send
 + * @size: length in bytes of the data to send
 + * @timeout: time in msecs to wait for the message to complete before timing
 + *    out (if 0 the wait is forever)
 + * @memflags: the flags for memory allocation for buffers
 + *
 + * Context: !in_interrupt ()
 + *
 + * This function sends a control message to a specified endpoint that is not
 + * expected to fill in a response (i.e. a "send message") and waits for the
 + * message to complete or time out.
 + *
 + * Do not use this function from within an interrupt context. If you need
 + * an asynchronous message, or need to send a message from within interrupt
 + * context, use usb_submit_urb(). If a thread in your driver uses this call,
 + * make sure your disconnect() method can wait for it to complete. Since you
 + * don't have a handle on the URB used, you can't cancel the request.
 + *
 + * The data pointer can point to a buffer on the stack, or anywhere else, as
 + * it will not be modified at all.  This function does not have the
 + * restriction that usb_control_msg() has, where the data pointer must point
 + * to dynamically allocated memory (i.e. memory that can be successfully
 + * DMAed to a device).
 + *
 + * Return: 0 on success, otherwise a negative error number.
 + */
 +int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
 +                       __u8 requesttype, __u16 value, __u16 index,
 +                       const void *driver_data, __u16 size, int timeout,
 +                       gfp_t memflags)
 +{
 +      unsigned int pipe = usb_sndctrlpipe(dev, endpoint);
 +      int ret;
 +      u8 *data = NULL;
 +
 +      if (usb_pipe_type_check(dev, pipe))
 +              return -EINVAL;
 +
 +      if (size) {
 +              data = kmemdup(driver_data, size, memflags);
 +              if (!data)
 +                      return -ENOMEM;
 +      }
 +
 +      ret = usb_control_msg(dev, pipe, request, requesttype, value, index,
 +                            data, size, timeout);
 +      kfree(data);
 +
 +      if (ret < 0)
 +              return ret;
 +      if (ret == size)
 +              return 0;
 +      return -EINVAL;
 +}
 +EXPORT_SYMBOL_GPL(usb_control_msg_send);
 +
 +/**
 + * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion
 + * @dev: pointer to the usb device to send the message to
 + * @endpoint: endpoint to send the message to
 + * @request: USB message request value
 + * @requesttype: USB message request type value
 + * @value: USB message value
 + * @index: USB message index value
 + * @driver_data: pointer to the data to be filled in by the message
 + * @size: length in bytes of the data to be received
 + * @timeout: time in msecs to wait for the message to complete before timing
 + *    out (if 0 the wait is forever)
 + * @memflags: the flags for memory allocation for buffers
 + *
 + * Context: !in_interrupt ()
 + *
 + * This function sends a control message to a specified endpoint that is
 + * expected to fill in a response (i.e. a "receive message") and waits for the
 + * message to complete or time out.
 + *
 + * Do not use this function from within an interrupt context. If you need
 + * an asynchronous message, or need to send a message from within interrupt
 + * context, use usb_submit_urb(). If a thread in your driver uses this call,
 + * make sure your disconnect() method can wait for it to complete. Since you
 + * don't have a handle on the URB used, you can't cancel the request.
 + *
 + * The data pointer can point to a buffer on the stack, or anywhere else
 + * that can be successfully written to.  This function does not have the
 + * restriction that usb_control_msg() has, where the data pointer must point
 + * to dynamically allocated memory (i.e. memory that can be successfully
 + * DMAed to a device).
 + *
 + * The "whole" message must be properly received from the device in order for
 + * this function to be successful.  If a device returns less than the expected
 + * amount of data, then the function will fail.  Do not use this for messages
 + * where a variable amount of data might be returned.
 + *
 + * Return: 0 on success, otherwise a negative error number.
 + */
 +int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request,
 +                       __u8 requesttype, __u16 value, __u16 index,
 +                       void *driver_data, __u16 size, int timeout,
 +                       gfp_t memflags)
 +{
 +      unsigned int pipe = usb_rcvctrlpipe(dev, endpoint);
 +      int ret;
 +      u8 *data;
 +
 +      if (!size || !driver_data || usb_pipe_type_check(dev, pipe))
 +              return -EINVAL;
 +
 +      data = kmalloc(size, memflags);
 +      if (!data)
 +              return -ENOMEM;
 +
 +      ret = usb_control_msg(dev, pipe, request, requesttype, value, index,
 +                            data, size, timeout);
 +
 +      if (ret < 0)
 +              goto exit;
 +
 +      if (ret == size) {
 +              memcpy(driver_data, data, size);
 +              ret = 0;
 +      } else {
 +              ret = -EINVAL;
 +      }
 +
 +exit:
 +      kfree(data);
 +      return ret;
 +}
 +EXPORT_SYMBOL_GPL(usb_control_msg_recv);
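
A hedged usage sketch for the two wrappers above; the device, vendor request and response layout are hypothetical. Note that a stack buffer is fine here, unlike with raw usb_control_msg():

#include <linux/usb.h>

#define MY_VENDOR_REQ	0x01	/* hypothetical vendor request */

static int my_get_status(struct usb_device *udev, __le32 *status)
{
	__le32 buf;	/* on the stack: the helper bounces it internally */
	int ret;

	ret = usb_control_msg_recv(udev, 0, MY_VENDOR_REQ,
				   USB_DIR_IN | USB_TYPE_VENDOR |
				   USB_RECIP_DEVICE,
				   0, 0, &buf, sizeof(buf),
				   USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
	if (ret)
		return ret;	/* 0 only if all sizeof(buf) bytes arrived */

	*status = buf;
	return 0;
}
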
 +
  /**
   * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion
   * @usb_dev: pointer to the usb device to send the message to
@@@ -1085,12 -948,11 +1085,12 @@@ int usb_set_isoch_delay(struct usb_devi
        if (dev->speed < USB_SPEED_SUPER)
                return 0;
  
 -      return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 +      return usb_control_msg_send(dev, 0,
                        USB_REQ_SET_ISOCH_DELAY,
                        USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
                        dev->hub_delay, 0, NULL, 0,
 -                      USB_CTRL_SET_TIMEOUT);
 +                      USB_CTRL_SET_TIMEOUT,
 +                      GFP_NOIO);
  }
  
  /**
@@@ -1208,13 -1070,13 +1208,13 @@@ int usb_clear_halt(struct usb_device *d
         * (like some ibmcam model 1 units) seem to expect hosts to make
         * this request for iso endpoints, which can't halt!
         */
 -      result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 -              USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
 -              USB_ENDPOINT_HALT, endp, NULL, 0,
 -              USB_CTRL_SET_TIMEOUT);
 +      result = usb_control_msg_send(dev, 0,
 +                                    USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
 +                                    USB_ENDPOINT_HALT, endp, NULL, 0,
 +                                    USB_CTRL_SET_TIMEOUT, GFP_NOIO);
  
        /* don't un-halt or force to DATA0 except on success */
 -      if (result < 0)
 +      if (result)
                return result;
  
        /* NOTE:  seems like Microsoft and Apple don't bother verifying
@@@ -1576,11 -1438,9 +1576,11 @@@ int usb_set_interface(struct usb_devic
        if (dev->quirks & USB_QUIRK_NO_SET_INTF)
                ret = -EPIPE;
        else
 -              ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 -                                 USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE,
 -                                 alternate, interface, NULL, 0, 5000);
 +              ret = usb_control_msg_send(dev, 0,
 +                                         USB_REQ_SET_INTERFACE,
 +                                         USB_RECIP_INTERFACE, alternate,
 +                                         interface, NULL, 0, 5000,
 +                                         GFP_NOIO);
  
        /* 9.4.10 says devices don't need this and are free to STALL the
         * request if the interface only has one alternate setting.
                        "manual set_interface for iface %d, alt %d\n",
                        interface, alternate);
                manual = 1;
 -      } else if (ret < 0) {
 +      } else if (ret) {
                /* Re-instate the old alt setting */
                usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
                usb_enable_lpm(dev);
@@@ -1714,11 -1574,11 +1714,11 @@@ int usb_reset_configuration(struct usb_
                mutex_unlock(hcd->bandwidth_mutex);
                return retval;
        }
 -      retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 -                      USB_REQ_SET_CONFIGURATION, 0,
 -                      config->desc.bConfigurationValue, 0,
 -                      NULL, 0, USB_CTRL_SET_TIMEOUT);
 -      if (retval < 0) {
 +      retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0,
 +                                    config->desc.bConfigurationValue, 0,
 +                                    NULL, 0, USB_CTRL_SET_TIMEOUT,
 +                                    GFP_NOIO);
 +      if (retval) {
                usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
                usb_enable_lpm(dev);
                mutex_unlock(hcd->bandwidth_mutex);
@@@ -2087,12 -1947,6 +2087,6 @@@ free_interfaces
                intf->dev.bus = &usb_bus_type;
                intf->dev.type = &usb_if_device_type;
                intf->dev.groups = usb_interface_groups;
-               /*
-                * Please refer to usb_alloc_dev() to see why we set
-                * dma_mask and dma_pfn_offset.
-                */
-               intf->dev.dma_mask = dev->dev.dma_mask;
-               intf->dev.dma_pfn_offset = dev->dev.dma_pfn_offset;
                INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
                intf->minor = -1;
                device_initialize(&intf->dev);
        }
        kfree(new_interfaces);
  
 -      ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
 -                            USB_REQ_SET_CONFIGURATION, 0, configuration, 0,
 -                            NULL, 0, USB_CTRL_SET_TIMEOUT);
 -      if (ret < 0 && cp) {
 +      ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0,
 +                                 configuration, 0, NULL, 0,
 +                                 USB_CTRL_SET_TIMEOUT, GFP_NOIO);
 +      if (ret && cp) {
                /*
                 * All the old state is gone, so what else can we do?
                 * The device is probably useless now anyway.
diff --combined include/linux/device.h
index a8e3a86e35f61e9dd77a039f9b534aa40665cf5e,f85163701322d079d0a52c2a6fe55b9b167f8ee8..5ed101be7b2e7d329d35e6e91e095b48a180c79b
@@@ -206,8 -206,6 +206,8 @@@ int devres_release_group(struct device 
  
  /* managed devm_k.alloc/kfree for device drivers */
  void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
 +void *devm_krealloc(struct device *dev, void *ptr, size_t size,
 +                  gfp_t gfp) __must_check;
  __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp,
                                     const char *fmt, va_list ap) __malloc;
  __printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp,
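
A hedged sketch of the devm_krealloc() helper declared above: it grows a device-managed buffer in place and, like plain krealloc(), leaves the old allocation valid on failure. The device and sizing policy are placeholders:

#include <linux/device.h>
#include <linux/slab.h>

static int grow_table(struct device *dev, u32 **table, size_t *nents)
{
	size_t new_nents = *nents ? *nents * 2 : 16;
	u32 *tmp;

	tmp = devm_krealloc(dev, *table, new_nents * sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;	/* *table is still valid and devm-managed */

	*table = tmp;
	*nents = new_nents;
	return 0;
}
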
@@@ -294,6 -292,62 +294,6 @@@ struct device_dma_parameters 
        unsigned long segment_boundary_mask;
  };
  
 -/**
 - * struct device_connection - Device Connection Descriptor
 - * @fwnode: The device node of the connected device
 - * @endpoint: The names of the two devices connected together
 - * @id: Unique identifier for the connection
 - * @list: List head, private, for internal use only
 - *
 - * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when
 - * platform firmware defines the connection. When the connection is registered
 - * with device_connection_add() @endpoint is used instead.
 - */
 -struct device_connection {
 -      struct fwnode_handle    *fwnode;
 -      const char              *endpoint[2];
 -      const char              *id;
 -      struct list_head        list;
 -};
 -
 -typedef void *(*devcon_match_fn_t)(struct device_connection *con, int ep,
 -                                 void *data);
 -
 -void *fwnode_connection_find_match(struct fwnode_handle *fwnode,
 -                                 const char *con_id, void *data,
 -                                 devcon_match_fn_t match);
 -void *device_connection_find_match(struct device *dev, const char *con_id,
 -                                 void *data, devcon_match_fn_t match);
 -
 -struct device *device_connection_find(struct device *dev, const char *con_id);
 -
 -void device_connection_add(struct device_connection *con);
 -void device_connection_remove(struct device_connection *con);
 -
 -/**
 - * device_connections_add - Add multiple device connections at once
 - * @cons: Zero terminated array of device connection descriptors
 - */
 -static inline void device_connections_add(struct device_connection *cons)
 -{
 -      struct device_connection *c;
 -
 -      for (c = cons; c->endpoint[0]; c++)
 -              device_connection_add(c);
 -}
 -
 -/**
 - * device_connections_remove - Remove multiple device connections at once
 - * @cons: Zero terminated array of device connection descriptors
 - */
 -static inline void device_connections_remove(struct device_connection *cons)
 -{
 -      struct device_connection *c;
 -
 -      for (c = cons; c->endpoint[0]; c++)
 -              device_connection_remove(c);
 -}
 -
  /**
   * enum device_link_state - Device link states.
   * @DL_STATE_NONE: The presence of the drivers is not being tracked.
@@@ -413,7 -467,7 +413,7 @@@ struct dev_links_info 
   *            such descriptors.
   * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
   *            DMA limit than the device itself supports.
-  * @dma_pfn_offset: offset of DMA memory range relatively of RAM
+  * @dma_range_map: map for DMA memory ranges relative to that of RAM
   * @dma_parms:        A low level driver may set these to teach IOMMU code about
   *            segment limitations.
   * @dma_pools:        Dma pools (if dma'ble device).
@@@ -508,7 -562,7 +508,7 @@@ struct device 
                                             64 bit addresses for consistent
                                             allocations such descriptors. */
        u64             bus_dma_limit;  /* upstream dma constraint */
-       unsigned long   dma_pfn_offset;
+       const struct bus_dma_region *dma_range_map;
  
        struct device_dma_parameters *dma_parms;
  
diff --combined include/linux/gfp.h
index 07e481993ef5da7e978b415dab569613f02b8191,dd2577c5407112d1393da7bb88694cc8a4a09e69..c603237e006ceb5e7a6d6c8b06d3d4c52038566a
@@@ -238,9 -238,7 +238,9 @@@ struct vm_area_struct
   * %__GFP_FOO flags as necessary.
   *
   * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
 - * watermark is applied to allow access to "atomic reserves"
 + * watermark is applied to allow access to "atomic reserves".
 + * The current implementation doesn't support NMI and a few other strict
 + * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
   *
   * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
   * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
@@@ -552,8 -550,10 +552,10 @@@ extern struct page *alloc_pages_vma(gfp
  #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
  #else
- #define alloc_pages(gfp_mask, order) \
-               alloc_pages_node(numa_node_id(), gfp_mask, order)
+ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
+ {
+       return alloc_pages_node(numa_node_id(), gfp_mask, order);
+ }
  #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
        alloc_pages(gfp_mask, order)
  #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
  #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
  #define alloc_page_vma(gfp_mask, vma, addr)                   \
        alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
 -#define alloc_page_vma_node(gfp_mask, vma, addr, node)                \
 -      alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
  
  extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
  extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --combined kernel/dma/contiguous.c
index 0369fd5fda8fc7001828c2c2fc0a589f67e8f672,a2ee330a3749ecc8ecd52e294cc7aacb943567d4..16b95ff12e4df30f19054545fd91c2e92f0f5d6c
@@@ -5,6 -5,34 +5,34 @@@
   * Written by:
   *    Marek Szyprowski <[email protected]>
   *    Michal Nazarewicz <[email protected]>
+  *
+  * Contiguous Memory Allocator
+  *
+  *   The Contiguous Memory Allocator (CMA) makes it possible to
+  *   allocate big contiguous chunks of memory after the system has
+  *   booted.
+  *
+  * Why is it needed?
+  *
+  *   Various devices on embedded systems have no scatter-gather and/or
+  *   IO map support and require contiguous blocks of memory to
+  *   operate.  They include devices such as cameras, hardware video
+  *   codecs, etc.
+  *
+  *   Such devices often require big memory buffers (a full HD frame
+  *   is, for instance, more than 2 megapixels, i.e. more than 6
+  *   MB of memory), which makes mechanisms such as kmalloc() or
+  *   alloc_page() ineffective.
+  *
+  *   At the same time, a solution where a big memory region is
+  *   reserved for a device is suboptimal, since often more memory is
+  *   reserved than strictly required and, moreover, the memory is
+  *   inaccessible to the page allocator even if device drivers don't
+  *   use it.
+  *
+  *   CMA tries to solve this issue by operating on memory regions
+  *   from which only movable pages can be allocated.  This way, the
+  *   kernel can use the memory for pagecache and, when a device driver
+  *   requests it, the allocated pages can be migrated.
   */
  
  #define pr_fmt(fmt) "cma: " fmt
  #endif
  
  #include <asm/page.h>
- #include <asm/dma-contiguous.h>
  
  #include <linux/memblock.h>
  #include <linux/err.h>
  #include <linux/sizes.h>
- #include <linux/dma-contiguous.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/cma.h>
  
  #ifdef CONFIG_CMA_SIZE_MBYTES
@@@ -69,11 -96,33 +96,24 @@@ static int __init early_cma(char *p
  }
  early_param("cma", early_cma);
  
+ #ifdef CONFIG_DMA_PERNUMA_CMA
+ static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
+ static phys_addr_t pernuma_size_bytes __initdata;
+ static int __init early_cma_pernuma(char *p)
+ {
+       pernuma_size_bytes = memparse(p, &p);
+       return 0;
+ }
+ early_param("cma_pernuma", early_cma_pernuma);
+ #endif
  #ifdef CONFIG_CMA_SIZE_PERCENTAGE
  
  static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
  {
 -      struct memblock_region *reg;
 -      unsigned long total_pages = 0;
 -
 -      /*
 -       * We cannot use memblock_phys_mem_size() here, because
 -       * memblock_analyze() has not been called yet.
 -       */
 -      for_each_memblock(memory, reg)
 -              total_pages += memblock_region_memory_end_pfn(reg) -
 -                             memblock_region_memory_base_pfn(reg);
 +      unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());
  
        return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
  }
@@@ -87,6 -136,34 +127,34 @@@ static inline __maybe_unused phys_addr_
  
  #endif
  
+ #ifdef CONFIG_DMA_PERNUMA_CMA
+ void __init dma_pernuma_cma_reserve(void)
+ {
+       int nid;
+       if (!pernuma_size_bytes)
+               return;
+       for_each_online_node(nid) {
+               int ret;
+               char name[CMA_MAX_NAME];
+               struct cma **cma = &dma_contiguous_pernuma_area[nid];
+               snprintf(name, sizeof(name), "pernuma%d", nid);
+               ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
+                                                0, false, name, cma, nid);
+               if (ret) {
+                       pr_warn("%s: reservation failed: err %d, node %d\n", __func__,
+                               ret, nid);
+                       continue;
+               }
+               pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
+                       (unsigned long long)pernuma_size_bytes / SZ_1M, nid);
+       }
+ }
+ #endif
  /**
   * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
   * @limit: End address of the reserved memory (optional, 0 for any).
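
The per-NUMA areas above are sized from the kernel command line (e.g. "cma_pernuma=16M", a hypothetical value) and must be reserved once the NUMA topology is known; in this series arm64 calls the hook from its boot-time memory setup. A sketch of that wiring, with the arch function name as a placeholder:

#include <linux/dma-map-ops.h>

void __init my_arch_bootmem_init(void)	/* placeholder for the arch hook */
{
	/* ... memblock + NUMA initialization ... */

	/* no-op unless cma_pernuma= was given on the command line */
	dma_pernuma_cma_reserve();
}
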
@@@ -134,6 -211,11 +202,11 @@@ void __init dma_contiguous_reserve(phys
        }
  }
  
+ void __weak
+ dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+ {
+ }
  /**
   * dma_contiguous_reserve_area() - reserve custom contiguous area
   * @size: Size of the reserved area (in bytes),
@@@ -219,23 -301,44 +292,44 @@@ static struct page *cma_alloc_aligned(s
   * @size:  Requested allocation size.
   * @gfp:   Allocation flags.
   *
-  * This function allocates contiguous memory buffer for specified device. It
-  * tries to use device specific contiguous memory area if available, or the
-  * default global one.
+  * This function first tries to use the device specific contiguous memory
+  * area if available, then the per-NUMA CMA area; if both fail, it falls
+  * back to the default global area.
   *
-  * Note that it byapss one-page size of allocations from the global area as
-  * the addresses within one page are always contiguous, so there is no need
-  * to waste CMA pages for that kind; it also helps reduce fragmentations.
+  * Note that it bypasses single-page allocations from both the per-NUMA
+  * and the global area, as the addresses within one page are always
+  * contiguous, so there is no need to waste CMA pages on those; this also
+  * helps reduce fragmentation.
   */
  struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
  {
+ #ifdef CONFIG_DMA_PERNUMA_CMA
+       int nid = dev_to_node(dev);
+ #endif
        /* CMA can be used only in the context which permits sleeping */
        if (!gfpflags_allow_blocking(gfp))
                return NULL;
        if (dev->cma_area)
                return cma_alloc_aligned(dev->cma_area, size, gfp);
-       if (size <= PAGE_SIZE || !dma_contiguous_default_area)
+       if (size <= PAGE_SIZE)
                return NULL;
+ #ifdef CONFIG_DMA_PERNUMA_CMA
+       if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
+               struct cma *cma = dma_contiguous_pernuma_area[nid];
+               struct page *page;
+               if (cma) {
+                       page = cma_alloc_aligned(cma, size, gfp);
+                       if (page)
+                               return page;
+               }
+       }
+ #endif
+       if (!dma_contiguous_default_area)
+               return NULL;
        return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
  }
  
   */
  void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
  {
-       if (!cma_release(dev_get_cma_area(dev), page,
-                        PAGE_ALIGN(size) >> PAGE_SHIFT))
-               __free_pages(page, get_order(size));
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       /* if dev has its own cma, free page from there */
+       if (dev->cma_area) {
+               if (cma_release(dev->cma_area, page, count))
+                       return;
+       } else {
+               /*
+                * otherwise, page is from either per-numa cma or default cma
+                */
+ #ifdef CONFIG_DMA_PERNUMA_CMA
+               if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
+                                       page, count))
+                       return;
+ #endif
+               if (cma_release(dma_contiguous_default_area, page, count))
+                       return;
+       }
+       /* not in any cma, free from buddy */
+       __free_pages(page, get_order(size));
  }
  
  /*
  
  static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
  {
-       dev_set_cma_area(dev, rmem->priv);
+       dev->cma_area = rmem->priv;
        return 0;
  }
  
  static void rmem_cma_device_release(struct reserved_mem *rmem,
                                    struct device *dev)
  {
-       dev_set_cma_area(dev, NULL);
+       dev->cma_area = NULL;
  }
  
  static const struct reserved_mem_ops rmem_cma_ops = {
@@@ -318,7 -439,7 +430,7 @@@ static int __init rmem_cma_setup(struc
        dma_contiguous_early_fixup(rmem->base, rmem->size);
  
        if (default_cma)
-               dma_contiguous_set_default(cma);
+               dma_contiguous_default_area = cma;
  
        rmem->ops = &rmem_cma_ops;
        rmem->priv = cma;
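
For context, these ops back devicetree "shared-dma-pool" reserved-memory nodes; with dev_set_cma_area() removed, the per-device pointer is assigned directly. A sketch of the registration side, following the kernel's RESERVEDMEM_OF_DECLARE pattern (treat the comments as paraphrase, not authoritative):

    /* Sketch: wiring a DT CMA region to the ops shown above. */
    static const struct reserved_mem_ops rmem_cma_ops = {
            .device_init    = rmem_cma_device_init,    /* dev->cma_area = rmem->priv */
            .device_release = rmem_cma_device_release, /* dev->cma_area = NULL */
    };

    RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);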
diff --combined kernel/dma/swiotlb.c
index 465a567678d9663d28790b08222485cea9d4f10b,2be1e8b34ae3b9383b1a63e32f6e5db40a44ad61..b4eea0abc3f00208600fe80d6f293922a56b6ffe
@@@ -22,7 -22,7 +22,7 @@@
  
  #include <linux/cache.h>
  #include <linux/dma-direct.h>
- #include <linux/dma-noncoherent.h>
+ #include <linux/dma-map-ops.h>
  #include <linux/mm.h>
  #include <linux/export.h>
  #include <linux/spinlock.h>
@@@ -93,7 -93,7 +93,7 @@@ static unsigned int io_tlb_index
  * Max segment that we can provide which (if pages are contiguous) will
   * not be bounced (unless SWIOTLB_FORCE is set).
   */
 -unsigned int max_segment;
 +static unsigned int max_segment;
  
  /*
   * We need to save away the original address corresponding to a mapped entry
@@@ -172,7 -172,9 +172,7 @@@ void swiotlb_print_info(void
                return;
        }
  
 -      pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
 -             (unsigned long long)io_tlb_start,
 -             (unsigned long long)io_tlb_end,
 +      pr_info("mapped [mem %pa-%pa] (%luMB)\n", &io_tlb_start, &io_tlb_end,
               bytes >> 20);
  }
  
@@@ -668,13 -670,13 +668,13 @@@ dma_addr_t swiotlb_map(struct device *d
                              swiotlb_force);
  
        swiotlb_addr = swiotlb_tbl_map_single(dev,
-                       __phys_to_dma(dev, io_tlb_start),
+                       phys_to_dma_unencrypted(dev, io_tlb_start),
                        paddr, size, size, dir, attrs);
        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;
  
        /* Ensure that the address returned is DMA'ble */
-       dma_addr = __phys_to_dma(dev, swiotlb_addr);
+       dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
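
The rename captures a real semantic split on memory-encryption platforms: the SWIOTLB pool must stay addressable by the device without the SME encryption bit, while ordinary phys_to_dma() sets it. Roughly, as a sketch approximating <linux/dma-direct.h> after this series (the dma_range_map branch comes from the per-device DMA offset work in this same pull):

    /* Sketch, approximating the post-series helpers: */
    static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
                                                     phys_addr_t paddr)
    {
            if (dev->dma_range_map)         /* per-device dma offset ranges */
                    return translate_phys_to_dma(dev, paddr);
            return paddr;
    }

    static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
    {
            /* encrypted mappings additionally carry the SME C-bit */
            return __sme_set(phys_to_dma_unencrypted(dev, paddr));
    }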
diff --combined mm/Kconfig
index 8c60c49a123bd642a89c1f6ff1b9bef6fbe33fed,d75a0107f61fa286d1a5816230d5aa6d90ef4cc3..e72e61c1d62e1a358881811fe976594d6ab48191
@@@ -383,7 -383,7 +383,7 @@@ config NOMMU_INITIAL_TRIM_EXCES
          This option specifies the initial value of this setting.  The default
          of 1 says that all excess pages should be trimmed.
  
 -        See Documentation/mm/nommu-mmap.rst for more information.
 +        See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
  
  config TRANSPARENT_HUGEPAGE
        bool "Transparent Hugepage Support"
@@@ -516,13 -516,14 +516,14 @@@ config CMA_DEBUGF
  config CMA_AREAS
        int "Maximum count of the CMA areas"
        depends on CMA
+       default 19 if NUMA
        default 7
        help
          CMA allows creating CMA areas for a particular purpose, mainly
          used as device-private areas. This parameter sets the maximum
          number of CMA areas in the system.
  
-         If unsure, leave the default value "7".
+         If unsure, leave the default value: "7" on UMA and "19" on NUMA.
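
The bump to 19 on NUMA leaves headroom for the per-node areas that DMA_PERNUMA_CMA can now reserve, on top of what UMA kernels already needed. The option ultimately bounds a static table; a sketch of how it is consumed, assuming the definitions in <linux/cma.h> and mm/cma.c:

    /* Sketch: CONFIG_CMA_AREAS bounds the static CMA area table. */
    #define MAX_CMA_AREAS   (1 + CONFIG_CMA_AREAS)  /* +1: global default area */

    struct cma cma_areas[MAX_CMA_AREAS];
    unsigned cma_area_count;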
  
  config MEM_SOFT_DIRTY
        bool "Track memory changes"
@@@ -831,10 -832,10 +832,10 @@@ config PERCPU_STAT
          be used to help understand percpu memory usage.
  
  config GUP_BENCHMARK
 -      bool "Enable infrastructure for get_user_pages_fast() benchmarking"
 +      bool "Enable infrastructure for get_user_pages() and related calls benchmarking"
        help
          Provides /sys/kernel/debug/gup_benchmark that helps with testing
 -        performance of get_user_pages_fast().
 +        performance of get_user_pages() and related calls.
  
          See tools/testing/selftests/vm/gup_benchmark.c
  
diff --combined mm/hugetlb.c
index ddbd3b355361b893f1d9a04f8417a7eca3828115,1cc743d52565301088204116580c193b4480fdc0..fe76f8fd5a732c5f1301fab8600dd081cdc4498e
@@@ -240,6 -240,7 +240,6 @@@ get_file_region_entry_from_cache(struc
  
        resv->region_cache_count--;
        nrg = list_first_entry(&resv->region_cache, struct file_region, link);
 -      VM_BUG_ON(!nrg);
        list_del(&nrg->link);
  
        nrg->from = from;
@@@ -308,7 -309,8 +308,7 @@@ static void coalesce_file_region(struc
                list_del(&rg->link);
                kfree(rg);
  
 -              coalesce_file_region(resv, prg);
 -              return;
 +              rg = prg;
        }
  
        nrg = list_next_entry(rg, link);
  
                list_del(&rg->link);
                kfree(rg);
 -
 -              coalesce_file_region(resv, nrg);
 -              return;
        }
  }
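
The refactor replaces tail recursion with iteration: after merging into the previous region, rg is re-pointed at the merged entry and the adjacency checks simply run again, so one pass in each direction suffices. A generic sketch of the pattern on a sorted [from, to) interval list; struct range and coalesce() are illustrative, not the hugetlb types (the real code also requires matching cgroup info before merging):

    /* Sketch: iteratively merge a node with its [from, to) neighbors. */
    struct range {
            long from, to;
            struct list_head link;
    };

    static void coalesce(struct list_head *head, struct range *rg)
    {
            struct range *prev = list_prev_entry(rg, link);
            struct range *next;

            if (&prev->link != head && prev->to == rg->from) {
                    prev->to = rg->to;      /* absorb rg into prev ... */
                    list_del(&rg->link);
                    kfree(rg);
                    rg = prev;              /* ... and keep checking from there */
            }

            next = list_next_entry(rg, link);
            if (&next->link != head && next->from == rg->to) {
                    rg->to = next->to;      /* absorb next into rg */
                    list_del(&next->link);
                    kfree(next);
            }
    }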
  
 -/* Must be called with resv->lock held. Calling this with count_only == true
 - * will count the number of pages to be added but will not modify the linked
 - * list. If regions_needed != NULL and count_only == true, then regions_needed
 - * will indicate the number of file_regions needed in the cache to carry out to
 - * add the regions for this range.
 +/*
 + * Must be called with resv->lock held.
 + *
 + * Calling this with regions_needed != NULL will count the number of pages
 + * to be added but will not modify the linked list; in that case,
 + * regions_needed will indicate the number of file_regions needed in the
 + * cache to carry out the addition of regions for this range.
   */
  static long add_reservation_in_range(struct resv_map *resv, long f, long t,
                                     struct hugetlb_cgroup *h_cg,
 -                                   struct hstate *h, long *regions_needed,
 -                                   bool count_only)
 +                                   struct hstate *h, long *regions_needed)
  {
        long add = 0;
        struct list_head *head = &resv->regions;
                 */
                if (rg->from > last_accounted_offset) {
                        add += rg->from - last_accounted_offset;
 -                      if (!count_only) {
 +                      if (!regions_needed) {
                                nrg = get_file_region_entry_from_cache(
                                        resv, last_accounted_offset, rg->from);
                                record_hugetlb_cgroup_uncharge_info(h_cg, h,
                                                                    resv, nrg);
                                list_add(&nrg->link, rg->link.prev);
                                coalesce_file_region(resv, nrg);
 -                      } else if (regions_needed)
 +                      } else
                                *regions_needed += 1;
                }
  
         */
        if (last_accounted_offset < t) {
                add += t - last_accounted_offset;
 -              if (!count_only) {
 +              if (!regions_needed) {
                        nrg = get_file_region_entry_from_cache(
                                resv, last_accounted_offset, t);
                        record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
                        list_add(&nrg->link, rg->link.prev);
                        coalesce_file_region(resv, nrg);
 -              } else if (regions_needed)
 +              } else
                        *regions_needed += 1;
        }
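
The count_only flag is folded into the regions_needed pointer itself: non-NULL now means "count only, report into *regions_needed", while NULL means "really modify the list", which removes the contradictory combinations the old two-parameter form allowed. The two calling modes, as they appear later in this diff:

    /* Counting pass: the list is untouched, needed entries are reported. */
    add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed);

    /* Commit pass: regions_needed == NULL, the list is modified. */
    add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);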
  
@@@ -444,8 -448,11 +444,8 @@@ static int allocate_file_region_entries
  
                spin_lock(&resv->lock);
  
 -              list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
 -                      list_del(&rg->link);
 -                      list_add(&rg->link, &resv->region_cache);
 -                      resv->region_cache_count++;
 -              }
 +              list_splice(&allocated_regions, &resv->region_cache);
 +              resv->region_cache_count += to_allocate;
        }
  
        return 0;
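
list_splice() attaches the whole freshly allocated batch in O(1) instead of deleting and re-adding each entry, and the cache counter is bumped once by to_allocate. Side by side (both forms taken from the hunk above):

    /* Before: O(n) per-entry moves. */
    list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
            list_del(&rg->link);
            list_add(&rg->link, &resv->region_cache);
            resv->region_cache_count++;
    }

    /* After: one O(1) splice plus a single counter update. */
    list_splice(&allocated_regions, &resv->region_cache);
    resv->region_cache_count += to_allocate;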
@@@ -485,8 -492,8 +485,8 @@@ static long region_add(struct resv_map 
  retry:
  
        /* Count how many regions are actually needed to execute this add. */
 -      add_reservation_in_range(resv, f, t, NULL, NULL, &actual_regions_needed,
 -                               true);
 +      add_reservation_in_range(resv, f, t, NULL, NULL,
 +                               &actual_regions_needed);
  
        /*
         * Check for sufficient descriptors in the cache to accommodate
                goto retry;
        }
  
 -      add = add_reservation_in_range(resv, f, t, h_cg, h, NULL, false);
 +      add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
  
        resv->adds_in_progress -= in_regions_needed;
  
@@@ -550,9 -557,9 +550,9 @@@ static long region_chg(struct resv_map 
  
        spin_lock(&resv->lock);
  
 -      /* Count how many hugepages in this range are NOT respresented. */
 +      /* Count how many hugepages in this range are NOT represented. */
        chg = add_reservation_in_range(resv, f, t, NULL, NULL,
 -                                     out_regions_needed, true);
 +                                     out_regions_needed);
  
        if (*out_regions_needed == 0)
                *out_regions_needed = 1;
@@@ -1040,17 -1047,21 +1040,17 @@@ static struct page *dequeue_huge_page_n
                if (nocma && is_migrate_cma_page(page))
                        continue;
  
 -              if (!PageHWPoison(page))
 -                      break;
 +              if (PageHWPoison(page))
 +                      continue;
 +
 +              list_move(&page->lru, &h->hugepage_activelist);
 +              set_page_refcounted(page);
 +              h->free_huge_pages--;
 +              h->free_huge_pages_node[nid]--;
 +              return page;
        }
  
 -      /*
 -       * if 'non-isolated free hugepage' not found on the list,
 -       * the allocation fails.
 -       */
 -      if (&h->hugepage_freelists[nid] == &page->lru)
 -              return NULL;
 -      list_move(&page->lru, &h->hugepage_activelist);
 -      set_page_refcounted(page);
 -      h->free_huge_pages--;
 -      h->free_huge_pages_node[nid]--;
 -      return page;
 +      return NULL;
  }
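
The loop now skips unusable pages with continue and returns the first good page directly, so the awkward post-loop sentinel comparison against the list head disappears. Assembled from the hunk above, the new shape reads:

    /* Sketch: return-from-loop instead of a post-loop sentinel test. */
    list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
            if (nocma && is_migrate_cma_page(page))
                    continue;
            if (PageHWPoison(page))
                    continue;

            /* found a usable page: claim it and return */
            list_move(&page->lru, &h->hugepage_activelist);
            set_page_refcounted(page);
            h->free_huge_pages--;
            h->free_huge_pages_node[nid]--;
            return page;
    }
    return NULL;    /* nothing usable on this node */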
  
  static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
@@@ -1500,9 -1511,9 +1500,9 @@@ static void prep_new_huge_page(struct h
  {
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
 -      spin_lock(&hugetlb_lock);
        set_hugetlb_cgroup(page, NULL);
        set_hugetlb_cgroup_rsvd(page, NULL);
 +      spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
@@@ -2412,7 -2423,7 +2412,7 @@@ struct page *alloc_huge_page(struct vm_
                        h->resv_huge_pages--;
                }
                spin_lock(&hugetlb_lock);
 -              list_move(&page->lru, &h->hugepage_activelist);
 +              list_add(&page->lru, &h->hugepage_activelist);
                /* Fall through */
        }
        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
@@@ -3571,20 -3582,18 +3571,20 @@@ void hugetlb_report_meminfo(struct seq_
        seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
  }
  
 -int hugetlb_report_node_meminfo(int nid, char *buf)
 +int hugetlb_report_node_meminfo(char *buf, int len, int nid)
  {
        struct hstate *h = &default_hstate;
 +
        if (!hugepages_supported())
                return 0;
 -      return sprintf(buf,
 -              "Node %d HugePages_Total: %5u\n"
 -              "Node %d HugePages_Free:  %5u\n"
 -              "Node %d HugePages_Surp:  %5u\n",
 -              nid, h->nr_huge_pages_node[nid],
 -              nid, h->free_huge_pages_node[nid],
 -              nid, h->surplus_huge_pages_node[nid]);
 +
 +      return sysfs_emit_at(buf, len,
 +                           "Node %d HugePages_Total: %5u\n"
 +                           "Node %d HugePages_Free:  %5u\n"
 +                           "Node %d HugePages_Surp:  %5u\n",
 +                           nid, h->nr_huge_pages_node[nid],
 +                           nid, h->free_huge_pages_node[nid],
 +                           nid, h->surplus_huge_pages_node[nid]);
  }
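
sysfs_emit_at() is the bounds-checked replacement for raw sprintf() into a sysfs buffer: it appends at the given offset within the one-page buffer and refuses to run past it, which is why the function now receives the caller's running length. A hedged sketch of the accumulate-and-append pattern (hypothetical show() callback; only the sysfs_emit_at() and hugetlb_report_node_meminfo() calls reflect the signatures used above):

    /* Sketch: a sysfs show() accumulating output safely. */
    static ssize_t stats_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
    {
            int len = 0;

            len += sysfs_emit_at(buf, len, "first:  %u\n", 1);
            len += sysfs_emit_at(buf, len, "second: %u\n", 2);
            len += hugetlb_report_node_meminfo(buf, len, dev->id);
            return len;
    }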
  
  void hugetlb_show_meminfo(void)
@@@ -3790,23 -3799,23 +3790,23 @@@ bool is_hugetlb_entry_migration(pte_t p
        if (huge_pte_none(pte) || pte_present(pte))
                return false;
        swp = pte_to_swp_entry(pte);
 -      if (non_swap_entry(swp) && is_migration_entry(swp))
 +      if (is_migration_entry(swp))
                return true;
        else
                return false;
  }
  
 -static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 +static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
  {
        swp_entry_t swp;
  
        if (huge_pte_none(pte) || pte_present(pte))
 -              return 0;
 +              return false;
        swp = pte_to_swp_entry(pte);
 -      if (non_swap_entry(swp) && is_hwpoison_entry(swp))
 -              return 1;
 +      if (is_hwpoison_entry(swp))
 +              return true;
        else
 -              return 0;
 +              return false;
  }
  
  int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
@@@ -5339,16 -5348,10 +5339,16 @@@ void adjust_range_if_pmd_sharing_possib
   * !shared pmd case because we can allocate the pmd later as well, it makes the
   * code much cleaner.
   *
 - * This routine must be called with i_mmap_rwsem held in at least read mode.
 - * For hugetlbfs, this prevents removal of any page table entries associated
 - * with the address space.  This is important as we are setting up sharing
 - * based on existing page table entries (mappings).
 + * This routine must be called with i_mmap_rwsem held in at least read mode if
 + * sharing is possible.  For hugetlbfs, this prevents removal of any page
 + * table entries associated with the address space.  This is important as we
 + * are setting up sharing based on existing page table entries (mappings).
 + *
 + * NOTE: This routine is only called from huge_pte_alloc.  Some callers of
 + * huge_pte_alloc know that sharing is not possible and do not take
 + * i_mmap_rwsem as a performance optimization.  This is handled by the
 + * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
 + * only required for subsequent processing.
   */
  pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  {
        if (!vma_shareable(vma, addr))
                return (pte_t *)pmd_alloc(mm, pud, addr);
  
 +      i_mmap_assert_locked(mapping);
        vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;
@@@ -5706,12 -5708,12 +5706,12 @@@ void __init hugetlb_cma_reserve(int ord
        reserved = 0;
        for_each_node_state(nid, N_ONLINE) {
                int res;
-               char name[20];
+               char name[CMA_MAX_NAME];
  
                size = min(per_node, hugetlb_cma_size - reserved);
                size = round_up(size, PAGE_SIZE << order);
  
-               snprintf(name, 20, "hugetlb%d", nid);
+               snprintf(name, sizeof(name), "hugetlb%d", nid);
                res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
                                                 0, false, name,
                                                 &hugetlb_cma[nid], nid);
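
Using CMA_MAX_NAME with sizeof(name) ties the buffer and the format bound to a single definition, so the per-node name can never truncate out of sync with what cma_declare_contiguous_nid() stores. The idiom, contrasted:

    char name[20];
    snprintf(name, 20, "hugetlb%d", nid);           /* before: magic 20, twice */

    char name[CMA_MAX_NAME];
    snprintf(name, sizeof(name), "hugetlb%d", nid); /* after: one source of truth */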
diff --combined mm/memory.c
index f482af8bc828dc14c8518b99b15cc2f18a9cdb5c,7e4e8b81a73823851afeca2b9303fd63f9731343..2afb01ea1307650453e22abfee3e4c18bcf3f81a
@@@ -65,7 -65,6 +65,6 @@@
  #include <linux/gfp.h>
  #include <linux/migrate.h>
  #include <linux/string.h>
- #include <linux/dma-debug.h>
  #include <linux/debugfs.h>
  #include <linux/userfaultfd_k.h>
  #include <linux/dax.h>
@@@ -773,105 -772,15 +772,105 @@@ copy_nonpresent_pte(struct mm_struct *d
        return 0;
  }
  
 -static inline void
 -copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 -              pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
 -              unsigned long addr, int *rss)
 +/*
 + * Copy a present and normal page if necessary.
 + *
 + * NOTE! The usual case is that this doesn't need to do
 + * anything, and can just return a positive value. That
 + * will let the caller know that it can just increase
 + * the page refcount and re-use the pte the traditional
 + * way.
 + *
 + * But _if_ we need to copy it because it needs to be
 + * pinned in the parent (and the child should get its own
 + * copy rather than just a reference to the same page),
 + * we'll do that here and return zero to let the caller
 + * know we're done.
 + *
 + * And if we need a pre-allocated page but don't yet have
 + * one, return a negative error to let the preallocation
 + * code know so that it can do so outside the page table
 + * lock.
 + */
 +static inline int
 +copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 +                pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
 +                struct page **prealloc, pte_t pte, struct page *page)
  {
 -      unsigned long vm_flags = vma->vm_flags;
 +      struct mm_struct *src_mm = src_vma->vm_mm;
 +      struct page *new_page;
 +
 +      if (!is_cow_mapping(src_vma->vm_flags))
 +              return 1;
 +
 +      /*
 +       * What we want to do is to check whether this page may
 +       * have been pinned by the parent process.  If so,
 +       * instead of wrprotect the pte on both sides, we copy
 +       * the page immediately so that we'll always guarantee
 +       * the pinned page won't be randomly replaced in the
 +       * future.
 +       *
 +       * The page pinning checks are just "has this mm ever
 +       * seen pinning", along with the (inexact) check of
 +       * the page count. That might give false positives for
 +       * pinning, but it will work correctly.
 +       */
 +      if (likely(!atomic_read(&src_mm->has_pinned)))
 +              return 1;
 +      if (likely(!page_maybe_dma_pinned(page)))
 +              return 1;
 +
 +      new_page = *prealloc;
 +      if (!new_page)
 +              return -EAGAIN;
 +
 +      /*
 +       * We have a prealloc page, all good!  Take it
 +       * over and copy the page & arm it.
 +       */
 +      *prealloc = NULL;
 +      copy_user_highpage(new_page, page, addr, src_vma);
 +      __SetPageUptodate(new_page);
 +      page_add_new_anon_rmap(new_page, dst_vma, addr, false);
 +      lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
 +      rss[mm_counter(new_page)]++;
 +
 +      /* All done, just insert the new page copy in the child */
 +      pte = mk_pte(new_page, dst_vma->vm_page_prot);
 +      pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma);
 +      set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
 +      return 0;
 +}
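
So copy_present_page() has a three-way contract: a positive return means "nothing to do, share the page the traditional way", 0 means "a private copy was installed in the child", and -EAGAIN means "a preallocated page is required; retry after allocating outside the lock". Condensed from the caller below:

    /* Sketch of the tri-state handling in copy_present_pte(). */
    retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
                               addr, rss, prealloc, pte, page);
    if (retval <= 0)        /* 0: copied; -EAGAIN: prealloc needed */
            return retval;

    /* retval > 0: share the existing page the traditional way */
    get_page(page);
    page_dup_rmap(page, false);
    rss[mm_counter(page)]++;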
 +
 +/*
 + * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
 + * is required to copy this pte.
 + */
 +static inline int
 +copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 +               pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
 +               struct page **prealloc)
 +{
 +      struct mm_struct *src_mm = src_vma->vm_mm;
 +      unsigned long vm_flags = src_vma->vm_flags;
        pte_t pte = *src_pte;
        struct page *page;
  
 +      page = vm_normal_page(src_vma, addr, pte);
 +      if (page) {
 +              int retval;
 +
 +              retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
 +                                         addr, rss, prealloc, pte, page);
 +              if (retval <= 0)
 +                      return retval;
 +
 +              get_page(page);
 +              page_dup_rmap(page, false);
 +              rss[mm_counter(page)]++;
 +      }
 +
        /*
         * If it's a COW mapping, write protect it both
         * in the parent and the child
        if (!(vm_flags & VM_UFFD_WP))
                pte = pte_clear_uffd_wp(pte);
  
 -      page = vm_normal_page(vma, addr, pte);
 -      if (page) {
 -              get_page(page);
 -              page_dup_rmap(page, false);
 -              rss[mm_counter(page)]++;
 +      set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
 +      return 0;
 +}
 +
 +static inline struct page *
 +page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
 +                 unsigned long addr)
 +{
 +      struct page *new_page;
 +
 +      new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
 +      if (!new_page)
 +              return NULL;
 +
 +      if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
 +              put_page(new_page);
 +              return NULL;
        }
 +      cgroup_throttle_swaprate(new_page, GFP_KERNEL);
  
 -      set_pte_at(dst_mm, addr, dst_pte, pte);
 +      return new_page;
  }
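
page_copy_prealloc() bundles allocation and the memcg charge: if the charge fails, the page is dropped and NULL is returned, so the caller only ever sees a fully charged page or NULL (which it maps to -ENOMEM). The caller-side contract, as used in copy_pte_range() below:

    prealloc = page_copy_prealloc(src_mm, src_vma, addr);
    if (!prealloc)          /* allocation or memcg charge failed */
            return -ENOMEM;
    /* prealloc is charged and ready; the copy loop retries with it */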
  
 -static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 -                 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
 -                 unsigned long addr, unsigned long end)
 +static int
 +copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 +             pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 +             unsigned long end)
  {
 +      struct mm_struct *dst_mm = dst_vma->vm_mm;
 +      struct mm_struct *src_mm = src_vma->vm_mm;
        pte_t *orig_src_pte, *orig_dst_pte;
        pte_t *src_pte, *dst_pte;
        spinlock_t *src_ptl, *dst_ptl;
 -      int progress = 0;
 +      int progress, ret = 0;
        int rss[NR_MM_COUNTERS];
        swp_entry_t entry = (swp_entry_t){0};
 +      struct page *prealloc = NULL;
  
  again:
 +      progress = 0;
        init_rss_vec(rss);
  
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
 -      if (!dst_pte)
 -              return -ENOMEM;
 +      if (!dst_pte) {
 +              ret = -ENOMEM;
 +              goto out;
 +      }
        src_pte = pte_offset_map(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                if (unlikely(!pte_present(*src_pte))) {
                        entry.val = copy_nonpresent_pte(dst_mm, src_mm,
                                                        dst_pte, src_pte,
 -                                                      vma, addr, rss);
 +                                                      src_vma, addr, rss);
                        if (entry.val)
                                break;
                        progress += 8;
                        continue;
                }
 -              copy_present_pte(dst_mm, src_mm, dst_pte, src_pte,
 -                               vma, addr, rss);
 +              /* copy_present_pte() will clear `*prealloc' if consumed */
 +              ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
 +                                     addr, rss, &prealloc);
 +              /*
 +               * If we need a pre-allocated page for this pte, drop the
 +               * locks, allocate, and try again.
 +               */
 +              if (unlikely(ret == -EAGAIN))
 +                      break;
 +              if (unlikely(prealloc)) {
 +                      /*
 +                       * The preallocated page cannot be reused the next
 +                       * time around, so that mempolicy is strictly followed
 +                       * (e.g., alloc_page_vma() allocates the page according
 +                       * to the address).  This can only happen if a pinned
 +                       * pte changed.
 +                       */
 +                      put_page(prealloc);
 +                      prealloc = NULL;
 +              }
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
  
        cond_resched();
  
        if (entry.val) {
 -              if (add_swap_count_continuation(entry, GFP_KERNEL) < 0)
 +              if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
 +                      ret = -ENOMEM;
 +                      goto out;
 +              }
 +              entry.val = 0;
 +      } else if (ret) {
 +              WARN_ON_ONCE(ret != -EAGAIN);
 +              prealloc = page_copy_prealloc(src_mm, src_vma, addr);
 +              if (!prealloc)
                        return -ENOMEM;
 -              progress = 0;
 +              /* We've captured and resolved the error. Reset, try again. */
 +              ret = 0;
        }
        if (addr != end)
                goto again;
 -      return 0;
 +out:
 +      if (unlikely(prealloc))
 +              put_page(prealloc);
 +      return ret;
  }
  
 -static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 -              pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
 -              unsigned long addr, unsigned long end)
 +static inline int
 +copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 +             pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
 +             unsigned long end)
  {
 +      struct mm_struct *dst_mm = dst_vma->vm_mm;
 +      struct mm_struct *src_mm = src_vma->vm_mm;
        pmd_t *src_pmd, *dst_pmd;
        unsigned long next;
  
                if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
                        || pmd_devmap(*src_pmd)) {
                        int err;
 -                      VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
 +                      VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
                        err = copy_huge_pmd(dst_mm, src_mm,
 -                                          dst_pmd, src_pmd, addr, vma);
 +                                          dst_pmd, src_pmd, addr, src_vma);
                        if (err == -ENOMEM)
                                return -ENOMEM;
                        if (!err)
                }
                if (pmd_none_or_clear_bad(src_pmd))
                        continue;
 -              if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
 -                                              vma, addr, next))
 +              if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
 +                                 addr, next))
                        return -ENOMEM;
        } while (dst_pmd++, src_pmd++, addr = next, addr != end);
        return 0;
  }
  
 -static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 -              p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma,
 -              unsigned long addr, unsigned long end)
 +static inline int
 +copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 +             p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
 +             unsigned long end)
  {
 +      struct mm_struct *dst_mm = dst_vma->vm_mm;
 +      struct mm_struct *src_mm = src_vma->vm_mm;
        pud_t *src_pud, *dst_pud;
        unsigned long next;
  
                if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
                        int err;
  
 -                      VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, vma);
 +                      VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
                        err = copy_huge_pud(dst_mm, src_mm,
 -                                          dst_pud, src_pud, addr, vma);
 +                                          dst_pud, src_pud, addr, src_vma);
                        if (err == -ENOMEM)
                                return -ENOMEM;
                        if (!err)
                }
                if (pud_none_or_clear_bad(src_pud))
                        continue;
 -              if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
 -                                              vma, addr, next))
 +              if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
 +                                 addr, next))
                        return -ENOMEM;
        } while (dst_pud++, src_pud++, addr = next, addr != end);
        return 0;
  }
  
 -static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 -              pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
 -              unsigned long addr, unsigned long end)
 +static inline int
 +copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 +             pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
 +             unsigned long end)
  {
 +      struct mm_struct *dst_mm = dst_vma->vm_mm;
        p4d_t *src_p4d, *dst_p4d;
        unsigned long next;
  
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(src_p4d))
                        continue;
 -              if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d,
 -                                              vma, addr, next))
 +              if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
 +                                 addr, next))
                        return -ENOMEM;
        } while (dst_p4d++, src_p4d++, addr = next, addr != end);
        return 0;
  }
  
 -int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 -              struct vm_area_struct *vma)
 +int
 +copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
  {
        pgd_t *src_pgd, *dst_pgd;
        unsigned long next;
 -      unsigned long addr = vma->vm_start;
 -      unsigned long end = vma->vm_end;
 +      unsigned long addr = src_vma->vm_start;
 +      unsigned long end = src_vma->vm_end;
 +      struct mm_struct *dst_mm = dst_vma->vm_mm;
 +      struct mm_struct *src_mm = src_vma->vm_mm;
        struct mmu_notifier_range range;
        bool is_cow;
        int ret;
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
 -      if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
 -                      !vma->anon_vma)
 +      if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
 +          !src_vma->anon_vma)
                return 0;
  
 -      if (is_vm_hugetlb_page(vma))
 -              return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 +      if (is_vm_hugetlb_page(src_vma))
 +              return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
  
 -      if (unlikely(vma->vm_flags & VM_PFNMAP)) {
 +      if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
                /*
                 * We do not free on error cases below as remove_vma
                 * gets called on error from higher level routine
                 */
 -              ret = track_pfn_copy(vma);
 +              ret = track_pfn_copy(src_vma);
                if (ret)
                        return ret;
        }
         * parent mm. And a permission downgrade will only happen if
         * is_cow_mapping() returns true.
         */
 -      is_cow = is_cow_mapping(vma->vm_flags);
 +      is_cow = is_cow_mapping(src_vma->vm_flags);
  
        if (is_cow) {
                mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
 -                                      0, vma, src_mm, addr, end);
 +                                      0, src_vma, src_mm, addr, end);
                mmu_notifier_invalidate_range_start(&range);
        }
  
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
 -              if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd,
 -                                          vma, addr, next))) {
 +              if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
 +                                          addr, next))) {
                        ret = -ENOMEM;
                        break;
                }
@@@ -3594,7 -3444,7 +3593,7 @@@ static vm_fault_t __do_fault(struct vm_
         *                              unlock_page(A)
         * lock_page(B)
         *                              lock_page(B)
 -       * pte_alloc_pne
 +       * pte_alloc_one
         *   shrink_page_list
         *     wait_on_page_writeback(A)
         *                              SetPageWriteback(B)
         *                              # flush A, B to clear the writeback
         */
        if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
 -              vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
 +              vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
                if (!vmf->prealloc_pte)
                        return VM_FAULT_OOM;
                smp_wmb(); /* See comment in __pte_alloc() */
@@@ -3769,7 -3619,7 +3768,7 @@@ static vm_fault_t do_set_pmd(struct vm_
  
  /**
   * alloc_set_pte - setup new PTE entry for given page and add reverse page
 - * mapping. If needed, the fucntion allocates page table or use pre-allocated.
 + * mapping. If needed, the function allocates page table or use pre-allocated.
   *
   * @vmf: fault environment
   * @page: page to map