Git Repo - linux.git/commitdiff
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
authorLinus Torvalds <[email protected]>
Thu, 2 Sep 2021 21:47:21 +0000 (14:47 -0700)
committerLinus Torvalds <[email protected]>
Thu, 2 Sep 2021 21:47:21 +0000 (14:47 -0700)
Pull rdma updates from Jason Gunthorpe:
 "This is quite a small cycle, no major series stands out. The HNS and
  rxe drivers saw the most activity this cycle, with rxe being broken
  for a good chunk of time. The significant deleted line count is due to
  a SPDX cleanup series.

  Summary:

   - Various cleanup and small features for rtrs

   - kmap_local_page() conversions

   - Driver updates and fixes for: efa, rxe, mlx5, hfi1, qed, hns

   - Cache the IB subnet prefix

   - Rework how CRC is calculated in rxe

   - Clean reference counting in iwpm's netlink

   - Pull object allocation and lifecycle for user QPs to the uverbs
     core code

   - Several small hns features and continued general code cleanups

   - Fix the scatterlist confusion of orig_nents/nents introduced in an
     earlier patch creating the append operation"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (90 commits)
  RDMA/mlx5: Relax DCS QP creation checks
  RDMA/hns: Delete unnecessary blank lines.
  RDMA/hns: Encapsulate the qp db as a function
  RDMA/hns: Adjust the order in which irq are requested and enabled
  RDMA/hns: Remove RST2RST error prints for hw v1
  RDMA/hns: Remove dqpn filling when modify qp from Init to Init
  RDMA/hns: Fix QP's resp incomplete assignment
  RDMA/hns: Fix query destination qpn
  RDMA/hfi1: Convert to SPDX identifier
  IB/rdmavt: Convert to SPDX identifier
  RDMA/hns: Bugfix for incorrect association between dip_idx and dgid
  RDMA/hns: Bugfix for the missing assignment for dip_idx
  RDMA/hns: Bugfix for data type of dip_idx
  RDMA/hns: Fix incorrect lsn field
  RDMA/irdma: Remove the repeated declaration
  RDMA/core/sa_query: Retry SA queries
  RDMA: Use the sg_table directly and remove the opencoded version from umem
  lib/scatterlist: Fix wrong update of orig_nents
  lib/scatterlist: Provide a dedicated function to support table append
  RDMA/hns: Delete unused hns bitmap interface
  ...

1  2 
MAINTAINERS
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/efa/efa_main.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/mlx5/main.c

diff --combined MAINTAINERS
index ad456ca8f7405a4be992e44bbbab474f649f326f,b25f14a04917970e8daeaa2c9dcf8ac8323ca0b5..fb1c48c340092c63ed99633e611b0e53312a4d38
@@@ -459,12 -459,6 +459,12 @@@ S:       Maintaine
  W:    https://parisc.wiki.kernel.org/index.php/AD1889
  F:    sound/pci/ad1889.*
  
 +AD5110 ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
 +M:    Mugilraj Dhavachelvan <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/iio/potentiometer/ad5110.c
 +
  AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
  M:    Michael Hennerich <[email protected]>
  S:    Supported
@@@ -798,7 -792,7 +798,7 @@@ F: Documentation/devicetree/bindings/i2
  F:    drivers/i2c/busses/i2c-altera.c
  
  ALTERA MAILBOX DRIVER
 -M:    Ley Foon Tan <ley.foon.tan@intel.com>
 +M:    Joyce Ooi <joyce.ooi@intel.com>
  S:    Maintained
  F:    drivers/mailbox/mailbox-altera.c
  
  S:    Maintained
  F:    drivers/media/i2c/aptina-pll.*
  
 +AQUACOMPUTER D5 NEXT PUMP SENSOR DRIVER
 +M:    Aleksa Savic <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/hwmon/aquacomputer_d5next.rst
 +F:    drivers/hwmon/aquacomputer_d5next.c
 +
  AQUANTIA ETHERNET DRIVER (atlantic)
  M:    Igor Russkikh <[email protected]>
  L:    [email protected]
@@@ -1395,7 -1382,7 +1395,7 @@@ F:      Documentation/devicetree/bindings/ar
  F:    Documentation/devicetree/bindings/arm/arm,realview.yaml
  F:    Documentation/devicetree/bindings/arm/arm,versatile.yaml
  F:    Documentation/devicetree/bindings/arm/arm,vexpress-juno.yaml
 -F:    Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
 +F:    Documentation/devicetree/bindings/auxdisplay/arm,versatile-lcd.yaml
  F:    Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
  F:    Documentation/devicetree/bindings/i2c/i2c-versatile.txt
  F:    Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
@@@ -1501,7 -1488,7 +1501,7 @@@ M:      Miquel Raynal <miquel.raynal@bootlin
  M:    Naga Sureshkumar Relli <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/devicetree/bindings/mtd/arm,pl353-smc.yaml
 +F:    Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml
  F:    drivers/memory/pl353-smc.c
  
  ARM PRIMECELL CLCD PL110 DRIVER
@@@ -1703,7 -1690,7 +1703,7 @@@ L:      [email protected]
  S:    Maintained
  W:    https://asahilinux.org
  B:    https://github.com/AsahiLinux/linux/issues
 -C:    irc://chat.freenode.net/asahi-dev
 +C:    irc://irc.oftc.net/asahi-dev
  T:    git https://github.com/AsahiLinux/linux.git
  F:    Documentation/devicetree/bindings/arm/apple.yaml
  F:    Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
@@@ -2023,12 -2010,10 +2023,12 @@@ M:   Krzysztof Halasa <[email protected]
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
 +F:    Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.yaml
  F:    Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt
  F:    Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
  F:    Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
  F:    arch/arm/mach-ixp4xx/
 +F:    drivers/bus/intel-ixp4xx-eb.c
  F:    drivers/clocksource/timer-ixp4xx.c
  F:    drivers/crypto/ixp4xx_crypto.c
  F:    drivers/gpio/gpio-ixp4xx.c
@@@ -2857,7 -2842,7 +2857,7 @@@ AS3645A LED FLASH CONTROLLER DRIVE
  M:    Sakari Ailus <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/leds/leds-as3645a.c
 +F:    drivers/leds/flash/leds-as3645a.c
  
  ASAHI KASEI AK7375 LENS VOICE COIL DRIVER
  M:    Tianshu Qiu <[email protected]>
@@@ -3212,7 -3197,7 +3212,7 @@@ S:      Maintaine
  W:    https://www.open-mesh.org/
  Q:    https://patchwork.open-mesh.org/project/batman/list/
  B:    https://www.open-mesh.org/projects/batman-adv/issues
 -C:    irc://chat.freenode.net/batman
 +C:    ircs://irc.hackint.org/batadv
  T:    git https://git.open-mesh.org/linux-merge.git
  F:    Documentation/networking/batman-adv.rst
  F:    include/uapi/linux/batadv_packet.h
@@@ -3424,6 -3409,7 +3424,6 @@@ F:      drivers/net/ethernet/netronome/nfp/b
  
  BPF JIT for POWERPC (32-BIT AND 64-BIT)
  M:    Naveen N. Rao <[email protected]>
 -M:    Sandipan Das <[email protected]>
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
@@@ -3869,7 -3855,7 +3869,7 @@@ M:      Markus Mayer <[email protected]
  M:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.txt
 +F:    Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
  F:    drivers/memory/brcmstb_dpfe.c
  
  BROADCOM STB NAND FLASH DRIVER
  S:    Maintained
  F:    drivers/mtd/nand/raw/brcmnand/
  
 +BROADCOM STB PCIE DRIVER
 +M:    Jim Quinlan <[email protected]>
 +M:    Nicolas Saenz Julienne <[email protected]>
 +M:    Florian Fainelli <[email protected]>
 +M:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
 +F:    drivers/pci/controller/pcie-brcmstb.c
 +
  BROADCOM SYSTEMPORT ETHERNET DRIVER
  M:    Florian Fainelli <[email protected]>
  L:    [email protected]
@@@ -4522,7 -4498,7 +4522,7 @@@ L:      [email protected]
  S:    Supported
  W:    https://clangbuiltlinux.github.io/
  B:    https://github.com/ClangBuiltLinux/linux/issues
 -C:    irc://chat.freenode.net/clangbuiltlinux
 +C:    irc://irc.libera.chat/clangbuiltlinux
  F:    Documentation/kbuild/llvm.rst
  F:    include/linux/compiler-clang.h
  F:    scripts/clang-tools/
@@@ -4634,7 -4610,7 +4634,7 @@@ F:      include/linux/clk
  F:    include/linux/of_clk.h
  X:    drivers/clk/clkdev.c
  
 -COMMON INTERNET FILE SYSTEM (CIFS)
 +COMMON INTERNET FILE SYSTEM CLIENT (CIFS)
  M:    Steve French <[email protected]>
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
@@@ -4643,7 -4619,6 +4643,7 @@@ W:      http://linux-cifs.samba.org
  T:    git git://git.samba.org/sfrench/cifs-2.6.git
  F:    Documentation/admin-guide/cifs/
  F:    fs/cifs/
 +F:    fs/cifs_common/
  
  COMPACTPCI HOTPLUG CORE
  M:    Scott Murray <[email protected]>
@@@ -5595,7 -5570,7 +5595,7 @@@ M:      Lukasz Luba <[email protected]
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/devicetree/bindings/memory-controllers/exynos5422-dmc.txt
 +F:    Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
  F:    drivers/memory/samsung/exynos5422-dmc.c
  
  DME1737 HARDWARE MONITOR DRIVER
@@@ -5709,7 -5684,6 +5709,7 @@@ DPAA2 ETHERNET SWITCH DRIVE
  M:    Ioana Ciornei <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
  F:    drivers/net/ethernet/freescale/dpaa2/dpaa2-switch*
  F:    drivers/net/ethernet/freescale/dpaa2/dpsw*
  
@@@ -5733,11 -5707,6 +5733,11 @@@ F:    Documentation/admin-guide/blockdev
  F:    drivers/block/drbd/
  F:    lib/lru_cache.c
  
 +DRIVER COMPONENT FRAMEWORK
 +L:    [email protected]
 +F:    drivers/base/component.c
 +F:    include/linux/component.h
 +
  DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
  M:    Greg Kroah-Hartman <[email protected]>
  R:    "Rafael J. Wysocki" <[email protected]>
@@@ -5801,7 -5770,7 +5801,7 @@@ M:      Gerd Hoffmann <[email protected]
  L:    [email protected]
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
 -F:    drivers/gpu/drm/bochs/
 +F:    drivers/gpu/drm/tiny/bochs.c
  
  DRM DRIVER FOR BOE HIMAX8279D PANELS
  M:    Jerry Han <[email protected]>
@@@ -5986,13 -5955,6 +5986,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
  F:    drivers/gpu/drm/panel/panel-raydium-rm67191.c
  
 +DRM DRIVER FOR SAMSUNG DB7430 PANELS
 +M:    Linus Walleij <[email protected]>
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
 +F:    drivers/gpu/drm/panel/panel-samsung-db7430.c
 +
  DRM DRIVER FOR SITRONIX ST7703 PANELS
  M:    Guido Günther <[email protected]>
  R:    Purism Kernel Team <[email protected]>
@@@ -6091,27 -6053,21 +6091,27 @@@ F:   drivers/gpu/drm/vboxvideo
  
  DRM DRIVER FOR VMWARE VIRTUAL GPU
  M:    "VMware Graphics" <[email protected]>
 -M:    Roland Scheidegger <[email protected]>
  M:    Zack Rusin <[email protected]>
  L:    [email protected]
  S:    Supported
 -T:    git git://people.freedesktop.org/~sroland/linux
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    drivers/gpu/drm/vmwgfx/
  F:    include/uapi/drm/vmwgfx_drm.h
  
 +DRM DRIVER FOR WIDECHIPS WS2401 PANELS
 +M:    Linus Walleij <[email protected]>
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
 +F:    drivers/gpu/drm/panel/panel-widechips-ws2401.c
 +
  DRM DRIVERS
  M:    David Airlie <[email protected]>
  M:    Daniel Vetter <[email protected]>
  L:    [email protected]
  S:    Maintained
  B:    https://gitlab.freedesktop.org/drm
 -C:    irc://chat.freenode.net/dri-devel
 +C:    irc://irc.oftc.net/dri-devel
  T:    git git://anongit.freedesktop.org/drm/drm
  F:    Documentation/devicetree/bindings/display/
  F:    Documentation/devicetree/bindings/gpu/
@@@ -6604,7 -6560,6 +6604,7 @@@ EDAC-ARMAD
  M:    Jan Luebbe <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
  F:    drivers/edac/armada_xp_*
  
  EDAC-AST2500
@@@ -6949,12 -6904,6 +6949,12 @@@ M:    Mark Einon <[email protected]
  S:    Odd Fixes
  F:    drivers/net/ethernet/agere/
  
 +ETAS ES58X CAN/USB DRIVER
 +M:    Vincent Mailhol <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/net/can/usb/etas_es58x/
 +
  ETHERNET BRIDGE
  M:    Roopa Prabhu <[email protected]>
  M:    Nikolay Aleksandrov <[email protected]>
@@@ -6996,7 -6945,7 +6996,7 @@@ F:      include/uapi/linux/mdio.
  F:    include/uapi/linux/mii.h
  
  EXFAT FILE SYSTEM
 -M:    Namjae Jeon <[email protected]>
 +M:    Namjae Jeon <[email protected]>
  M:    Sungjong Seo <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -8446,7 -8395,7 +8446,7 @@@ F:      drivers/crypto/hisilicon/sgl.
  F:    drivers/crypto/hisilicon/zip/
  
  HISILICON ROCE DRIVER
- M:    Lijun Ou <oulijun@huawei.com>
+ M:    Wenpeng Liang <liangwenpeng@huawei.com>
  M:    Weihang Li <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -8484,12 -8433,10 +8484,12 @@@ S:   Maintaine
  F:    Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
  F:    drivers/spmi/hisi-spmi-controller.c
  
 -HISILICON STAGING DRIVERS FOR HIKEY 960/970
 +HISILICON SPMI PMIC DRIVER FOR HIKEY 6421v600
  M:    Mauro Carvalho Chehab <[email protected]>
 +L:    [email protected]
  S:    Maintained
 -F:    drivers/staging/hikey9xx/
 +F:    Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
 +F:    drivers/mfd/hi6421-spmi-pmic.c
  
  HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
  M:    Zaibo Xu <[email protected]>
@@@ -8648,9 -8595,6 +8648,9 @@@ T:      git git://git.kernel.org/pub/scm/lin
  F:    Documentation/ABI/stable/sysfs-bus-vmbus
  F:    Documentation/ABI/testing/debugfs-hyperv
  F:    Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst
 +F:    arch/arm64/hyperv
 +F:    arch/arm64/include/asm/hyperv-tlfs.h
 +F:    arch/arm64/include/asm/mshyperv.h
  F:    arch/x86/hyperv
  F:    arch/x86/include/asm/hyperv-tlfs.h
  F:    arch/x86/include/asm/mshyperv.h
@@@ -9097,7 -9041,7 +9097,7 @@@ F:      drivers/usb/atm/ueagle-atm.
  IMGTEC ASCII LCD DRIVER
  M:    Paul Burton <[email protected]>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
 +F:    Documentation/devicetree/bindings/auxdisplay/img,ascii-lcd.yaml
  F:    drivers/auxdisplay/img-ascii-lcd.c
  
  IMGTEC IR DECODER DRIVER
@@@ -9269,20 -9213,13 +9269,20 @@@ INTEL ATOMISP2 DUMMY / POWER-MANAGEMEN
  M:    Hans de Goede <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel_atomisp2_pm.c
 +F:    drivers/platform/x86/intel/atomisp2/pm.c
  
  INTEL ATOMISP2 LED DRIVER
  M:    Hans de Goede <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel_atomisp2_led.c
 +F:    drivers/platform/x86/intel/atomisp2/led.c
 +
 +INTEL BIOS SAR INT1092 DRIVER
 +M:    Shravan S <[email protected]>
 +M:    Intel Corporation <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/platform/x86/intel/int1092/
  
  INTEL BROXTON PMC DRIVER
  M:    Mika Westerberg <[email protected]>
@@@ -9314,7 -9251,7 +9314,7 @@@ S:      Supporte
  W:    https://01.org/linuxgraphics/
  Q:    http://patchwork.freedesktop.org/project/intel-gfx/
  B:    https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
 -C:    irc://chat.freenode.net/intel-gfx
 +C:    irc://irc.oftc.net/intel-gfx
  T:    git git://anongit.freedesktop.org/drm-intel
  F:    Documentation/gpu/i915.rst
  F:    drivers/gpu/drm/i915/
@@@ -9378,7 -9315,7 +9378,7 @@@ INTEL HID EVENT DRIVE
  M:    Alex Hung <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel-hid.c
 +F:    drivers/platform/x86/intel/hid.c
  
  INTEL I/OAT DMA DRIVER
  M:    Dave Jiang <[email protected]>
@@@ -9522,17 -9459,17 +9522,17 @@@ F:   include/linux/mfd/intel-m10-bmc.
  
  INTEL MENLOW THERMAL DRIVER
  M:    Sujith Thomas <[email protected]>
 -L:    platform-driver-x86@vger.kernel.org
 +L:    linux-pm@vger.kernel.org
  S:    Supported
  W:    https://01.org/linux-acpi
 -F:    drivers/platform/x86/intel_menlow.c
 +F:    drivers/thermal/intel/intel_menlow.c
  
  INTEL P-Unit IPC DRIVER
  M:    Zha Qipeng <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    arch/x86/include/asm/intel_punit_ipc.h
 -F:    drivers/platform/x86/intel_punit_ipc.c
 +F:    drivers/platform/x86/intel/punit_ipc.c
  
  INTEL PMC CORE DRIVER
  M:    Rajneesh Bhardwaj <[email protected]>
@@@ -9540,7 -9477,7 +9540,7 @@@ M:      David E Box <[email protected]
  L:    [email protected]
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-platform-intel-pmc
 -F:    drivers/platform/x86/intel_pmc_core*
 +F:    drivers/platform/x86/intel/pmc/
  
  INTEL PMIC GPIO DRIVERS
  M:    Andy Shevchenko <[email protected]>
@@@ -9558,7 -9495,7 +9558,7 @@@ INTEL PMT DRIVE
  M:    "David E. Box" <[email protected]>
  S:    Maintained
  F:    drivers/mfd/intel_pmt.c
 -F:    drivers/platform/x86/intel_pmt_*
 +F:    drivers/platform/x86/intel/pmt/
  
  INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
  M:    Stanislav Yakovlev <[email protected]>
@@@ -9595,7 -9532,7 +9595,7 @@@ INTEL SPEED SELECT TECHNOLOG
  M:    Srinivas Pandruvada <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel_speed_select_if/
 +F:    drivers/platform/x86/intel/speed_select_if/
  F:    include/uapi/linux/isst_if.h
  F:    tools/power/x86/intel-speed-select/
  
@@@ -9616,19 -9553,19 +9616,19 @@@ M:   "David E. Box" <[email protected]
  L:    [email protected]
  S:    Maintained
  F:    arch/x86/include/asm/intel_telemetry.h
 -F:    drivers/platform/x86/intel_telemetry*
 +F:    drivers/platform/x86/intel/telemetry/
  
  INTEL UNCORE FREQUENCY CONTROL
  M:    Srinivas Pandruvada <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel-uncore-frequency.c
 +F:    drivers/platform/x86/intel/uncore-frequency.c
  
  INTEL VIRTUAL BUTTON DRIVER
  M:    AceLan Kao <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel-vbtn.c
 +F:    drivers/platform/x86/intel/vbtn.c
  
  INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
  M:    Stanislaw Gruszka <[email protected]>
@@@ -9649,12 -9586,12 +9649,12 @@@ M:   Jithu Joseph <[email protected]
  R:    Maurice Ma <[email protected]>
  S:    Maintained
  W:    https://slimbootloader.github.io/security/firmware-update.html
 -F:    drivers/platform/x86/intel-wmi-sbl-fw-update.c
 +F:    drivers/platform/x86/intel/wmi/sbl-fw-update.c
  
  INTEL WMI THUNDERBOLT FORCE POWER DRIVER
  L:    [email protected]
  S:    Maintained
 -F:    drivers/platform/x86/intel-wmi-thunderbolt.c
 +F:    drivers/platform/x86/intel/wmi/thunderbolt.c
  
  INTEL WWAN IOSM DRIVER
  M:    M Chetan Kumar <[email protected]>
@@@ -9812,6 -9749,11 +9812,6 @@@ M:     David Sterba <[email protected]
  S:    Odd Fixes
  F:    drivers/tty/ipwireless/
  
 -IPX NETWORK LAYER
 -L:    [email protected]
 -S:    Obsolete
 -F:    include/uapi/linux/ipx.h
 -
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
  M:    Marc Zyngier <[email protected]>
  S:    Maintained
@@@ -10161,17 -10103,6 +10161,17 @@@ T: git git://git.kernel.org/pub/scm/lin
  F:    Documentation/dev-tools/kselftest*
  F:    tools/testing/selftests/
  
 +KERNEL SMB3 SERVER (KSMBD)
 +M:    Namjae Jeon <[email protected]>
 +M:    Sergey Senozhatsky <[email protected]>
 +M:    Steve French <[email protected]>
 +M:    Hyunchul Lee <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://git.samba.org/ksmbd.git
 +F:    fs/cifs_common/
 +F:    fs/ksmbd/
 +
  KERNEL UNIT TESTING FRAMEWORK (KUnit)
  M:    Brendan Higgins <[email protected]>
  L:    [email protected]
@@@ -10457,7 -10388,6 +10457,7 @@@ F:   net/core/skmsg.
  F:    net/core/sock_map.c
  F:    net/ipv4/tcp_bpf.c
  F:    net/ipv4/udp_bpf.c
 +F:    net/unix/unix_bpf.c
  
  LANDLOCK SECURITY MODULE
  M:    Mickaël Salaün <[email protected]>
@@@ -10679,6 -10609,15 +10679,6 @@@ F:  LICENSES
  F:    scripts/spdxcheck-test.sh
  F:    scripts/spdxcheck.py
  
 -LIGHTNVM PLATFORM SUPPORT
 -M:    Matias Bjorling <[email protected]>
 -L:    [email protected]
 -S:    Maintained
 -W:    http://github/OpenChannelSSD
 -F:    drivers/lightnvm/
 -F:    include/linux/lightnvm.h
 -F:    include/uapi/linux/lightnvm.h
 -
  LINEAR RANGES HELPERS
  M:    Mark Brown <[email protected]>
  R:    Matti Vaittinen <[email protected]>
@@@ -11091,18 -11030,6 +11091,18 @@@ F: drivers/mailbox/arm_mhuv2.
  F:    include/linux/mailbox/arm_mhuv2_message.h
  F:    Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
  
 +MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
 +M:    Jeremy Kerr <[email protected]>
 +M:    Matt Johnston <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/networking/mctp.rst
 +F:    drivers/net/mctp/
 +F:    include/net/mctp.h
 +F:    include/net/mctpdevice.h
 +F:    include/net/netns/mctp.h
 +F:    net/mctp/
 +
  MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
  M:    Michael Kerrisk <[email protected]>
  L:    [email protected]
@@@ -11400,12 -11327,6 +11400,12 @@@ W: https://linuxtv.or
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/radio/radio-maxiradio*
  
 +MAXLINEAR ETHERNET PHY DRIVER
 +M:    Xu Liang <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/net/phy/mxl-gpy.c
 +
  MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
  R:    Yasushi SHOJI <[email protected]>
  L:    [email protected]
@@@ -13859,15 -13780,6 +13859,15 @@@ T: git git://linuxtv.org/media_tree.gi
  F:    Documentation/devicetree/bindings/media/i2c/ov8856.yaml
  F:    drivers/media/i2c/ov8856.c
  
 +OMNIVISION OV9282 SENSOR DRIVER
 +M:    Paul J. Murphy <[email protected]>
 +M:    Daniele Alessandrelli <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
 +F:    drivers/media/i2c/ov9282.c
 +
  OMNIVISION OV9640 SENSOR DRIVER
  M:    Petr Cvek <[email protected]>
  L:    [email protected]
@@@ -13958,12 -13870,6 +13958,12 @@@ F: Documentation/devicetree
  F:    arch/*/boot/dts/
  F:    include/dt-bindings/
  
 +OPENCOMPUTE PTP CLOCK DRIVER
 +M:    Jonathan Lemon <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/ptp/ptp_ocp.c
 +
  OPENCORES I2C BUS DRIVER
  M:    Peter Korsgaard <[email protected]>
  M:    Andrew Lunn <[email protected]>
@@@ -14289,7 -14195,7 +14289,7 @@@ M:   Lucas Stach <[email protected]
  L:    [email protected]
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
 +F:    Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
  F:    drivers/pci/controller/dwc/*imx6*
  
  PCI DRIVER FOR FU740
@@@ -14377,8 -14283,7 +14377,8 @@@ M:   Jingoo Han <[email protected]
  M:    Gustavo Pimentel <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/devicetree/bindings/pci/designware-pcie.txt
 +F:    Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
 +F:    Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
  F:    drivers/pci/controller/dwc/*designware*
  
  PCI DRIVER FOR TI DRA7XX/J721E
@@@ -14515,7 -14420,7 +14515,7 @@@ M:   Xiaowei Song <songxiaowei@hisilicon.
  M:    Binghui Wang <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/devicetree/bindings/pci/kirin-pcie.txt
 +F:    Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
  F:    drivers/pci/controller/dwc/pcie-kirin.c
  
  PCIE DRIVER FOR HISILICON STB
@@@ -14525,13 -14430,6 +14525,13 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
  F:    drivers/pci/controller/dwc/pcie-histb.c
  
 +PCIE DRIVER FOR INTEL LGM GW SOC
 +M:    Rahul Tanwar <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
 +F:    drivers/pci/controller/dwc/pcie-intel-gw.c
 +
  PCIE DRIVER FOR MEDIATEK
  M:    Ryder Lee <[email protected]>
  M:    Jianjun Wang <[email protected]>
@@@ -14727,12 -14625,6 +14727,12 @@@ F: Documentation/driver-api/pin-control
  F:    drivers/pinctrl/
  F:    include/linux/pinctrl/
  
 +PIN CONTROLLER - AMD
 +M:    Basavaraj Natikar <[email protected]>
 +M:    Shyam Sundar S K <[email protected]>
 +S:    Maintained
 +F:    drivers/pinctrl/pinctrl-amd.c
 +
  PIN CONTROLLER - FREESCALE
  M:    Dong Aisheng <[email protected]>
  M:    Fabio Estevam <[email protected]>
@@@ -14751,19 -14643,12 +14751,19 @@@ S:        Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
  F:    drivers/pinctrl/intel/
  
 +PIN CONTROLLER - KEEMBAY
 +M:    Lakshmi Sowjanya D <[email protected]>
 +S:    Supported
 +F:    drivers/pinctrl/pinctrl-keembay*
 +
  PIN CONTROLLER - MEDIATEK
  M:    Sean Wang <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/devicetree/bindings/pinctrl/pinctrl-mt65xx.txt
 -F:    Documentation/devicetree/bindings/pinctrl/pinctrl-mt7622.txt
 +F:    Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
 +F:    Documentation/devicetree/bindings/pinctrl/mediatek,mt6797-pinctrl.yaml
 +F:    Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
 +F:    Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
  F:    drivers/pinctrl/mediatek/
  
  PIN CONTROLLER - MICROCHIP AT91
@@@ -15041,10 -14926,12 +15041,10 @@@ S:        Maintaine
  F:    include/linux/printk.h
  F:    kernel/printk/
  
 -PRISM54 WIRELESS DRIVER
 -M:    Luis Chamberlain <[email protected]>
 -L:    [email protected]
 -S:    Obsolete
 -W:    https://wireless.wiki.kernel.org/en/users/Drivers/p54
 -F:    drivers/net/wireless/intersil/prism54/
 +PRINTK INDEXING
 +R:    Chris Down <[email protected]>
 +S:    Maintained
 +F:    kernel/printk/index.c
  
  PROC FILESYSTEM
  L:    [email protected]
  S:    Maintained
  F:    drivers/phy/renesas/phy-rcar-gen3-usb*.c
  
 +RENESAS RZ/G2L A/D DRIVER
 +M:    Lad Prabhakar <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Supported
 +F:    Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
 +F:    drivers/iio/adc/rzg2l_adc.c
 +
  RESET CONTROLLER FRAMEWORK
  M:    Philipp Zabel <[email protected]>
  S:    Maintained
@@@ -16439,7 -16318,7 +16439,7 @@@ SAMSUNG EXYNOS TRUE RANDOM NUMBER GENER
  M:    Łukasz Stelmach <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.txt
 +F:    Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
  F:    drivers/char/hw_random/exynos-trng.c
  
  SAMSUNG FRAMEBUFFER DRIVER
@@@ -16532,14 -16411,10 +16532,14 @@@ L:        [email protected]
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
  F:    Documentation/devicetree/bindings/clock/exynos*.txt
 +F:    Documentation/devicetree/bindings/clock/samsung,*.yaml
  F:    Documentation/devicetree/bindings/clock/samsung,s3c*
  F:    Documentation/devicetree/bindings/clock/samsung,s5p*
  F:    drivers/clk/samsung/
  F:    include/dt-bindings/clock/exynos*.h
 +F:    include/dt-bindings/clock/s3c*.h
 +F:    include/dt-bindings/clock/s5p*.h
 +F:    include/dt-bindings/clock/samsung,*.h
  F:    include/linux/clk/samsung.h
  F:    include/linux/platform_data/clk-s3c2410.h
  
@@@ -16581,12 -16456,6 +16581,12 @@@ F: drivers/phy/samsung/phy-s5pv210-usb2
  F:    drivers/phy/samsung/phy-samsung-usb2.c
  F:    drivers/phy/samsung/phy-samsung-usb2.h
  
 +SANCLOUD BEAGLEBONE ENHANCED DEVICE TREE
 +M:    Paul Barker <[email protected]>
 +R:    Marc Murphy <[email protected]>
 +S:    Supported
 +F:    arch/arm/boot/dts/am335x-sancloud*
 +
  SC1200 WDT DRIVER
  M:    Zwane Mwaikambo <[email protected]>
  S:    Maintained
@@@ -16846,12 -16715,6 +16846,12 @@@ F: drivers/iio/chemical/scd30_core.
  F:    drivers/iio/chemical/scd30_i2c.c
  F:    drivers/iio/chemical/scd30_serial.c
  
 +SENSIRION SGP40 GAS SENSOR DRIVER
 +M:    Andreas Klinger <[email protected]>
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
 +F:    drivers/iio/chemical/sgp40.c
 +
  SENSIRION SPS30 AIR POLLUTION SENSOR DRIVER
  M:    Tomasz Duszynski <[email protected]>
  S:    Maintained
@@@ -17430,15 -17293,6 +17430,15 @@@ T: git git://linuxtv.org/media_tree.gi
  F:    Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
  F:    drivers/media/i2c/imx334.c
  
 +SONY IMX335 SENSOR DRIVER
 +M:    Paul J. Murphy <[email protected]>
 +M:    Daniele Alessandrelli <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
 +F:    drivers/media/i2c/imx335.c
 +
  SONY IMX355 SENSOR DRIVER
  M:    Tianshu Qiu <[email protected]>
  L:    [email protected]
@@@ -17446,15 -17300,6 +17446,15 @@@ S: Maintaine
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/i2c/imx355.c
  
 +SONY IMX412 SENSOR DRIVER
 +M:    Paul J. Murphy <[email protected]>
 +M:    Daniele Alessandrelli <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
 +F:    drivers/media/i2c/imx412.c
 +
  SONY MEMORYSTICK SUBSYSTEM
  M:    Maxim Levitsky <[email protected]>
  M:    Alex Dubov <[email protected]>
@@@ -17774,9 -17619,8 +17774,9 @@@ F:   drivers/staging/olpc_dcon
  
  STAGING - REALTEK RTL8188EU DRIVERS
  M:    Larry Finger <[email protected]>
 -S:    Odd Fixes
 -F:    drivers/staging/rtl8188eu/
 +M:    Phillip Potter <[email protected]>
 +S:    Supported
 +F:    drivers/staging/r8188eu/
  
  STAGING - REALTEK RTL8712U DRIVERS
  M:    Larry Finger <[email protected]>
@@@ -18113,7 -17957,6 +18113,7 @@@ F:   drivers/regulator/scmi-regulator.
  F:    drivers/reset/reset-scmi.c
  F:    include/linux/sc[mp]i_protocol.h
  F:    include/trace/events/scmi.h
 +F:    include/uapi/linux/virtio_scmi.h
  
  SYSTEM RESET/SHUTDOWN DRIVERS
  M:    Sebastian Reichel <[email protected]>
@@@ -18964,14 -18807,6 +18964,14 @@@ F: arch/x86/mm/testmmiotrace.
  F:    include/linux/mmiotrace.h
  F:    kernel/trace/trace_mmiotrace.c
  
 +TRADITIONAL CHINESE DOCUMENTATION
 +M:    Hu Haowen <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +W:    https://github.com/srcres258/linux-doc
 +T:    git git://github.com/srcres258/linux-doc.git doc-zh-tw
 +F:    Documentation/translations/zh_TW/
 +
  TRIVIAL PATCHES
  M:    Jiri Kosina <[email protected]>
  S:    Maintained
@@@ -19631,7 -19466,6 +19631,7 @@@ T:   git git://github.com/awilliam/linux-
  F:    Documentation/driver-api/vfio.rst
  F:    drivers/vfio/
  F:    include/linux/vfio.h
 +F:    include/linux/vfio_pci_core.h
  F:    include/uapi/linux/vfio.h
  
  VFIO FSL-MC DRIVER
@@@ -19882,15 -19716,6 +19882,15 @@@ S: Maintaine
  F:    include/uapi/linux/virtio_snd.h
  F:    sound/virtio/*
  
 +VIRTIO I2C DRIVER
 +M:    Jie Deng <[email protected]>
 +M:    Viresh Kumar <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/i2c/busses/i2c-virtio.c
 +F:    include/uapi/linux/virtio_i2c.h
 +
  VIRTUAL BOX GUEST DEVICE DRIVER
  M:    Hans de Goede <[email protected]>
  M:    Arnd Bergmann <[email protected]>
index 1d009494af8ba96b524d72039448d6d20e106d90,cf3278041f9c3656973cba37dbdf4e01040019e0..deb23dbec8b5241e06e87101f3528be5f324ecde
@@@ -73,7 -73,7 +73,7 @@@
   * Thus the chain of references always flows in one direction, avoiding loops:
   * importing GEM object -> dma-buf -> exported GEM bo. A further complication
   * are the lookup caches for import and export. These are required to guarantee
 - * that any given object will always have only one uniqe userspace handle. This
 + * that any given object will always have only one unique userspace handle. This
   * is required to allow userspace to detect duplicated imports, since some GEM
   * drivers do fail command submissions if a given buffer object is listed more
   * than once. These import and export caches in &drm_prime_file_private only
@@@ -549,7 -549,7 +549,7 @@@ int drm_prime_handle_to_fd_ioctl(struc
   *
   * FIXME: The underlying helper functions are named rather inconsistently.
   *
 - * Exporting buffers
 + * Importing buffers
   * ~~~~~~~~~~~~~~~~~
   *
   * Importing dma-bufs using drm_gem_prime_import() relies on
@@@ -807,8 -807,8 +807,8 @@@ struct sg_table *drm_prime_pages_to_sg(
                                       struct page **pages, unsigned int nr_pages)
  {
        struct sg_table *sg;
-       struct scatterlist *sge;
        size_t max_segment = 0;
+       int err;
  
        sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!sg)
                max_segment = dma_max_mapping_size(dev->dev);
        if (max_segment == 0)
                max_segment = UINT_MAX;
-       sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-                                         nr_pages << PAGE_SHIFT,
-                                         max_segment,
-                                         NULL, 0, GFP_KERNEL);
-       if (IS_ERR(sge)) {
+       err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
+                                               nr_pages << PAGE_SHIFT,
+                                               max_segment, GFP_KERNEL);
+       if (err) {
                kfree(sg);
-               sg = ERR_CAST(sge);
+               sg = ERR_PTR(err);
        }
        return sg;
  }
index 771eb2963123ff4353ccb104dc6d6d50dbf6360d,0000000000000000000000000000000000000000..35eedc14f5228d5d4ca1f6339f52d3be987b48b2
mode 100644,000000..100644
--- /dev/null
@@@ -1,965 -1,0 +1,964 @@@
-       struct scatterlist *sg;
 +// SPDX-License-Identifier: MIT
 +/*
 + * Copyright © 2021 Intel Corporation
 + */
 +
 +#include <drm/ttm/ttm_bo_driver.h>
 +#include <drm/ttm/ttm_placement.h>
 +
 +#include "i915_drv.h"
 +#include "intel_memory_region.h"
 +#include "intel_region_ttm.h"
 +
 +#include "gem/i915_gem_object.h"
 +#include "gem/i915_gem_region.h"
 +#include "gem/i915_gem_ttm.h"
 +#include "gem/i915_gem_mman.h"
 +
 +#include "gt/intel_migrate.h"
 +#include "gt/intel_engine_pm.h"
 +
 +#define I915_PL_LMEM0 TTM_PL_PRIV
 +#define I915_PL_SYSTEM TTM_PL_SYSTEM
 +#define I915_PL_STOLEN TTM_PL_VRAM
 +#define I915_PL_GGTT TTM_PL_TT
 +
 +#define I915_TTM_PRIO_PURGE     0
 +#define I915_TTM_PRIO_NO_PAGES  1
 +#define I915_TTM_PRIO_HAS_PAGES 2
 +
 +/*
 + * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 + */
 +#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN
 +
 +/**
 + * struct i915_ttm_tt - TTM page vector with additional private information
 + * @ttm: The base TTM page vector.
 + * @dev: The struct device used for dma mapping and unmapping.
 + * @cached_st: The cached scatter-gather table.
 + *
 + * Note that DMA may be going on right up to the point where the page-
 + * vector is unpopulated in delayed destroy. Hence keep the
 + * scatter-gather table mapped and cached up to that point. This is
 + * different from the cached gem object io scatter-gather table which
 + * doesn't have an associated dma mapping.
 + */
 +struct i915_ttm_tt {
 +      struct ttm_tt ttm;
 +      struct device *dev;
 +      struct sg_table *cached_st;
 +};
 +
 +static const struct ttm_place sys_placement_flags = {
 +      .fpfn = 0,
 +      .lpfn = 0,
 +      .mem_type = I915_PL_SYSTEM,
 +      .flags = 0,
 +};
 +
 +static struct ttm_placement i915_sys_placement = {
 +      .num_placement = 1,
 +      .placement = &sys_placement_flags,
 +      .num_busy_placement = 1,
 +      .busy_placement = &sys_placement_flags,
 +};
 +
 +static int i915_ttm_err_to_gem(int err)
 +{
 +      /* Fastpath */
 +      if (likely(!err))
 +              return 0;
 +
 +      switch (err) {
 +      case -EBUSY:
 +              /*
 +               * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
 +               * restart the operation, since we don't record the contending
 +               * lock. We use -EAGAIN to restart.
 +               */
 +              return -EAGAIN;
 +      case -ENOSPC:
 +              /*
 +               * Memory type / region is full, and we can't evict.
 +               * Except possibly system, that returns -ENOMEM;
 +               */
 +              return -ENXIO;
 +      default:
 +              break;
 +      }
 +
 +      return err;
 +}
 +
 +static bool gpu_binds_iomem(struct ttm_resource *mem)
 +{
 +      return mem->mem_type != TTM_PL_SYSTEM;
 +}
 +
 +static bool cpu_maps_iomem(struct ttm_resource *mem)
 +{
 +      /* Once / if we support GGTT, this is also false for cached ttm_tts */
 +      return mem->mem_type != TTM_PL_SYSTEM;
 +}
 +
 +static enum i915_cache_level
 +i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
 +                   struct ttm_tt *ttm)
 +{
 +      return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
 +              ttm->caching == ttm_cached) ? I915_CACHE_LLC :
 +              I915_CACHE_NONE;
 +}
 +
 +static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
 +
 +static enum ttm_caching
 +i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
 +{
 +      /*
 +       * Objects only allowed in system get cached cpu-mappings.
 +       * Other objects get WC mapping for now. Even if in system.
 +       */
 +      if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
 +          obj->mm.n_placements <= 1)
 +              return ttm_cached;
 +
 +      return ttm_write_combined;
 +}
 +
 +static void
 +i915_ttm_place_from_region(const struct intel_memory_region *mr,
 +                         struct ttm_place *place,
 +                         unsigned int flags)
 +{
 +      memset(place, 0, sizeof(*place));
 +      place->mem_type = intel_region_to_ttm_type(mr);
 +
 +      if (flags & I915_BO_ALLOC_CONTIGUOUS)
 +              place->flags = TTM_PL_FLAG_CONTIGUOUS;
 +}
 +
 +static void
 +i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
 +                          struct ttm_place *requested,
 +                          struct ttm_place *busy,
 +                          struct ttm_placement *placement)
 +{
 +      unsigned int num_allowed = obj->mm.n_placements;
 +      unsigned int flags = obj->flags;
 +      unsigned int i;
 +
 +      placement->num_placement = 1;
 +      i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
 +                                 obj->mm.region, requested, flags);
 +
 +      /* Cache this on object? */
 +      placement->num_busy_placement = num_allowed;
 +      for (i = 0; i < placement->num_busy_placement; ++i)
 +              i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
 +
 +      if (num_allowed == 0) {
 +              *busy = *requested;
 +              placement->num_busy_placement = 1;
 +      }
 +
 +      placement->placement = requested;
 +      placement->busy_placement = busy;
 +}
 +
 +static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
 +                                       uint32_t page_flags)
 +{
 +      struct ttm_resource_manager *man =
 +              ttm_manager_type(bo->bdev, bo->resource->mem_type);
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +      struct i915_ttm_tt *i915_tt;
 +      int ret;
 +
 +      i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
 +      if (!i915_tt)
 +              return NULL;
 +
 +      if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
 +          man->use_tt)
 +              page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 +
 +      ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
 +                        i915_ttm_select_tt_caching(obj));
 +      if (ret) {
 +              kfree(i915_tt);
 +              return NULL;
 +      }
 +
 +      i915_tt->dev = obj->base.dev->dev;
 +
 +      return &i915_tt->ttm;
 +}
 +
 +static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
 +{
 +      struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
 +
 +      if (i915_tt->cached_st) {
 +              dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
 +                                DMA_BIDIRECTIONAL, 0);
 +              sg_free_table(i915_tt->cached_st);
 +              kfree(i915_tt->cached_st);
 +              i915_tt->cached_st = NULL;
 +      }
 +      ttm_pool_free(&bdev->pool, ttm);
 +}
 +
 +static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
 +{
 +      struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
 +
 +      ttm_tt_destroy_common(bdev, ttm);
 +      ttm_tt_fini(ttm);
 +      kfree(i915_tt);
 +}
 +
 +static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
 +                                     const struct ttm_place *place)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +
 +      /* Will do for now. Our pinned objects are still on TTM's LRU lists */
 +      return i915_gem_object_evictable(obj);
 +}
 +
 +static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
 +                               struct ttm_placement *placement)
 +{
 +      *placement = i915_sys_placement;
 +}
 +
 +static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +      int ret;
 +
 +      ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
 +      if (ret)
 +              return ret;
 +
 +      ret = __i915_gem_object_put_pages(obj);
 +      if (ret)
 +              return ret;
 +
 +      return 0;
 +}
 +
 +static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
 +{
 +      struct radix_tree_iter iter;
 +      void __rcu **slot;
 +
 +      if (!obj->ttm.cached_io_st)
 +              return;
 +
 +      rcu_read_lock();
 +      radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
 +              radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
 +      rcu_read_unlock();
 +
 +      sg_free_table(obj->ttm.cached_io_st);
 +      kfree(obj->ttm.cached_io_st);
 +      obj->ttm.cached_io_st = NULL;
 +}
 +
 +static void
 +i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
 +{
 +      struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 +
 +      if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
 +              obj->write_domain = I915_GEM_DOMAIN_WC;
 +              obj->read_domains = I915_GEM_DOMAIN_WC;
 +      } else {
 +              obj->write_domain = I915_GEM_DOMAIN_CPU;
 +              obj->read_domains = I915_GEM_DOMAIN_CPU;
 +      }
 +}
 +
 +static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
 +{
 +      struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 +      unsigned int cache_level;
 +      unsigned int i;
 +
 +      /*
 +       * If object was moved to an allowable region, update the object
 +       * region to consider it migrated. Note that if it's currently not
 +       * in an allowable region, it's evicted and we don't update the
 +       * object region.
 +       */
 +      if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
 +              for (i = 0; i < obj->mm.n_placements; ++i) {
 +                      struct intel_memory_region *mr = obj->mm.placements[i];
 +
 +                      if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
 +                          mr != obj->mm.region) {
 +                              i915_gem_object_release_memory_region(obj);
 +                              i915_gem_object_init_memory_region(obj, mr);
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
 +
 +      obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
 +              I915_BO_FLAG_STRUCT_PAGE;
 +
 +      cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
 +                                         bo->ttm);
 +      i915_gem_object_set_cache_coherency(obj, cache_level);
 +}
 +
 +static void i915_ttm_purge(struct drm_i915_gem_object *obj)
 +{
 +      struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 +      struct ttm_operation_ctx ctx = {
 +              .interruptible = true,
 +              .no_wait_gpu = false,
 +      };
 +      struct ttm_placement place = {};
 +      int ret;
 +
 +      if (obj->mm.madv == __I915_MADV_PURGED)
 +              return;
 +
 +      /* TTM's purge interface. Note that we might be reentering. */
 +      ret = ttm_bo_validate(bo, &place, &ctx);
 +      if (!ret) {
 +              obj->write_domain = 0;
 +              obj->read_domains = 0;
 +              i915_ttm_adjust_gem_after_move(obj);
 +              i915_ttm_free_cached_io_st(obj);
 +              obj->mm.madv = __I915_MADV_PURGED;
 +      }
 +}
 +
 +static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +      int ret = i915_ttm_move_notify(bo);
 +
 +      GEM_WARN_ON(ret);
 +      GEM_WARN_ON(obj->ttm.cached_io_st);
 +      if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
 +              i915_ttm_purge(obj);
 +}
 +
 +static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +
 +      if (likely(obj)) {
 +              /* This releases all gem object bindings to the backend. */
 +              i915_ttm_free_cached_io_st(obj);
 +              __i915_gem_free_object(obj);
 +      }
 +}
 +
 +static struct intel_memory_region *
 +i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
 +{
 +      struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
 +
 +      /* There's some room for optimization here... */
 +      GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
 +                 ttm_mem_type < I915_PL_LMEM0);
 +      if (ttm_mem_type == I915_PL_SYSTEM)
 +              return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
 +                                                0);
 +
 +      return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
 +                                        ttm_mem_type - I915_PL_LMEM0);
 +}
 +
 +static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
 +{
 +      struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
-       sg = __sg_alloc_table_from_pages
-               (st, ttm->pages, ttm->num_pages, 0,
-                (unsigned long)ttm->num_pages << PAGE_SHIFT,
-                i915_sg_segment_size(), NULL, 0, GFP_KERNEL);
-       if (IS_ERR(sg)) {
 +      struct sg_table *st;
 +      int ret;
 +
 +      if (i915_tt->cached_st)
 +              return i915_tt->cached_st;
 +
 +      st = kzalloc(sizeof(*st), GFP_KERNEL);
 +      if (!st)
 +              return ERR_PTR(-ENOMEM);
 +
-               return ERR_CAST(sg);
++      ret = sg_alloc_table_from_pages_segment(st,
++                      ttm->pages, ttm->num_pages,
++                      0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
++                      i915_sg_segment_size(), GFP_KERNEL);
++      if (ret) {
 +              kfree(st);
++              return ERR_PTR(ret);
 +      }
 +
 +      ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
 +      if (ret) {
 +              sg_free_table(st);
 +              kfree(st);
 +              return ERR_PTR(ret);
 +      }
 +
 +      i915_tt->cached_st = st;
 +      return st;
 +}
 +
 +static struct sg_table *
 +i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
 +                       struct ttm_resource *res)
 +{
 +      struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 +
 +      if (!gpu_binds_iomem(res))
 +              return i915_ttm_tt_get_st(bo->ttm);
 +
 +      /*
 +       * If CPU mapping differs, we need to add the ttm_tt pages to
 +       * the resulting st. Might make sense for GGTT.
 +       */
 +      GEM_WARN_ON(!cpu_maps_iomem(res));
 +      return intel_region_ttm_resource_to_st(obj->mm.region, res);
 +}
 +
 +static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
 +                             struct ttm_resource *dst_mem,
 +                             struct sg_table *dst_st)
 +{
 +      struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
 +                                                   bdev);
 +      struct ttm_resource_manager *src_man =
 +              ttm_manager_type(bo->bdev, bo->resource->mem_type);
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +      struct sg_table *src_st;
 +      struct i915_request *rq;
 +      struct ttm_tt *ttm = bo->ttm;
 +      enum i915_cache_level src_level, dst_level;
 +      int ret;
 +
 +      if (!i915->gt.migrate.context)
 +              return -EINVAL;
 +
 +      dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
 +      if (!ttm || !ttm_tt_is_populated(ttm)) {
 +              if (bo->type == ttm_bo_type_kernel)
 +                      return -EINVAL;
 +
 +              if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
 +                      return 0;
 +
 +              intel_engine_pm_get(i915->gt.migrate.context->engine);
 +              ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
 +                                                dst_st->sgl, dst_level,
 +                                                gpu_binds_iomem(dst_mem),
 +                                                0, &rq);
 +
 +              if (!ret && rq) {
 +                      i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 +                      i915_request_put(rq);
 +              }
 +              intel_engine_pm_put(i915->gt.migrate.context->engine);
 +      } else {
 +              src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
 +                      obj->ttm.cached_io_st;
 +
 +              src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
 +              intel_engine_pm_get(i915->gt.migrate.context->engine);
 +              ret = intel_context_migrate_copy(i915->gt.migrate.context,
 +                                               NULL, src_st->sgl, src_level,
 +                                               gpu_binds_iomem(bo->resource),
 +                                               dst_st->sgl, dst_level,
 +                                               gpu_binds_iomem(dst_mem),
 +                                               &rq);
 +              if (!ret && rq) {
 +                      i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
 +                      i915_request_put(rq);
 +              }
 +              intel_engine_pm_put(i915->gt.migrate.context->engine);
 +      }
 +
 +      return ret;
 +}
 +
 +static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
 +                       struct ttm_operation_ctx *ctx,
 +                       struct ttm_resource *dst_mem,
 +                       struct ttm_place *hop)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +      struct ttm_resource_manager *dst_man =
 +              ttm_manager_type(bo->bdev, dst_mem->mem_type);
 +      struct intel_memory_region *dst_reg, *src_reg;
 +      union {
 +              struct ttm_kmap_iter_tt tt;
 +              struct ttm_kmap_iter_iomap io;
 +      } _dst_iter, _src_iter;
 +      struct ttm_kmap_iter *dst_iter, *src_iter;
 +      struct sg_table *dst_st;
 +      int ret;
 +
 +      dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
 +      src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
 +      GEM_BUG_ON(!dst_reg || !src_reg);
 +
 +      /* Sync for now. We could do the actual copy async. */
 +      ret = ttm_bo_wait_ctx(bo, ctx);
 +      if (ret)
 +              return ret;
 +
 +      ret = i915_ttm_move_notify(bo);
 +      if (ret)
 +              return ret;
 +
 +      if (obj->mm.madv != I915_MADV_WILLNEED) {
 +              i915_ttm_purge(obj);
 +              ttm_resource_free(bo, &dst_mem);
 +              return 0;
 +      }
 +
 +      /* Populate ttm with pages if needed. Typically system memory. */
 +      if (bo->ttm && (dst_man->use_tt ||
 +                      (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
 +              ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      dst_st = i915_ttm_resource_get_st(obj, dst_mem);
 +      if (IS_ERR(dst_st))
 +              return PTR_ERR(dst_st);
 +
 +      ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
 +      if (ret) {
 +              /* If we start mapping GGTT, we can no longer use man::use_tt here. */
 +              dst_iter = !cpu_maps_iomem(dst_mem) ?
 +                      ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
 +                      ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
 +                                               dst_st, dst_reg->region.start);
 +
 +              src_iter = !cpu_maps_iomem(bo->resource) ?
 +                      ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
 +                      ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
 +                                               obj->ttm.cached_io_st,
 +                                               src_reg->region.start);
 +
 +              ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
 +      }
 +      /* Below dst_mem becomes bo->resource. */
 +      ttm_bo_move_sync_cleanup(bo, dst_mem);
 +      i915_ttm_adjust_domains_after_move(obj);
 +      i915_ttm_free_cached_io_st(obj);
 +
 +      if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
 +              obj->ttm.cached_io_st = dst_st;
 +              obj->ttm.get_io_page.sg_pos = dst_st->sgl;
 +              obj->ttm.get_io_page.sg_idx = 0;
 +      }
 +
 +      i915_ttm_adjust_gem_after_move(obj);
 +      return 0;
 +}
 +
 +static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 +{
 +      if (!cpu_maps_iomem(mem))
 +              return 0;
 +
 +      mem->bus.caching = ttm_write_combined;
 +      mem->bus.is_iomem = true;
 +
 +      return 0;
 +}
 +
 +static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 +                                       unsigned long page_offset)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +      unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
 +      struct scatterlist *sg;
 +      unsigned int ofs;
 +
 +      GEM_WARN_ON(bo->ttm);
 +
 +      sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
 +
 +      return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
 +}
 +
 +static struct ttm_device_funcs i915_ttm_bo_driver = {
 +      .ttm_tt_create = i915_ttm_tt_create,
 +      .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
 +      .ttm_tt_destroy = i915_ttm_tt_destroy,
 +      .eviction_valuable = i915_ttm_eviction_valuable,
 +      .evict_flags = i915_ttm_evict_flags,
 +      .move = i915_ttm_move,
 +      .swap_notify = i915_ttm_swap_notify,
 +      .delete_mem_notify = i915_ttm_delete_mem_notify,
 +      .io_mem_reserve = i915_ttm_io_mem_reserve,
 +      .io_mem_pfn = i915_ttm_io_mem_pfn,
 +};
 +
 +/**
 + * i915_ttm_driver - Return a pointer to the TTM device funcs
 + *
 + * Return: Pointer to statically allocated TTM device funcs.
 + */
 +struct ttm_device_funcs *i915_ttm_driver(void)
 +{
 +      return &i915_ttm_bo_driver;
 +}
 +
 +static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 +                              struct ttm_placement *placement)
 +{
 +      struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 +      struct ttm_operation_ctx ctx = {
 +              .interruptible = true,
 +              .no_wait_gpu = false,
 +      };
 +      struct sg_table *st;
 +      int real_num_busy;
 +      int ret;
 +
 +      /* First try only the requested placement. No eviction. */
 +      real_num_busy = fetch_and_zero(&placement->num_busy_placement);
 +      ret = ttm_bo_validate(bo, placement, &ctx);
 +      if (ret) {
 +              ret = i915_ttm_err_to_gem(ret);
 +              /*
 +               * Anything that wants to restart the operation gets to
 +               * do that.
 +               */
 +              if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
 +                  ret == -EAGAIN)
 +                      return ret;
 +
 +              /*
 +               * If the initial attempt fails, allow all accepted placements,
 +               * evicting if necessary.
 +               */
 +              placement->num_busy_placement = real_num_busy;
 +              ret = ttm_bo_validate(bo, placement, &ctx);
 +              if (ret)
 +                      return i915_ttm_err_to_gem(ret);
 +      }
 +
 +      i915_ttm_adjust_lru(obj);
 +      if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
 +              ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
 +              if (ret)
 +                      return ret;
 +
 +              i915_ttm_adjust_domains_after_move(obj);
 +              i915_ttm_adjust_gem_after_move(obj);
 +      }
 +
 +      if (!i915_gem_object_has_pages(obj)) {
 +              /* Object either has a page vector or is an iomem object */
 +              st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
 +              if (IS_ERR(st))
 +                      return PTR_ERR(st);
 +
 +              __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
 +      }
 +
 +      return ret;
 +}
 +
 +static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
 +{
 +      struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
 +      struct ttm_placement placement;
 +
 +      GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
 +
 +      /* Move to the requested placement. */
 +      i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
 +
 +      return __i915_ttm_get_pages(obj, &placement);
 +}
 +
 +/**
 + * DOC: Migration vs eviction
 + *
 + * GEM migration may not be the same as TTM migration / eviction. If
 + * the TTM core decides to evict an object it may be evicted to a
 + * TTM memory type that is not in the object's allowable GEM regions, or
 + * in fact theoretically to a TTM memory type that doesn't correspond to
 + * a GEM memory region. In that case the object's GEM region is not
 + * updated, and the data is migrated back to the GEM region at
 + * get_pages time. TTM may however set up CPU ptes to the object even
 + * when it is evicted.
 + * Gem forced migration using the i915_ttm_migrate() op, is allowed even
 + * to regions that are not in the object's list of allowable placements.
 + */
 +static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
 +                          struct intel_memory_region *mr)
 +{
 +      struct ttm_place requested;
 +      struct ttm_placement placement;
 +      int ret;
 +
 +      i915_ttm_place_from_region(mr, &requested, obj->flags);
 +      placement.num_placement = 1;
 +      placement.num_busy_placement = 1;
 +      placement.placement = &requested;
 +      placement.busy_placement = &requested;
 +
 +      ret = __i915_ttm_get_pages(obj, &placement);
 +      if (ret)
 +              return ret;
 +
 +      /*
 +       * Reinitialize the region bindings. This is primarily
 +       * required for objects where the new region is not in
 +       * its allowable placements.
 +       */
 +      if (obj->mm.region != mr) {
 +              i915_gem_object_release_memory_region(obj);
 +              i915_gem_object_init_memory_region(obj, mr);
 +      }
 +
 +      return 0;
 +}
 +
 +static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
 +                             struct sg_table *st)
 +{
 +      /*
 +       * We're currently not called from a shrinker, so put_pages()
 +       * typically means the object is about to destroyed, or called
 +       * from move_notify(). So just avoid doing much for now.
 +       * If the object is not destroyed next, The TTM eviction logic
 +       * and shrinkers will move it out if needed.
 +       */
 +
 +      i915_ttm_adjust_lru(obj);
 +}
 +
 +static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
 +{
 +      struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 +
 +      /*
 +       * Don't manipulate the TTM LRUs while in TTM bo destruction.
 +       * We're called through i915_ttm_delete_mem_notify().
 +       */
 +      if (!kref_read(&bo->kref))
 +              return;
 +
 +      /*
 +       * Put on the correct LRU list depending on the MADV status
 +       */
 +      spin_lock(&bo->bdev->lru_lock);
 +      if (obj->mm.madv != I915_MADV_WILLNEED) {
 +              bo->priority = I915_TTM_PRIO_PURGE;
 +      } else if (!i915_gem_object_has_pages(obj)) {
 +              if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
 +                      bo->priority = I915_TTM_PRIO_HAS_PAGES;
 +      } else {
 +              if (bo->priority > I915_TTM_PRIO_NO_PAGES)
 +                      bo->priority = I915_TTM_PRIO_NO_PAGES;
 +      }
 +
 +      ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
 +      spin_unlock(&bo->bdev->lru_lock);
 +}
 +
 +/*
 + * TTM-backed gem object destruction requires some clarification.
 + * Basically we have two possibilities here. We can either rely on the
 + * i915 delayed destruction and put the TTM object when the object
 + * is idle. This would be detected by TTM which would bypass the
 + * TTM delayed destroy handling. The other approach is to put the TTM
 + * object early and rely on the TTM destroyed handling, and then free
 + * the leftover parts of the GEM object once TTM's destroyed list handling is
 + * complete. For now, we rely on the latter for two reasons:
 + * a) TTM can evict an object even when it's on the delayed destroy list,
 + * which in theory allows for complete eviction.
 + * b) There is work going on in TTM to allow freeing an object even when
 + * it's not idle, and using the TTM destroyed list handling could help us
 + * benefit from that.
 + */
 +static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
 +{
 +      if (obj->ttm.created) {
 +              ttm_bo_put(i915_gem_to_ttm(obj));
 +      } else {
 +              __i915_gem_free_object(obj);
 +              call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 +      }
 +}
 +
 +static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 +{
 +      struct vm_area_struct *area = vmf->vma;
 +      struct drm_i915_gem_object *obj =
 +              i915_ttm_to_gem(area->vm_private_data);
 +
 +      /* Sanity check that we allow writing into this object */
 +      if (unlikely(i915_gem_object_is_readonly(obj) &&
 +                   area->vm_flags & VM_WRITE))
 +              return VM_FAULT_SIGBUS;
 +
 +      return ttm_bo_vm_fault(vmf);
 +}
 +
 +static int
 +vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
 +            void *buf, int len, int write)
 +{
 +      struct drm_i915_gem_object *obj =
 +              i915_ttm_to_gem(area->vm_private_data);
 +
 +      if (i915_gem_object_is_readonly(obj) && write)
 +              return -EACCES;
 +
 +      return ttm_bo_vm_access(area, addr, buf, len, write);
 +}
 +
 +static void ttm_vm_open(struct vm_area_struct *vma)
 +{
 +      struct drm_i915_gem_object *obj =
 +              i915_ttm_to_gem(vma->vm_private_data);
 +
 +      GEM_BUG_ON(!obj);
 +      i915_gem_object_get(obj);
 +}
 +
 +static void ttm_vm_close(struct vm_area_struct *vma)
 +{
 +      struct drm_i915_gem_object *obj =
 +              i915_ttm_to_gem(vma->vm_private_data);
 +
 +      GEM_BUG_ON(!obj);
 +      i915_gem_object_put(obj);
 +}
 +
 +static const struct vm_operations_struct vm_ops_ttm = {
 +      .fault = vm_fault_ttm,
 +      .access = vm_access_ttm,
 +      .open = ttm_vm_open,
 +      .close = ttm_vm_close,
 +};
 +
 +static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 +{
 +      /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
 +      GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));
 +
 +      return drm_vma_node_offset_addr(&obj->base.vma_node);
 +}
 +
 +static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 +      .name = "i915_gem_object_ttm",
 +
 +      .get_pages = i915_ttm_get_pages,
 +      .put_pages = i915_ttm_put_pages,
 +      .truncate = i915_ttm_purge,
 +      .adjust_lru = i915_ttm_adjust_lru,
 +      .delayed_free = i915_ttm_delayed_free,
 +      .migrate = i915_ttm_migrate,
 +      .mmap_offset = i915_ttm_mmap_offset,
 +      .mmap_ops = &vm_ops_ttm,
 +};
 +
 +void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 +{
 +      struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 +
 +      i915_gem_object_release_memory_region(obj);
 +      mutex_destroy(&obj->ttm.get_io_page.lock);
 +      if (obj->ttm.created)
 +              call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
 +}
 +
 +/**
 + * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 + * @mem: The initial memory region for the object.
 + * @obj: The gem object.
 + * @size: Object size in bytes.
 + * @flags: gem object flags.
 + *
 + * Return: 0 on success, negative error code on failure.
 + */
 +int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 +                             struct drm_i915_gem_object *obj,
 +                             resource_size_t size,
 +                             resource_size_t page_size,
 +                             unsigned int flags)
 +{
 +      static struct lock_class_key lock_class;
 +      struct drm_i915_private *i915 = mem->i915;
 +      struct ttm_operation_ctx ctx = {
 +              .interruptible = true,
 +              .no_wait_gpu = false,
 +      };
 +      enum ttm_bo_type bo_type;
 +      int ret;
 +
 +      drm_gem_private_object_init(&i915->drm, &obj->base, size);
 +      i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
 +      i915_gem_object_init_memory_region(obj, mem);
 +      i915_gem_object_make_unshrinkable(obj);
 +      INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
 +      mutex_init(&obj->ttm.get_io_page.lock);
 +      bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
 +              ttm_bo_type_kernel;
 +
 +      obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 +
 +      /* Forcing the page size is kernel internal only */
 +      GEM_BUG_ON(page_size && obj->mm.n_placements);
 +
 +      /*
 +       * If this function fails, it will call the destructor, but
 +       * our caller still owns the object. So no freeing in the
 +       * destructor until obj->ttm.created is true.
 +       * Similarly, in delayed_destroy, we can't call ttm_bo_put()
 +       * until successful initialization.
 +       */
 +      ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
 +                                 bo_type, &i915_sys_placement,
 +                                 page_size >> PAGE_SHIFT,
 +                                 &ctx, NULL, NULL, i915_ttm_bo_destroy);
 +      if (ret)
 +              return i915_ttm_err_to_gem(ret);
 +
 +      obj->ttm.created = true;
 +      i915_ttm_adjust_domains_after_move(obj);
 +      i915_ttm_adjust_gem_after_move(obj);
 +      i915_gem_object_unlock(obj);
 +
 +      return 0;
 +}
 +
 +static const struct intel_memory_region_ops ttm_system_region_ops = {
 +      .init_object = __i915_gem_ttm_object_init,
 +};
 +
 +struct intel_memory_region *
 +i915_gem_ttm_system_setup(struct drm_i915_private *i915,
 +                        u16 type, u16 instance)
 +{
 +      struct intel_memory_region *mr;
 +
 +      mr = intel_memory_region_create(i915, 0,
 +                                      totalram_pages() << PAGE_SHIFT,
 +                                      PAGE_SIZE, 0,
 +                                      type, instance,
 +                                      &ttm_system_region_ops);
 +      if (IS_ERR(mr))
 +              return mr;
 +
 +      intel_memory_region_set_name(mr, "system-ttm");
 +      return mr;
 +}
index 468a7a617fbfae7c0333cc1c32818d85c5b5f3f7,458f797a9e1eb48975257b78eb66fde3524e1a0c..8ea0fa665e5305788a078765874d1488246996c4
@@@ -67,11 -67,11 +67,11 @@@ static bool i915_gem_userptr_invalidate
        if (!mmu_notifier_range_blockable(range))
                return false;
  
 -      spin_lock(&i915->mm.notifier_lock);
 +      write_lock(&i915->mm.notifier_lock);
  
        mmu_interval_set_seq(mni, cur_seq);
  
 -      spin_unlock(&i915->mm.notifier_lock);
 +      write_unlock(&i915->mm.notifier_lock);
  
        /*
         * We don't wait when the process is exiting. This is valid
@@@ -107,15 -107,16 +107,15 @@@ i915_gem_userptr_init__mmu_notifier(str
  
  static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
  {
 -      struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct page **pvec = NULL;
  
 -      spin_lock(&i915->mm.notifier_lock);
 +      assert_object_held_shared(obj);
 +
        if (!--obj->userptr.page_ref) {
                pvec = obj->userptr.pvec;
                obj->userptr.pvec = NULL;
        }
        GEM_BUG_ON(obj->userptr.page_ref < 0);
 -      spin_unlock(&i915->mm.notifier_lock);
  
        if (pvec) {
                const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
  
  static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
  {
 -      struct drm_i915_private *i915 = to_i915(obj->base.dev);
        const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
        unsigned int sg_page_sizes;
-       struct scatterlist *sg;
        struct page **pvec;
        int ret;
  
        if (!st)
                return -ENOMEM;
  
 -      spin_lock(&i915->mm.notifier_lock);
 -      if (GEM_WARN_ON(!obj->userptr.page_ref)) {
 -              spin_unlock(&i915->mm.notifier_lock);
 -              ret = -EFAULT;
 +      if (!obj->userptr.page_ref) {
 +              ret = -EAGAIN;
                goto err_free;
        }
  
        obj->userptr.page_ref++;
        pvec = obj->userptr.pvec;
 -      spin_unlock(&i915->mm.notifier_lock);
  
  alloc_table:
-       sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
-                                        num_pages << PAGE_SHIFT, max_segment,
-                                        NULL, 0, GFP_KERNEL);
-       if (IS_ERR(sg)) {
-               ret = PTR_ERR(sg);
+       ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
+                                               num_pages << PAGE_SHIFT,
+                                               max_segment, GFP_KERNEL);
+       if (ret)
                goto err;
-       }
  
        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
@@@ -236,7 -238,7 +233,7 @@@ i915_gem_userptr_put_pages(struct drm_i
        i915_gem_object_userptr_drop_ref(obj);
  }
  
 -static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages)
 +static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
  {
        struct sg_table *pages;
        int err;
        if (!IS_ERR_OR_NULL(pages))
                i915_gem_userptr_put_pages(obj, pages);
  
 -      if (get_pages)
 -              err = ____i915_gem_object_get_pages(obj);
 -
        return err;
  }
  
  int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
  {
 -      struct drm_i915_private *i915 = to_i915(obj->base.dev);
        const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        unsigned int gup_flags = 0;
        if (obj->userptr.notifier.mm != current->mm)
                return -EFAULT;
  
 +      notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
 +
        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;
  
 -      /* optimistically try to preserve current pages while unlocked */
 -      if (i915_gem_object_has_pages(obj) &&
 -          !mmu_interval_check_retry(&obj->userptr.notifier,
 -                                    obj->userptr.notifier_seq)) {
 -              spin_lock(&i915->mm.notifier_lock);
 -              if (obj->userptr.pvec &&
 -                  !mmu_interval_read_retry(&obj->userptr.notifier,
 -                                           obj->userptr.notifier_seq)) {
 -                      obj->userptr.page_ref++;
 -
 -                      /* We can keep using the current binding, this is the fastpath */
 -                      ret = 1;
 -              }
 -              spin_unlock(&i915->mm.notifier_lock);
 +      if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
 +              i915_gem_object_unlock(obj);
 +              return 0;
        }
  
 -      if (!ret) {
 -              /* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
 -              ret = i915_gem_object_userptr_unbind(obj, false);
 -      }
 +      ret = i915_gem_object_userptr_unbind(obj);
        i915_gem_object_unlock(obj);
 -      if (ret < 0)
 +      if (ret)
                return ret;
  
 -      if (ret > 0)
 -              return 0;
 -
 -      notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
 -
        pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;
        }
        ret = 0;
  
 -      spin_lock(&i915->mm.notifier_lock);
 +      ret = i915_gem_object_lock_interruptible(obj, NULL);
 +      if (ret)
 +              goto out;
  
        if (mmu_interval_read_retry(&obj->userptr.notifier,
                !obj->userptr.page_ref ? notifier_seq :
        if (!obj->userptr.page_ref++) {
                obj->userptr.pvec = pvec;
                obj->userptr.notifier_seq = notifier_seq;
 -
                pvec = NULL;
 +              ret = ____i915_gem_object_get_pages(obj);
        }
  
 +      obj->userptr.page_ref--;
 +
  out_unlock:
 -      spin_unlock(&i915->mm.notifier_lock);
 +      i915_gem_object_unlock(obj);
  
  out:
        if (pvec) {
@@@ -347,6 -366,11 +344,6 @@@ int i915_gem_object_userptr_submit_done
        return 0;
  }
  
 -void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj)
 -{
 -      i915_gem_object_userptr_drop_ref(obj);
 -}
 -
  int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
  {
        int err;
                i915_gem_object_unlock(obj);
        }
  
 -      i915_gem_object_userptr_submit_fini(obj);
        return err;
  }
  
@@@ -422,34 -447,6 +419,34 @@@ static const struct drm_i915_gem_object
  
  #endif
  
 +static int
 +probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 +{
 +      const unsigned long end = addr + len;
 +      struct vm_area_struct *vma;
 +      int ret = -EFAULT;
 +
 +      mmap_read_lock(mm);
 +      for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
 +              /* Check for holes, note that we also update the addr below */
 +              if (vma->vm_start > addr)
 +                      break;
 +
 +              if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 +                      break;
 +
 +              if (vma->vm_end >= end) {
 +                      ret = 0;
 +                      break;
 +              }
 +
 +              addr = vma->vm_end;
 +      }
 +      mmap_read_unlock(mm);
 +
 +      return ret;
 +}
 +
  /*
   * Creates a new mm object that wraps some normal memory from the process
   * context - user memory.
@@@ -505,8 -502,7 +502,8 @@@ i915_gem_userptr_ioctl(struct drm_devic
        }
  
        if (args->flags & ~(I915_USERPTR_READ_ONLY |
 -                          I915_USERPTR_UNSYNCHRONIZED))
 +                          I915_USERPTR_UNSYNCHRONIZED |
 +                          I915_USERPTR_PROBE))
                return -EINVAL;
  
        if (i915_gem_object_size_2big(args->user_size))
                        return -ENODEV;
        }
  
 +      if (args->flags & I915_USERPTR_PROBE) {
 +              /*
 +               * Check that the range pointed to represents real struct
 +               * pages and not iomappings (at this moment in time!)
 +               */
 +              ret = probe_range(current->mm, args->user_ptr, args->user_size);
 +              if (ret)
 +                      return ret;
 +      }
 +
  #ifdef CONFIG_MMU_NOTIFIER
        obj = i915_gem_object_alloc();
        if (obj == NULL)
                return -ENOMEM;
  
        drm_gem_private_object_init(dev, &obj->base, args->user_size);
 -      i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
 -                           I915_BO_ALLOC_STRUCT_PAGE);
 +      i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
 +      obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
  int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
  {
  #ifdef CONFIG_MMU_NOTIFIER
 -      spin_lock_init(&dev_priv->mm.notifier_lock);
 +      rwlock_init(&dev_priv->mm.notifier_lock);
  #endif
  
        return 0;
index b0973c27e774d31ce27ef063ed692ea7933a49eb,fc372d2e52a1c24bda6db6e474658d599dbf3073..8b8991e3ed2d0114199e1eb2e3df53b1ab1fc608
@@@ -222,6 -222,36 +222,6 @@@ static bool __vmw_piter_sg_next(struct 
  }
  
  
 -/**
 - * __vmw_piter_non_sg_page: Helper functions to return a pointer
 - * to the current page.
 - *
 - * @viter: Pointer to the iterator
 - *
 - * These functions return a pointer to the page currently
 - * pointed to by @viter. Functions are selected depending on the
 - * current mapping mode.
 - */
 -static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
 -{
 -      return viter->pages[viter->i];
 -}
 -
 -/**
 - * __vmw_piter_phys_addr: Helper functions to return the DMA
 - * address of the current page.
 - *
 - * @viter: Pointer to the iterator
 - *
 - * These functions return the DMA address of the page currently
 - * pointed to by @viter. Functions are selected depending on the
 - * current mapping mode.
 - */
 -static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
 -{
 -      return page_to_phys(viter->pages[viter->i]);
 -}
 -
  static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
  {
        return viter->addrs[viter->i];
@@@ -249,8 -279,13 +249,8 @@@ void vmw_piter_start(struct vmw_piter *
  {
        viter->i = p_offset - 1;
        viter->num_pages = vsgt->num_pages;
 -      viter->page = &__vmw_piter_non_sg_page;
        viter->pages = vsgt->pages;
        switch (vsgt->mode) {
 -      case vmw_dma_phys:
 -              viter->next = &__vmw_piter_non_sg_next;
 -              viter->dma_address = &__vmw_piter_phys_addr;
 -              break;
        case vmw_dma_alloc_coherent:
                viter->next = &__vmw_piter_non_sg_next;
                viter->dma_address = &__vmw_piter_dma_addr;
@@@ -328,7 -363,6 +328,6 @@@ static int vmw_ttm_map_dma(struct vmw_t
        int ret = 0;
        static size_t sgl_size;
        static size_t sgt_size;
-       struct scatterlist *sg;
  
        if (vmw_tt->mapped)
                return 0;
                if (unlikely(ret != 0))
                        return ret;
  
-               sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-                               vsgt->num_pages, 0,
-                               (unsigned long) vsgt->num_pages << PAGE_SHIFT,
-                               dma_get_max_seg_size(dev_priv->drm.dev),
-                               NULL, 0, GFP_KERNEL);
-               if (IS_ERR(sg)) {
-                       ret = PTR_ERR(sg);
+               ret = sg_alloc_table_from_pages_segment(
+                       &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0,
+                       (unsigned long)vsgt->num_pages << PAGE_SHIFT,
+                       dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL);
+               if (ret)
                        goto out_sg_alloc_fail;
-               }
  
                if (vsgt->num_pages > vmw_tt->sgt.orig_nents) {
                        uint64_t over_alloc =
index ea0054c60fbc68305a4f83023a325fc32079b18f,634d1586a1fabe4856344749d5b883c298026e81..3048862c961c9de78f216a2488b40c952cba9e65
@@@ -815,7 -815,7 +815,7 @@@ int bnxt_re_destroy_qp(struct ib_qp *ib
        if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
                rc = bnxt_re_destroy_gsi_sqp(qp);
                if (rc)
-                       goto sh_fail;
+                       return rc;
        }
  
        mutex_lock(&rdev->qp_lock);
        ib_umem_release(qp->rumem);
        ib_umem_release(qp->sumem);
  
-       kfree(qp);
        return 0;
- sh_fail:
-       return rc;
  }
  
  static u8 __from_ib_qp_type(enum ib_qp_type type)
@@@ -1402,27 -1399,22 +1399,22 @@@ static bool bnxt_re_test_qp_limits(stru
        return rc;
  }
  
- struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
-                               struct ib_qp_init_attr *qp_init_attr,
-                               struct ib_udata *udata)
+ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+                     struct ib_udata *udata)
  {
+       struct ib_pd *ib_pd = ib_qp->pd;
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
-       struct bnxt_re_qp *qp;
+       struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        int rc;
  
        rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
        if (!rc) {
                rc = -EINVAL;
-               goto exit;
+               goto fail;
        }
  
-       qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-       if (!qp) {
-               rc = -ENOMEM;
-               goto exit;
-       }
        qp->rdev = rdev;
        rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
        if (rc)
        mutex_unlock(&rdev->qp_lock);
        atomic_inc(&rdev->qp_count);
  
-       return &qp->ib_qp;
+       return 0;
  qp_destroy:
        bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
  free_umem:
        ib_umem_release(qp->rumem);
        ib_umem_release(qp->sumem);
  fail:
-       kfree(qp);
- exit:
-       return ERR_PTR(rc);
+       return rc;
  }
  
  static u8 __from_ib_qp_state(enum ib_qp_state state)
@@@ -1681,7 -1671,6 +1671,7 @@@ int bnxt_re_create_srq(struct ib_srq *i
        if (nq)
                nq->budget++;
        atomic_inc(&rdev->srq_count);
 +      spin_lock_init(&srq->lock);
  
        return 0;
  
index 4678bd6ec7d63465a2ef4aebd3ffb9aefc53336c,b3baffe861a6c04ceae6645b0824a6a1a4465a17..66268e41b470e283385fc294ffa6e31fc1617678
@@@ -711,6 -711,7 +711,7 @@@ static const struct ib_device_ops bnxt_
        INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
        INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
        INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
+       INIT_RDMA_OBJ_SIZE(ib_qp, bnxt_re_qp, ib_qp),
        INIT_RDMA_OBJ_SIZE(ib_srq, bnxt_re_srq, ib_srq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, bnxt_re_ucontext, ib_uctx),
  };
@@@ -1397,6 -1398,7 +1398,6 @@@ static int bnxt_re_dev_init(struct bnxt
        memset(&rattr, 0, sizeof(rattr));
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
 -              rtnl_unlock();
                ibdev_err(&rdev->ibdev,
                          "Failed to register with netedev: %#x\n", rc);
                return -EINVAL;
index be4a07bd268a312499670b6aafff2af3759694bd,a27ff0c12e0a75a698b3d0d18a3d0c6833a5158d..417dea5f90cfec82518ec6a7bb9aba0136f7a186
@@@ -83,8 -83,7 +83,7 @@@ static int efa_request_mgmnt_irq(struc
        int err;
  
        irq = &dev->admin_irq;
-       err = request_irq(irq->vector, irq->handler, 0, irq->name,
-                         irq->data);
+       err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
        if (err) {
                dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
                        err);
@@@ -92,8 -91,8 +91,8 @@@
        }
  
        dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
-               nr_cpumask_bits, &irq->affinity_hint_mask, irq->vector);
-       irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
+               nr_cpumask_bits, &irq->affinity_hint_mask, irq->irqn);
+       irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
  
        return 0;
  }
@@@ -106,15 -105,13 +105,13 @@@ static void efa_setup_mgmnt_irq(struct 
                 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
        dev->admin_irq.handler = efa_intr_msix_mgmnt;
        dev->admin_irq.data = dev;
-       dev->admin_irq.vector =
+       dev->admin_irq.irqn =
                pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
        cpu = cpumask_first(cpu_online_mask);
-       dev->admin_irq.cpu = cpu;
        cpumask_set_cpu(cpu,
                        &dev->admin_irq.affinity_hint_mask);
-       dev_info(&dev->pdev->dev, "Setup irq:0x%p vector:%d name:%s\n",
-                &dev->admin_irq,
-                dev->admin_irq.vector,
+       dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
+                dev->admin_irq.irqn,
                 dev->admin_irq.name);
  }
  
@@@ -123,8 -120,8 +120,8 @@@ static void efa_free_mgmnt_irq(struct e
        struct efa_irq *irq;
  
        irq = &dev->admin_irq;
-       irq_set_affinity_hint(irq->vector, NULL);
-       free_irq(irq->vector, irq->data);
+       irq_set_affinity_hint(irq->irqn, NULL);
+       free_irq(irq->irqn, irq->data);
  }
  
  static int efa_set_mgmnt_irq(struct efa_dev *dev)
@@@ -271,6 -268,7 +268,7 @@@ static const struct ib_device_ops efa_d
        INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
  };
  
@@@ -357,7 -355,6 +355,7 @@@ static int efa_enable_msix(struct efa_d
        }
  
        if (irq_num != msix_vecs) {
 +              efa_disable_msix(dev);
                dev_err(&dev->pdev->dev,
                        "Allocated %d MSI-X (out of %d requested)\n",
                        irq_num, msix_vecs);
index e83dc562629ed36dffa68fa47c0ee5942933c403,21966ba255b988fa48cf854706b4f29efaf30bed..2b6c24b7b58655b2d847585db6db71c4c5d85b13
@@@ -1,48 -1,6 +1,6 @@@
+ // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
  /*
   * Copyright(c) 2015 - 2018 Intel Corporation.
-  *
-  * This file is provided under a dual BSD/GPLv2 license.  When using or
-  * redistributing this file, you may do so under either license.
-  *
-  * GPL LICENSE SUMMARY
-  *
-  * This program is free software; you can redistribute it and/or modify
-  * it under the terms of version 2 of the GNU General Public License as
-  * published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope that it will be useful, but
-  * WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  * General Public License for more details.
-  *
-  * BSD LICENSE
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions
-  * are met:
-  *
-  *  - Redistributions of source code must retain the above copyright
-  *    notice, this list of conditions and the following disclaimer.
-  *  - Redistributions in binary form must reproduce the above copyright
-  *    notice, this list of conditions and the following disclaimer in
-  *    the documentation and/or other materials provided with the
-  *    distribution.
-  *  - Neither the name of Intel Corporation nor the names of its
-  *    contributors may be used to endorse or promote products derived
-  *    from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
   */
  
  #include <linux/spinlock.h>
@@@ -1860,7 -1818,7 +1818,7 @@@ retry
  
        /*
         * The SDMA idle interrupt is not guaranteed to be ordered with respect
-        * to updates to the the dma_head location in host memory. The head
+        * to updates to the dma_head location in host memory. The head
         * value read might not be fully up to date. If there are pending
         * descriptors and the SDMA idle interrupt fired then read from the
         * CSR SDMA head instead to get the latest value from the hardware.
@@@ -3055,7 -3013,6 +3013,7 @@@ static void __sdma_process_event(struc
  static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
  {
        int i;
 +      struct sdma_desc *descp;
  
        /* Handle last descriptor */
        if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
        if (unlikely(tx->num_desc == MAX_DESC))
                goto enomem;
  
 -      tx->descp = kmalloc_array(
 -                      MAX_DESC,
 -                      sizeof(struct sdma_desc),
 -                      GFP_ATOMIC);
 -      if (!tx->descp)
 +      descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc), GFP_ATOMIC);
 +      if (!descp)
                goto enomem;
 +      tx->descp = descp;
  
        /* reserve last descriptor for coalescing */
        tx->desc_limit = MAX_DESC - 1;
index 466f0a521940c9fa40cbdd812983484a6d667074,b36ad5ad8197776d7838114f26839069c6aedcaa..8664bcf6d3f590e394e88ff8b1698c12d53a76e0
@@@ -1184,6 -1184,16 +1184,16 @@@ static int mlx5_ib_query_device(struct 
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
        }
  
+       if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
+               resp.response_length += sizeof(resp.dci_streams_caps);
+               resp.dci_streams_caps.max_log_num_concurent =
+                       MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
+               resp.dci_streams_caps.max_log_num_errored =
+                       MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
+       }
        if (uhw_outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
  
@@@ -2501,6 -2511,13 +2511,13 @@@ static void pkey_change_handler(struct 
                container_of(work, struct mlx5_ib_port_resources,
                             pkey_change_work);
  
+       if (!ports->gsi)
+               /*
+                * We got this event before device was fully configured
+                * and MAD registration code wasn't called/finished yet.
+                */
+               return;
        mlx5_ib_gsi_pkey_change(ports->gsi);
  }
  
@@@ -2795,33 -2812,16 +2812,16 @@@ static int mlx5_ib_dev_res_init(struct 
        if (!MLX5_CAP_GEN(dev->mdev, xrc))
                return -EOPNOTSUPP;
  
-       mutex_init(&devr->mutex);
-       devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
-       if (!devr->p0)
-               return -ENOMEM;
-       devr->p0->device  = ibdev;
-       devr->p0->uobject = NULL;
-       atomic_set(&devr->p0->usecnt, 0);
-       ret = mlx5_ib_alloc_pd(devr->p0, NULL);
-       if (ret)
-               goto error0;
+       devr->p0 = ib_alloc_pd(ibdev, 0);
+       if (IS_ERR(devr->p0))
+               return PTR_ERR(devr->p0);
  
-       devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
-       if (!devr->c0) {
-               ret = -ENOMEM;
+       devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+       if (IS_ERR(devr->c0)) {
+               ret = PTR_ERR(devr->c0);
                goto error1;
        }
  
-       devr->c0->device = &dev->ib_dev;
-       atomic_set(&devr->c0->usecnt, 0);
-       ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
-       if (ret)
-               goto err_create_cq;
        ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
        if (ret)
                goto error2;
        attr.srq_type = IB_SRQT_XRC;
        attr.ext.cq = devr->c0;
  
-       devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
-       if (!devr->s0) {
-               ret = -ENOMEM;
-               goto error4;
-       }
-       devr->s0->device        = &dev->ib_dev;
-       devr->s0->pd            = devr->p0;
-       devr->s0->srq_type      = IB_SRQT_XRC;
-       devr->s0->ext.cq        = devr->c0;
-       ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
-       if (ret)
+       devr->s0 = ib_create_srq(devr->p0, &attr);
+       if (IS_ERR(devr->s0)) {
+               ret = PTR_ERR(devr->s0);
                goto err_create;
-       atomic_inc(&devr->s0->ext.cq->usecnt);
-       atomic_inc(&devr->p0->usecnt);
-       atomic_set(&devr->s0->usecnt, 0);
+       }
  
        memset(&attr, 0, sizeof(attr));
        attr.attr.max_sge = 1;
        attr.attr.max_wr = 1;
        attr.srq_type = IB_SRQT_BASIC;
-       devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
-       if (!devr->s1) {
-               ret = -ENOMEM;
-               goto error5;
-       }
-       devr->s1->device        = &dev->ib_dev;
-       devr->s1->pd            = devr->p0;
-       devr->s1->srq_type      = IB_SRQT_BASIC;
-       devr->s1->ext.cq        = devr->c0;
  
-       ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
-       if (ret)
+       devr->s1 = ib_create_srq(devr->p0, &attr);
+       if (IS_ERR(devr->s1)) {
+               ret = PTR_ERR(devr->s1);
                goto error6;
-       atomic_inc(&devr->p0->usecnt);
-       atomic_set(&devr->s1->usecnt, 0);
+       }
  
        for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
                INIT_WORK(&devr->ports[port].pkey_change_work,
        return 0;
  
  error6:
-       kfree(devr->s1);
- error5:
-       mlx5_ib_destroy_srq(devr->s0, NULL);
+       ib_destroy_srq(devr->s0);
  err_create:
-       kfree(devr->s0);
- error4:
        mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
  error3:
        mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
  error2:
-       mlx5_ib_destroy_cq(devr->c0, NULL);
- err_create_cq:
-       kfree(devr->c0);
+       ib_destroy_cq(devr->c0);
  error1:
-       mlx5_ib_dealloc_pd(devr->p0, NULL);
- error0:
-       kfree(devr->p0);
+       ib_dealloc_pd(devr->p0);
        return ret;
  }
  
@@@ -2908,20 -2877,21 +2877,21 @@@ static void mlx5_ib_dev_res_cleanup(str
        struct mlx5_ib_resources *devr = &dev->devr;
        int port;
  
-       mlx5_ib_destroy_srq(devr->s1, NULL);
-       kfree(devr->s1);
-       mlx5_ib_destroy_srq(devr->s0, NULL);
-       kfree(devr->s0);
-       mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
-       mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
-       mlx5_ib_destroy_cq(devr->c0, NULL);
-       kfree(devr->c0);
-       mlx5_ib_dealloc_pd(devr->p0, NULL);
-       kfree(devr->p0);
-       /* Make sure no change P_Key work items are still executing */
+       /*
+        * Make sure no change P_Key work items are still executing.
+        *
+        * At this stage, the mlx5_ib_event should be unregistered
+        * and it ensures that no new works are added.
+        */
        for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
                cancel_work_sync(&devr->ports[port].pkey_change_work);
+       ib_destroy_srq(devr->s1);
+       ib_destroy_srq(devr->s0);
+       mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+       mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+       ib_destroy_cq(devr->c0);
+       ib_dealloc_pd(devr->p0);
  }
  
  static u32 get_core_cap_flags(struct ib_device *ibdev,
@@@ -3799,6 -3769,7 +3769,7 @@@ static const struct ib_device_ops mlx5_
        INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
        INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
        INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
  };
@@@ -4061,7 -4032,7 +4032,7 @@@ static void mlx5_ib_stage_pre_ib_reg_um
                mlx5_ib_warn(dev, "mr cache cleanup failed\n");
  
        if (dev->umrc.qp)
-               mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
+               ib_destroy_qp(dev->umrc.qp);
        if (dev->umrc.cq)
                ib_free_cq(dev->umrc.cq);
        if (dev->umrc.pd)
@@@ -4114,23 -4085,17 +4085,17 @@@ static int mlx5_ib_stage_post_ib_reg_um
        init_attr->cap.max_send_sge = 1;
        init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
        init_attr->port_num = 1;
-       qp = mlx5_ib_create_qp(pd, init_attr, NULL);
+       qp = ib_create_qp(pd, init_attr);
        if (IS_ERR(qp)) {
                mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
                ret = PTR_ERR(qp);
                goto error_3;
        }
-       qp->device     = &dev->ib_dev;
-       qp->real_qp    = qp;
-       qp->uobject    = NULL;
-       qp->qp_type    = MLX5_IB_QPT_REG_UMR;
-       qp->send_cq    = init_attr->send_cq;
-       qp->recv_cq    = init_attr->recv_cq;
  
        attr->qp_state = IB_QPS_INIT;
        attr->port_num = 1;
-       ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
-                               IB_QP_PORT, NULL);
+       ret = ib_modify_qp(qp, attr,
+                          IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
                goto error_4;
        attr->qp_state = IB_QPS_RTR;
        attr->path_mtu = IB_MTU_256;
  
-       ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+       ret = ib_modify_qp(qp, attr, IB_QP_STATE);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
                goto error_4;
  
        memset(attr, 0, sizeof(*attr));
        attr->qp_state = IB_QPS_RTS;
-       ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
+       ret = ib_modify_qp(qp, attr, IB_QP_STATE);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
                goto error_4;
        return 0;
  
  error_4:
-       mlx5_ib_destroy_qp(qp, NULL);
+       ib_destroy_qp(qp);
        dev->umrc.qp = NULL;
  
  error_3:
@@@ -4462,8 -4427,7 +4427,8 @@@ static void mlx5r_mp_remove(struct auxi
        mutex_lock(&mlx5_ib_multiport_mutex);
        if (mpi->ibdev)
                mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
 -      list_del(&mpi->list);
 +      else
 +              list_del(&mpi->list);
        mutex_unlock(&mlx5_ib_multiport_mutex);
        kfree(mpi);
  }
This page took 0.27276 seconds and 4 git commands to generate.