Git Repo - linux.git/commitdiff
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
authorLinus Torvalds <[email protected]>
Thu, 9 May 2019 16:02:46 +0000 (09:02 -0700)
committerLinus Torvalds <[email protected]>
Thu, 9 May 2019 16:02:46 +0000 (09:02 -0700)
Pull rdma updates from Jason Gunthorpe:
 "This has been a smaller cycle than normal. One new driver was
  accepted, which is unusual, and at least one more driver remains in
  review on the list.

  Summary:

   - Driver fixes for hns, hfi1, nes, rxe, i40iw, mlx5, cxgb4,
     vmw_pvrdma

   - Many patches from MatthewW converting radix tree and IDR users to
     use xarray

   - Introduction of tracepoints to the MAD layer

   - Build large SGLs at the start for DMA mapping and get the driver to
     split them

   - Generally clean SGL handling code throughout the subsystem

   - Support for restricting RDMA devices to net namespaces for
     containers

   - Progress to remove object allocation boilerplate code from drivers

   - Change in how the mlx5 driver shows representor ports linked to VFs

   - mlx5 uapi feature to access the on chip SW ICM memory

   - Add a new driver for 'EFA'. This is HW that supports user space
     packet processing through QPs in Amazon's cloud"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (186 commits)
  RDMA/ipoib: Allow user space differentiate between valid dev_port
  IB/core, ipoib: Do not overreact to SM LID change event
  RDMA/device: Don't fire uevent before device is fully initialized
  lib/scatterlist: Remove leftover from sg_page_iter comment
  RDMA/efa: Add driver to Kconfig/Makefile
  RDMA/efa: Add the efa module
  RDMA/efa: Add EFA verbs implementation
  RDMA/efa: Add common command handlers
  RDMA/efa: Implement functions that submit and complete admin commands
  RDMA/efa: Add the ABI definitions
  RDMA/efa: Add the com service API definitions
  RDMA/efa: Add the efa_com.h file
  RDMA/efa: Add the efa.h header file
  RDMA/efa: Add EFA device definitions
  RDMA: Add EFA related definitions
  RDMA/umem: Remove hugetlb flag
  RDMA/bnxt_re: Use core helpers to get aligned DMA address
  RDMA/i40iw: Use core helpers to get aligned DMA address within a supported page size
  RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks
  RDMA/umem: Add API to find best driver supported page size in an MR
  ...

21 files changed:
1  2 
MAINTAINERS
drivers/infiniband/Kconfig
drivers/infiniband/core/addr.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/vnic_main.c
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mthca/mthca_cq.c
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/qib/qib_iba7322.c
include/linux/mlx5/driver.h

diff --combined MAINTAINERS
index a225661d623770b5df175c6242049fea58ec3bc9,a868d8ce1437cb47cdf5cd416d773cf9159b94be..ae4063ef5533a31a67186d829a8c6835941886e3
@@@ -268,13 -268,12 +268,13 @@@ L:      [email protected]
  S:    Maintained
  F:    drivers/gpio/gpio-104-idio-16.c
  
 -ACCES 104-QUAD-8 IIO DRIVER
 +ACCES 104-QUAD-8 DRIVER
  M:    William Breathitt Gray <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
  F:    Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
 -F:    drivers/iio/counter/104-quad-8.c
 +F:    drivers/counter/104-quad-8.c
  
  ACCES PCI-IDIO-16 GPIO DRIVER
  M:    William Breathitt Gray <[email protected]>
@@@ -469,7 -468,7 +469,7 @@@ ADM1025 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/adm1025
 +F:    Documentation/hwmon/adm1025.rst
  F:    drivers/hwmon/adm1025.c
  
  ADM1029 HARDWARE MONITOR DRIVER
@@@ -521,7 -520,7 +521,7 @@@ ADS1015 HARDWARE MONITOR DRIVE
  M:    Dirk Eibach <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/ads1015
 +F:    Documentation/hwmon/ads1015.rst
  F:    drivers/hwmon/ads1015.c
  F:    include/linux/platform_data/ads1015.h
  
@@@ -534,7 -533,7 +534,7 @@@ ADT7475 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/adt7475
 +F:    Documentation/hwmon/adt7475.rst
  F:    drivers/hwmon/adt7475.c
  
  ADVANSYS SCSI DRIVER
@@@ -745,6 -744,15 +745,15 @@@ S:       Supporte
  F:    Documentation/networking/device_drivers/amazon/ena.txt
  F:    drivers/net/ethernet/amazon/
  
+ AMAZON RDMA EFA DRIVER
+ M:    Gal Pressman <[email protected]>
+ R:    Yossi Leybovich <[email protected]>
+ L:    [email protected]
+ Q:    https://patchwork.kernel.org/project/linux-rdma/list/
+ S:    Supported
+ F:    drivers/infiniband/hw/efa/
+ F:    include/uapi/rdma/efa-abi.h
  AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
  M:    Tom Lendacky <[email protected]>
  M:    Gary Hook <[email protected]>
@@@ -765,7 -773,7 +774,7 @@@ AMD FAM15H PROCESSOR POWER MONITORING D
  M:    Huang Rui <[email protected]>
  L:    [email protected]
  S:    Supported
 -F:    Documentation/hwmon/fam15h_power
 +F:    Documentation/hwmon/fam15h_power.rst
  F:    drivers/hwmon/fam15h_power.c
  
  AMD FCH GPIO DRIVER
  W:    http://ez.analog.com/community/linux-device-drivers
  S:    Supported
  F:    drivers/iio/adc/ad7606.c
 -F:    Documentation/devicetree/bindings/iio/adc/ad7606.txt
 +F:    Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
  
  ANALOG DEVICES INC AD7768-1 DRIVER
  M:    Stefan Popa <[email protected]>
@@@ -951,7 -959,6 +960,7 @@@ F: drivers/dma/dma-axi-dmac.
  ANALOG DEVICES INC IIO DRIVERS
  M:    Lars-Peter Clausen <[email protected]>
  M:    Michael Hennerich <[email protected]>
 +M:    Stefan Popa <[email protected]>
  W:    http://wiki.analog.com/
  W:    http://ez.analog.com/community/linux-device-drivers
  S:    Supported
@@@ -1169,7 -1176,7 +1178,7 @@@ S:      Supporte
  T:    git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
  F:    drivers/gpu/drm/arm/display/include/
  F:    drivers/gpu/drm/arm/display/komeda/
 -F:    Documentation/devicetree/bindings/display/arm/arm,komeda.txt
 +F:    Documentation/devicetree/bindings/display/arm,komeda.txt
  F:    Documentation/gpu/komeda-kms.rst
  
  ARM MALI-DP DRM DRIVER
@@@ -1182,15 -1189,6 +1191,15 @@@ F:    drivers/gpu/drm/arm
  F:    Documentation/devicetree/bindings/display/arm,malidp.txt
  F:    Documentation/gpu/afbc.rst
  
 +ARM MALI PANFROST DRM DRIVER
 +M:    Rob Herring <[email protected]>
 +M:    Tomeu Vizoso <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    drivers/gpu/drm/panfrost/
 +F:    include/uapi/drm/panfrost_drm.h
 +
  ARM MFM AND FLOPPY DRIVERS
  M:    Ian Molton <[email protected]>
  S:    Maintained
@@@ -1427,9 -1425,7 +1436,9 @@@ M:      Manivannan Sadhasivam <manivannan.sa
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm64/boot/dts/bitmain/
 +F:    drivers/pinctrl/pinctrl-bm1880.c
  F:    Documentation/devicetree/bindings/arm/bitmain.yaml
 +F:    Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
  
  ARM/CALXEDA HIGHBANK ARCHITECTURE
  M:    Rob Herring <[email protected]>
@@@ -2526,7 -2522,7 +2535,7 @@@ ASC7621 HARDWARE MONITOR DRIVE
  M:    George Joseph <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/asc7621
 +F:    Documentation/hwmon/asc7621.rst
  F:    drivers/hwmon/asc7621.c
  
  ASPEED VIDEO ENGINE DRIVER
@@@ -2807,13 -2803,10 +2816,13 @@@ M:   Simon Wunderlich <sw@simonwunderlich
  M:    Antonio Quartulli <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  W:    https://www.open-mesh.org/
 +B:    https://www.open-mesh.org/projects/batman-adv/issues
 +C:    irc://chat.freenode.net/batman
  Q:    https://patchwork.open-mesh.org/project/batman/list/
 +T:    git https://git.open-mesh.org/linux-merge.git
  S:    Maintained
 -F:    Documentation/ABI/testing/sysfs-class-net-batman-adv
 -F:    Documentation/ABI/testing/sysfs-class-net-mesh
 +F:    Documentation/ABI/obsolete/sysfs-class-net-batman-adv
 +F:    Documentation/ABI/obsolete/sysfs-class-net-mesh
  F:    Documentation/networking/batman-adv.rst
  F:    include/uapi/linux/batadv_packet.h
  F:    include/uapi/linux/batman_adv.h
@@@ -3137,7 -3130,6 +3146,7 @@@ F:      drivers/cpufreq/bmips-cpufreq.
  BROADCOM BMIPS MIPS ARCHITECTURE
  M:    Kevin Cernekee <[email protected]>
  M:    Florian Fainelli <[email protected]>
 +L:    [email protected]
  L:    [email protected]
  T:    git git://github.com/broadcom/stblinux.git
  S:    Maintained
@@@ -3369,7 -3361,7 +3378,7 @@@ F:      include/uapi/linux/bsg.
  BT87X AUDIO DRIVER
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 -T:    git git://git.alsa-project.org/alsa-kernel.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
  S:    Maintained
  F:    Documentation/sound/cards/bt87x.rst
  F:    sound/pci/bt87x.c
@@@ -3422,7 -3414,7 +3431,7 @@@ F:      drivers/scsi/FlashPoint.
  C-MEDIA CMI8788 DRIVER
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 -T:    git git://git.alsa-project.org/alsa-kernel.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
  S:    Maintained
  F:    sound/pci/oxygen/
  
@@@ -3746,8 -3738,8 +3755,8 @@@ F:      scripts/checkpatch.p
  
  CHINESE DOCUMENTATION
  M:    Harry Wei <[email protected]>
 +M:    Alex Shi <[email protected]>
  L:    [email protected] (subscribers-only)
 -L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/translations/zh_CN/
  
@@@ -3814,21 -3806,16 +3823,21 @@@ M:   Richard Fitzgerald <[email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/clk/clk-lochnagar.c
 +F:    drivers/hwmon/lochnagar-hwmon.c
  F:    drivers/mfd/lochnagar-i2c.c
  F:    drivers/pinctrl/cirrus/pinctrl-lochnagar.c
  F:    drivers/regulator/lochnagar-regulator.c
 +F:    sound/soc/codecs/lochnagar-sc.c
  F:    include/dt-bindings/clk/lochnagar.h
  F:    include/dt-bindings/pinctrl/lochnagar.h
  F:    include/linux/mfd/lochnagar*
  F:    Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
  F:    Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
 +F:    Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
  F:    Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
  F:    Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
 +F:    Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
 +F:    Documentation/hwmon/lochnagar
  
  CISCO FCOE HBA DRIVER
  M:    Satish Kharat <[email protected]>
@@@ -4066,7 -4053,7 +4075,7 @@@ CORETEMP HARDWARE MONITORING DRIVE
  M:    Fenghua Yu <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/coretemp
 +F:    Documentation/hwmon/coretemp.rst
  F:    drivers/hwmon/coretemp.c
  
  COSA/SRP SYNC SERIAL DRIVER
@@@ -4075,16 -4062,6 +4084,16 @@@ W:    http://www.fi.muni.cz/~kas/cosa
  S:    Maintained
  F:    drivers/net/wan/cosa*
  
 +COUNTER SUBSYSTEM
 +M:    William Breathitt Gray <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-counter*
 +F:    Documentation/driver-api/generic-counter.rst
 +F:    drivers/counter/
 +F:    include/linux/counter.h
 +F:    include/linux/counter_enum.h
 +
  CPMAC ETHERNET DRIVER
  M:    Florian Fainelli <[email protected]>
  L:    [email protected]
@@@ -4279,7 -4256,7 +4288,7 @@@ S:      Supporte
  F:    drivers/scsi/cxgbi/cxgb3i
  
  CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
- M:    Steve Wise <swise@chelsio.com>
+ M:    Potnuri Bharat Teja <bharat@chelsio.com>
  L:    [email protected]
  W:    http://www.openfabrics.org
  S:    Supported
@@@ -4308,7 -4285,7 +4317,7 @@@ S:      Supporte
  F:    drivers/scsi/cxgbi/cxgb4i
  
  CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
- M:    Steve Wise <swise@chelsio.com>
+ M:    Potnuri Bharat Teja <bharat@chelsio.com>
  L:    [email protected]
  W:    http://www.openfabrics.org
  S:    Supported
@@@ -4584,7 -4561,6 +4593,7 @@@ S:      Maintaine
  F:    drivers/devfreq/
  F:    include/linux/devfreq.h
  F:    Documentation/devicetree/bindings/devfreq/
 +F:    include/trace/events/devfreq.h
  
  DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
  M:    Chanwoo Choi <[email protected]>
@@@ -4632,7 -4608,7 +4641,7 @@@ DIALOG SEMICONDUCTOR DRIVER
  M:    Support Opensource <[email protected]>
  W:    http://www.dialog-semiconductor.com/products
  S:    Supported
 -F:    Documentation/hwmon/da90??
 +F:    Documentation/hwmon/da90??.rst
  F:    Documentation/devicetree/bindings/mfd/da90*.txt
  F:    Documentation/devicetree/bindings/input/da90??-onkey.txt
  F:    Documentation/devicetree/bindings/thermal/da90??-thermal.txt
@@@ -4783,7 -4759,7 +4792,7 @@@ DME1737 HARDWARE MONITOR DRIVE
  M:    Juerg Haefliger <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/dme1737
 +F:    Documentation/hwmon/dme1737.rst
  F:    drivers/hwmon/dme1737.c
  
  DMI/SMBIOS SUPPORT
@@@ -4928,14 -4904,6 +4937,14 @@@ M:    Dave Airlie <[email protected]
  S:    Odd Fixes
  F:    drivers/gpu/drm/ast/
  
 +DRM DRIVER FOR ASPEED BMC GFX
 +M:    Joel Stanley <[email protected]>
 +L:    [email protected]
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Supported
 +F:    drivers/gpu/drm/aspeed/
 +F:    Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
 +
  DRM DRIVER FOR BOCHS VIRTUAL GPU
  M:    Gerd Hoffmann <[email protected]>
  L:    [email protected]
@@@ -4949,12 -4917,6 +4958,12 @@@ T:    git git://anongit.freedesktop.org/dr
  S:    Maintained
  F:    drivers/gpu/drm/tve200/
  
 +DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
 +M:    Jagan Teki <[email protected]>
 +S:    Maintained
 +F:    drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
 +F:    Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
 +
  DRM DRIVER FOR ILITEK ILI9225 PANELS
  M:    David Lechner <[email protected]>
  S:    Maintained
@@@ -5046,12 -5008,6 +5055,12 @@@ S:    Orphan / Obsolet
  F:    drivers/gpu/drm/r128/
  F:    include/uapi/drm/r128_drm.h
  
 +DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
 +M:    Guido Günther <[email protected]>
 +S:    Maintained
 +F:    drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
 +F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
 +
  DRM DRIVER FOR SAVAGE VIDEO CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/savage/
@@@ -5099,13 -5055,6 +5108,13 @@@ S:    Odd Fixe
  F:    drivers/gpu/drm/udl/
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
 +DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
 +M:    Hans de Goede <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/gpu/drm/vboxvideo/
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +
  DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
  M:    Rodrigo Siqueira <[email protected]>
  R:    Haneen Mohammed <[email protected]>
@@@ -5240,15 -5189,6 +5249,15 @@@ S:    Maintaine
  F:    drivers/gpu/drm/hisilicon/
  F:    Documentation/devicetree/bindings/display/hisilicon/
  
 +DRM DRIVERS FOR LIMA
 +M:    Qiang Yu <[email protected]>
 +L:    [email protected]
 +L:    [email protected] (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/gpu/drm/lima/
 +F:    include/uapi/drm/lima_drm.h
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +
  DRM DRIVERS FOR MEDIATEK
  M:    CK Hu <[email protected]>
  M:    Philipp Zabel <[email protected]>
  S:    Maintained
  F:    drivers/edac/ghes_edac.c
  
 +EDAC-I10NM
 +M:    Tony Luck <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/edac/i10nm_base.c
 +
  EDAC-I3000
  L:    [email protected]
  S:    Orphan
@@@ -5753,7 -5687,7 +5762,7 @@@ EDAC-SKYLAK
  M:    Tony Luck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/edac/skx_edac.c
 +F:    drivers/edac/skx_*.c
  
  EDAC-TI
  M:    Tero Kristo <[email protected]>
@@@ -5772,7 -5706,7 +5781,7 @@@ F:      drivers/edac/qcom_edac.
  EDIROL UA-101/UA-1000 DRIVER
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 -T:    git git://git.alsa-project.org/alsa-kernel.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
  S:    Maintained
  F:    sound/usb/misc/ua101.c
  
@@@ -6011,7 -5945,7 +6020,7 @@@ F71805F HARDWARE MONITORING DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/f71805f
 +F:    Documentation/hwmon/f71805f.rst
  F:    drivers/hwmon/f71805f.c
  
  FADDR2LINE
@@@ -6112,7 -6046,7 +6121,7 @@@ F:      include/linux/f75375s.
  FIREWIRE AUDIO DRIVERS
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 -T:    git git://git.alsa-project.org/alsa-kernel.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
  S:    Maintained
  F:    sound/firewire/
  
@@@ -6536,7 -6470,7 +6545,7 @@@ S:      Maintaine
  F:    drivers/media/radio/radio-gemtek*
  
  GENERIC GPIO I2C DRIVER
 -M:    Haavard Skinnemoen <hskinnemoen@gmail.com>
 +M:    Wolfram Sang <wsa+renesas@sang-engineering.com>
  S:    Supported
  F:    drivers/i2c/busses/i2c-gpio.c
  F:    include/linux/platform_data/i2c-gpio.h
@@@ -6668,7 -6602,7 +6677,7 @@@ M:      Andy Shevchenko <andriy.shevchenko@l
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/acpi/gpio-properties.txt
 +F:    Documentation/firmware-guide/acpi/gpio-properties.rst
  F:    drivers/gpio/gpiolib-acpi.c
  
  GPIO IR Transmitter
@@@ -7408,6 -7342,7 +7417,6 @@@ F:      Documentation/devicetree/bindings/i3
  F:    Documentation/driver-api/i3c
  F:    drivers/i3c/
  F:    include/linux/i3c/
 -F:    include/dt-bindings/i3c/
  
  I3C DRIVER FOR SYNOPSYS DESIGNWARE
  M:    Vitor Soares <[email protected]>
@@@ -7692,7 -7627,7 +7701,7 @@@ INA209 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/ina209
 +F:    Documentation/hwmon/ina209.rst
  F:    Documentation/devicetree/bindings/hwmon/ina2xx.txt
  F:    drivers/hwmon/ina209.c
  
@@@ -7700,7 -7635,7 +7709,7 @@@ INA2XX HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/ina2xx
 +F:    Documentation/hwmon/ina2xx.rst
  F:    drivers/hwmon/ina2xx.c
  F:    include/linux/platform_data/ina2xx.h
  
@@@ -7727,6 -7662,10 +7736,10 @@@ F:    drivers/infiniband
  F:    include/uapi/linux/if_infiniband.h
  F:    include/uapi/rdma/
  F:    include/rdma/
+ F:    include/trace/events/ib_mad.h
+ F:    include/trace/events/ib_umad.h
+ F:    samples/bpf/ibumad_kern.c
+ F:    samples/bpf/ibumad_user.c
  
  INGENIC JZ4780 DMA Driver
  M:    Zubair Lutfullah Kakakhel <[email protected]>
@@@ -8120,7 -8059,6 +8133,7 @@@ F:      drivers/gpio/gpio-intel-mid.
  
  INTERCONNECT API
  M:    Georgi Djakov <[email protected]>
 +L:    [email protected]
  S:    Maintained
  F:    Documentation/interconnect/
  F:    Documentation/devicetree/bindings/interconnect/
@@@ -8329,7 -8267,7 +8342,7 @@@ IT87 HARDWARE MONITORING DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/it87
 +F:    Documentation/hwmon/it87.rst
  F:    drivers/hwmon/it87.c
  
  IT913X MEDIA DRIVER
@@@ -8373,7 -8311,7 +8386,7 @@@ M:      Guenter Roeck <[email protected]
  L:    [email protected]
  S:    Maintained
  F:    drivers/hwmon/jc42.c
 -F:    Documentation/hwmon/jc42
 +F:    Documentation/hwmon/jc42.rst
  
  JFS FILESYSTEM
  M:    Dave Kleikamp <[email protected]>
@@@ -8421,14 -8359,14 +8434,14 @@@ K10TEMP HARDWARE MONITORING DRIVE
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/k10temp
 +F:    Documentation/hwmon/k10temp.rst
  F:    drivers/hwmon/k10temp.c
  
  K8TEMP HARDWARE MONITORING DRIVER
  M:    Rudolf Marek <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/k8temp
 +F:    Documentation/hwmon/k8temp.rst
  F:    drivers/hwmon/k8temp.c
  
  KASAN
@@@ -8783,7 -8721,6 +8796,7 @@@ F:      scripts/leaking_addresses.p
  LED SUBSYSTEM
  M:    Jacek Anaszewski <[email protected]>
  M:    Pavel Machek <[email protected]>
 +R:    Dan Murphy <[email protected]>
  L:    [email protected]
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
  S:    Maintained
@@@ -9069,7 -9006,7 +9082,7 @@@ R:      Daniel Lustig <[email protected]
  L:    [email protected]
  L:    [email protected]
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    tools/memory-model/
  F:    Documentation/atomic_bitops.txt
  F:    Documentation/atomic_t.txt
@@@ -9120,21 -9057,21 +9133,21 @@@ LM78 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/lm78
 +F:    Documentation/hwmon/lm78.rst
  F:    drivers/hwmon/lm78.c
  
  LM83 HARDWARE MONITOR DRIVER
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/lm83
 +F:    Documentation/hwmon/lm83.rst
  F:    drivers/hwmon/lm83.c
  
  LM90 HARDWARE MONITOR DRIVER
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/lm90
 +F:    Documentation/hwmon/lm90.rst
  F:    Documentation/devicetree/bindings/hwmon/lm90.txt
  F:    drivers/hwmon/lm90.c
  F:    include/dt-bindings/thermal/lm90.h
@@@ -9143,7 -9080,7 +9156,7 @@@ LM95234 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/lm95234
 +F:    Documentation/hwmon/lm95234.rst
  F:    drivers/hwmon/lm95234.c
  
  LME2510 MEDIA DRIVER
@@@ -9175,6 -9112,7 +9188,6 @@@ F:      arch/*/include/asm/spinlock*.
  F:    include/linux/rwlock*.h
  F:    include/linux/mutex*.h
  F:    include/linux/rwsem*.h
 -F:    arch/*/include/asm/rwsem.h
  F:    include/linux/seqlock.h
  F:    lib/locking*.[ch]
  F:    kernel/locking/
@@@ -9216,7 -9154,7 +9229,7 @@@ LTC4261 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/ltc4261
 +F:    Documentation/hwmon/ltc4261.rst
  F:    drivers/hwmon/ltc4261.c
  
  LTC4306 I2C MULTIPLEXER DRIVER
@@@ -9447,7 -9385,7 +9460,7 @@@ MAX16065 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/max16065
 +F:    Documentation/hwmon/max16065.rst
  F:    drivers/hwmon/max16065.c
  
  MAX2175 SDR TUNER DRIVER
@@@ -9463,14 -9401,14 +9476,14 @@@ F:   include/uapi/linux/max2175.
  MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
  L:    [email protected]
  S:    Orphan
 -F:    Documentation/hwmon/max6650
 +F:    Documentation/hwmon/max6650.rst
  F:    drivers/hwmon/max6650.c
  
  MAX6697 HARDWARE MONITOR DRIVER
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/max6697
 +F:    Documentation/hwmon/max6697.rst
  F:    Documentation/devicetree/bindings/hwmon/max6697.txt
  F:    drivers/hwmon/max6697.c
  F:    include/linux/platform_data/max6697.h
@@@ -9482,13 -9420,6 +9495,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/sound/max9860.txt
  F:    sound/soc/codecs/max9860.*
  
 +MAXBOTIX ULTRASONIC RANGER IIO DRIVER
 +M:    Andreas Klinger <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
 +F:    drivers/iio/proximity/mb1232.c
 +
  MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER
  M:    Javier Martinez Canillas <[email protected]>
  L:    [email protected]
@@@ -9857,17 -9788,9 +9870,17 @@@ F:    drivers/media/platform/mtk-vpu
  F:    Documentation/devicetree/bindings/media/mediatek-vcodec.txt
  F:    Documentation/devicetree/bindings/media/mediatek-vpu.txt
  
 +MEDIATEK MMC/SD/SDIO DRIVER
 +M:    Chaotian Jing <[email protected]>
 +S:    Maintained
 +F:    drivers/mmc/host/mtk-sd.c
 +F:    Documentation/devicetree/bindings/mmc/mtk-sd.txt
 +
  MEDIATEK MT76 WIRELESS LAN DRIVER
  M:    Felix Fietkau <[email protected]>
  M:    Lorenzo Bianconi <[email protected]>
 +R:    Ryder Lee <[email protected]>
 +R:    Roy Luo <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/net/wireless/mediatek/mt76/
@@@ -9966,6 -9889,15 +9979,6 @@@ F:     drivers/net/ethernet/mellanox/mlx5/c
  F:    drivers/net/ethernet/mellanox/mlx5/core/fpga/*
  F:    include/linux/mlx5/mlx5_ifc_fpga.h
  
 -MELLANOX ETHERNET INNOVA IPSEC DRIVER
 -R:    Boris Pismenny <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -W:    http://www.mellanox.com
 -Q:    http://patchwork.ozlabs.org/project/netdev/list/
 -F:    drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
 -F:    drivers/net/ethernet/mellanox/mlx5/core/ipsec*
 -
  MELLANOX ETHERNET SWITCH DRIVERS
  M:    Jiri Pirko <[email protected]>
  M:    Ido Schimmel <[email protected]>
@@@ -10122,7 -10054,7 +10135,7 @@@ F:   drivers/mfd/menf21bmc.
  F:    drivers/watchdog/menf21bmc_wdt.c
  F:    drivers/leds/leds-menf21bmc.c
  F:    drivers/hwmon/menf21bmc_hwmon.c
 -F:    Documentation/hwmon/menf21bmc
 +F:    Documentation/hwmon/menf21bmc.rst
  
  MEN Z069 WATCHDOG DRIVER
  M:    Johannes Thumshirn <[email protected]>
  W:    http://linux-meson.com/
  S:    Supported
  F:    drivers/media/platform/meson/ao-cec.c
 +F:    drivers/media/platform/meson/ao-cec-g12a.c
  F:    Documentation/devicetree/bindings/media/meson-ao-cec.txt
  T:    git git://linuxtv.org/media_tree.git
  
@@@ -10227,7 -10158,7 +10240,7 @@@ F:   drivers/spi/spi-at91-usart.
  F:    Documentation/devicetree/bindings/mfd/atmel-usart.txt
  
  MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
 -M:    Woojung Huh <Woojung.H[email protected]>
 +M:    Woojung Huh <woojung.h[email protected]>
  M:    Microchip Linux Driver Support <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -10516,7 -10447,7 +10529,7 @@@ F:   include/uapi/linux/meye.
  MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
  M:    Jiri Slaby <[email protected]>
  S:    Maintained
 -F:    Documentation/serial/moxa-smartio
 +F:    Documentation/serial/moxa-smartio.rst
  F:    drivers/tty/mxser.*
  
  MR800 AVERMEDIA USB FM RADIO DRIVER
@@@ -10751,7 -10682,7 +10764,7 @@@ NCT6775 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/nct6775
 +F:    Documentation/hwmon/nct6775.rst
  F:    drivers/hwmon/nct6775.c
  
  NET_FAILOVER MODULE
  L:    [email protected]
  F:    Documentation/blockdev/nbd.txt
  F:    drivers/block/nbd.c
 +F:    include/trace/events/nbd.h
  F:    include/uapi/linux/nbd.h
  
  NETWORK DROP MONITOR
@@@ -11200,16 -11130,6 +11213,16 @@@ F: Documentation/ABI/stable/sysfs-bus-n
  F:    include/linux/nvmem-consumer.h
  F:    include/linux/nvmem-provider.h
  
 +NXP FXAS21002C DRIVER
 +M:    Rui Miguel Silva <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/gyroscope/fxas21002c.txt
 +F:    drivers/iio/gyro/fxas21002c_core.c
 +F:    drivers/iio/gyro/fxas21002c.h
 +F:    drivers/iio/gyro/fxas21002c_i2c.c
 +F:    drivers/iio/gyro/fxas21002c_spi.c
 +
  NXP SGTL5000 DRIVER
  M:    Fabio Estevam <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
@@@ -11217,12 -11137,6 +11230,12 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/sound/sgtl5000.txt
  F:    sound/soc/codecs/sgtl5000*
  
 +NXP SJA1105 ETHERNET SWITCH DRIVER
 +M:    Vladimir Oltean <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/net/dsa/sja1105
 +
  NXP TDA998X DRM DRIVER
  M:    Russell King <[email protected]>
  S:    Maintained
@@@ -11702,7 -11616,7 +11715,7 @@@ F:   Documentation/devicetree/bindings/op
  OPL4 DRIVER
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 -T:    git git://git.alsa-project.org/alsa-kernel.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
  S:    Maintained
  F:    sound/drivers/opl4/
  
  S:    Orphan
  F:    drivers/i2c/busses/i2c-pasemi.c
  
 +PACKING
 +M:    Vladimir Oltean <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    lib/packing.c
 +F:    include/linux/packing.h
 +F:    Documentation/packing.txt
 +
  PADATA PARALLEL EXECUTION MECHANISM
  M:    Steffen Klassert <[email protected]>
  L:    [email protected]
@@@ -11870,7 -11776,7 +11883,7 @@@ PC87360 HARDWARE MONITORING DRIVE
  M:    Jim Cromie <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/pc87360
 +F:    Documentation/hwmon/pc87360.rst
  F:    drivers/hwmon/pc87360.c
  
  PC8736x GPIO DRIVER
@@@ -11882,7 -11788,7 +11895,7 @@@ PC87427 HARDWARE MONITORING DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/pc87427
 +F:    Documentation/hwmon/pc87427.rst
  F:    drivers/hwmon/pc87427.c
  
  PCA9532 LED DRIVER
@@@ -12282,7 -12188,6 +12295,7 @@@ F:   arch/*/kernel/*/*/perf_event*.
  F:    arch/*/include/asm/perf_event.h
  F:    arch/*/kernel/perf_callchain.c
  F:    arch/*/events/*
 +F:    arch/*/events/*/*
  F:    tools/perf/
  
  PERSONALITY HANDLING
@@@ -12451,23 -12356,23 +12464,23 @@@ S:        Maintaine
  F:    Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
  F:    Documentation/devicetree/bindings/hwmon/max31785.txt
  F:    Documentation/devicetree/bindings/hwmon/ltc2978.txt
 -F:    Documentation/hwmon/adm1275
 -F:    Documentation/hwmon/ibm-cffps
 -F:    Documentation/hwmon/ir35221
 -F:    Documentation/hwmon/lm25066
 -F:    Documentation/hwmon/ltc2978
 -F:    Documentation/hwmon/ltc3815
 -F:    Documentation/hwmon/max16064
 -F:    Documentation/hwmon/max20751
 -F:    Documentation/hwmon/max31785
 -F:    Documentation/hwmon/max34440
 -F:    Documentation/hwmon/max8688
 -F:    Documentation/hwmon/pmbus
 -F:    Documentation/hwmon/pmbus-core
 -F:    Documentation/hwmon/tps40422
 -F:    Documentation/hwmon/ucd9000
 -F:    Documentation/hwmon/ucd9200
 -F:    Documentation/hwmon/zl6100
 +F:    Documentation/hwmon/adm1275.rst
 +F:    Documentation/hwmon/ibm-cffps.rst
 +F:    Documentation/hwmon/ir35221.rst
 +F:    Documentation/hwmon/lm25066.rst
 +F:    Documentation/hwmon/ltc2978.rst
 +F:    Documentation/hwmon/ltc3815.rst
 +F:    Documentation/hwmon/max16064.rst
 +F:    Documentation/hwmon/max20751.rst
 +F:    Documentation/hwmon/max31785.rst
 +F:    Documentation/hwmon/max34440.rst
 +F:    Documentation/hwmon/max8688.rst
 +F:    Documentation/hwmon/pmbus.rst
 +F:    Documentation/hwmon/pmbus-core.rst
 +F:    Documentation/hwmon/tps40422.rst
 +F:    Documentation/hwmon/ucd9000.rst
 +F:    Documentation/hwmon/ucd9200.rst
 +F:    Documentation/hwmon/zl6100.rst
  F:    drivers/hwmon/pmbus/
  F:    include/linux/pmbus.h
  
@@@ -12523,7 -12428,7 +12536,7 @@@ M:   Mark Rutland <[email protected]
  M:    Lorenzo Pieralisi <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    drivers/firmware/psci*.c
 +F:    drivers/firmware/psci/
  F:    include/linux/psci.h
  F:    include/uapi/linux/psci.h
  
@@@ -12731,7 -12636,7 +12744,7 @@@ M:   Bartlomiej Zolnierkiewicz <b.zolnier
  L:    [email protected]
  S:    Supported
  F:    Documentation/devicetree/bindings/hwmon/pwm-fan.txt
 -F:    Documentation/hwmon/pwm-fan
 +F:    Documentation/hwmon/pwm-fan.rst
  F:    drivers/hwmon/pwm-fan.c
  
  PWM IR Transmitter
@@@ -13149,9 -13054,9 +13162,9 @@@ M:   Josh Triplett <[email protected]
  R:    Steven Rostedt <[email protected]>
  R:    Mathieu Desnoyers <[email protected]>
  R:    Lai Jiangshan <[email protected]>
 -L:    linux-kernel@vger.kernel.org
 +L:    rcu@vger.kernel.org
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    tools/testing/selftests/rcutorture
  
  RDC R-321X SoC
@@@ -13197,10 -13102,10 +13210,10 @@@ R:        Steven Rostedt <[email protected]
  R:    Mathieu Desnoyers <[email protected]>
  R:    Lai Jiangshan <[email protected]>
  R:    Joel Fernandes <[email protected]>
 -L:    linux-kernel@vger.kernel.org
 +L:    rcu@vger.kernel.org
  W:    http://www.rdrop.com/users/paulmck/RCU/
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    Documentation/RCU/
  X:    Documentation/RCU/torture.txt
  F:    include/linux/rcu*
@@@ -13426,7 -13331,7 +13439,7 @@@ ROCKETPORT DRIVE
  P:    Comtrol Corp.
  W:    http://www.comtrol.com
  S:    Maintained
 -F:    Documentation/serial/rocket.txt
 +F:    Documentation/serial/rocket.rst
  F:    drivers/tty/rocket*
  
  ROCKETPORT EXPRESS/INFINITY DRIVER
@@@ -13510,12 -13415,6 +13523,12 @@@ T: git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    drivers/net/wireless/realtek/rtlwifi/
  
 +REALTEK WIRELESS DRIVER (rtw88)
 +M:    Yan-Hsuan Chuang <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/net/wireless/realtek/rtw88/
 +
  RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
  M:    Jes Sorensen <[email protected]>
  L:    [email protected]
@@@ -14061,7 -13960,7 +14074,7 @@@ W:   https://selinuxproject.or
  W:    https://github.com/SELinuxProject
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
  S:    Supported
 -F:    include/linux/selinux*
 +F:    include/uapi/linux/selinux_netlink.h
  F:    security/selinux/
  F:    scripts/selinux/
  F:    Documentation/admin-guide/LSM/SELinux.rst
@@@ -14358,10 -14257,10 +14371,10 @@@ M:        "Paul E. McKenney" <[email protected]
  M:    Josh Triplett <[email protected]>
  R:    Steven Rostedt <[email protected]>
  R:    Mathieu Desnoyers <[email protected]>
 -L:    linux-kernel@vger.kernel.org
 +L:    rcu@vger.kernel.org
  W:    http://www.rdrop.com/users/paulmck/RCU/
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    include/linux/srcu*.h
  F:    kernel/rcu/srcu*.c
  
@@@ -14402,21 -14301,21 +14415,21 @@@ SMM665 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/smm665
 +F:    Documentation/hwmon/smm665.rst
  F:    drivers/hwmon/smm665.c
  
  SMSC EMC2103 HARDWARE MONITOR DRIVER
  M:    Steve Glendinning <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/emc2103
 +F:    Documentation/hwmon/emc2103.rst
  F:    drivers/hwmon/emc2103.c
  
  SMSC SCH5627 HARDWARE MONITOR DRIVER
  M:    Hans de Goede <[email protected]>
  L:    [email protected]
  S:    Supported
 -F:    Documentation/hwmon/sch5627
 +F:    Documentation/hwmon/sch5627.rst
  F:    drivers/hwmon/sch5627.c
  
  SMSC UFX6000 and UFX7000 USB to VGA DRIVER
@@@ -14429,7 -14328,7 +14442,7 @@@ SMSC47B397 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/smsc47b397
 +F:    Documentation/hwmon/smsc47b397.rst
  F:    drivers/hwmon/smsc47b397.c
  
  SMSC911x ETHERNET DRIVER
@@@ -14449,8 -14348,9 +14462,8 @@@ SOC-CAMERA V4L2 SUBSYSTE
  L:    [email protected]
  T:    git git://linuxtv.org/media_tree.git
  S:    Orphan
 -F:    include/media/soc*
 -F:    drivers/media/i2c/soc_camera/
 -F:    drivers/media/platform/soc_camera/
 +F:    include/media/soc_camera.h
 +F:    drivers/staging/media/soc_camera/
  
  SOCIONEXT SYNQUACER I2C DRIVER
  M:    Ard Biesheuvel <[email protected]>
@@@ -14586,15 -14486,16 +14599,15 @@@ T:        git git://linuxtv.org/media_tree.gi
  S:    Maintained
  F:    drivers/media/i2c/imx355.c
  
 -SONY MEMORYSTICK CARD SUPPORT
 -M:    Alex Dubov <[email protected]>
 -W:    http://tifmxx.berlios.de/
 -S:    Maintained
 -F:    drivers/memstick/host/tifm_ms.c
 -
 -SONY MEMORYSTICK STANDARD SUPPORT
 +SONY MEMORYSTICK SUBSYSTEM
  M:    Maxim Levitsky <[email protected]>
 +M:    Alex Dubov <[email protected]>
 +M:    Ulf Hansson <[email protected]>
 +L:    [email protected]
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
  S:    Maintained
 -F:    drivers/memstick/core/ms_block.*
 +F:    drivers/memstick/
 +F:    include/linux/memstick.h
  
  SONY VAIO CONTROL DEVICE DRIVER
  M:    Mattia Dongili <[email protected]>
@@@ -14612,6 -14513,7 +14625,6 @@@ M:   Takashi Iwai <[email protected]
  L:    [email protected] (moderated for non-subscribers)
  W:    http://www.alsa-project.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
 -T:    git git://git.alsa-project.org/alsa-kernel.git
  Q:    http://patchwork.kernel.org/project/alsa-devel/list/
  S:    Maintained
  F:    Documentation/sound/
@@@ -14789,14 -14691,6 +14802,14 @@@ S: Maintaine
  F:    drivers/iio/imu/st_lsm6dsx/
  F:    Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
  
 +ST MIPID02 CSI-2 TO PARALLEL BRIDGE DRIVER
 +M:    Mickael Guene <[email protected]>
 +L:    [email protected]
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/i2c/st-mipid02.c
 +F:    Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
 +
  ST STM32 I2C/SMBUS DRIVER
  M:    Pierre-Yves MORDRET <[email protected]>
  L:    [email protected]
@@@ -15433,11 -15327,6 +15446,11 @@@ M: Laxman Dewangan <[email protected]
  S:    Supported
  F:    drivers/spi/spi-tegra*
  
 +TEGRA XUSB PADCTL DRIVER
 +M:    JC Kuo <[email protected]>
 +S:    Supported
 +F:    drivers/phy/tegra/xusb*
 +
  TEHUTI ETHERNET DRIVER
  M:    Andy Gospodarek <[email protected]>
  L:    [email protected]
@@@ -15631,11 -15520,9 +15644,11 @@@ S: Maintaine
  F:    drivers/net/ethernet/ti/cpsw*
  F:    drivers/net/ethernet/ti/davinci*
  
 -TI FLASH MEDIA INTERFACE DRIVER
 +TI FLASH MEDIA MEMORYSTICK/MMC DRIVERS
  M:    Alex Dubov <[email protected]>
  S:    Maintained
 +W:    http://tifmxx.berlios.de/
 +F:    drivers/memstick/host/tifm_ms.c
  F:    drivers/misc/tifm*
  F:    drivers/mmc/host/tifm_sd.c
  F:    include/linux/tifm.h
@@@ -15787,7 -15674,7 +15800,7 @@@ TMP401 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/tmp401
 +F:    Documentation/hwmon/tmp401.rst
  F:    drivers/hwmon/tmp401.c
  
  TMPFS (SHMEM FILESYSTEM)
@@@ -15820,7 -15707,7 +15833,7 @@@ M:   "Paul E. McKenney" <[email protected]
  M:    Josh Triplett <[email protected]>
  L:    [email protected]
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    Documentation/RCU/torture.txt
  F:    kernel/torture.c
  F:    kernel/rcu/rcutorture.c
@@@ -16062,12 -15949,6 +16075,12 @@@ F: drivers/uwb
  F:    include/linux/uwb.h
  F:    include/linux/uwb/
  
 +UNICODE SUBSYSTEM:
 +M:    Gabriel Krisman Bertazi <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    fs/unicode/
 +
  UNICORE32 ARCHITECTURE:
  M:    Guan Xuetao <[email protected]>
  W:    http://mprc.pku.edu.cn/~guanxuetao/linux
  S:    Supported
  F:    drivers/scsi/ufs/*dwc*
  
 +UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
 +M:    Stanley Chu <[email protected]>
 +L:    [email protected]
 +L:    [email protected] (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/scsi/ufs/ufs-mediatek*
 +
  UNSORTED BLOCK IMAGES (UBI)
  M:    Artem Bityutskiy <[email protected]>
  M:    Richard Weinberger <[email protected]>
  S:    Maintained
  F:    drivers/usb/roles/intel-xhci-usb-role-switch.c
  
 +USB IP DRIVER FOR HISILICON KIRIN
 +M:    Yu Chen <[email protected]>
 +M:    Binghui Wang <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt
 +F:    drivers/phy/hisilicon/phy-hi3660-usb3.c
 +
  USB ISP116X DRIVER
  M:    Olav Kongas <[email protected]>
  L:    [email protected]
@@@ -16257,7 -16123,7 +16270,7 @@@ F:   drivers/usb/storage
  USB MIDI DRIVER
  M:    Clemens Ladisch <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
 -T:    git git://git.alsa-project.org/alsa-kernel.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
  S:    Maintained
  F:    sound/usb/midi.*
  
@@@ -16846,7 -16712,7 +16859,7 @@@ VT1211 HARDWARE MONITOR DRIVE
  M:    Juerg Haefliger <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/vt1211
 +F:    Documentation/hwmon/vt1211.rst
  F:    drivers/hwmon/vt1211.c
  
  VT8231 HARDWARE MONITOR DRIVER
@@@ -16874,14 -16740,14 +16887,14 @@@ W83791D HARDWARE MONITORING DRIVE
  M:    Marc Hulsman <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/w83791d
 +F:    Documentation/hwmon/w83791d.rst
  F:    drivers/hwmon/w83791d.c
  
  W83793 HARDWARE MONITORING DRIVER
  M:    Rudolf Marek <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/hwmon/w83793
 +F:    Documentation/hwmon/w83793.rst
  F:    drivers/hwmon/w83793.c
  
  W83795 HARDWARE MONITORING DRIVER
  T:    git https://github.com/CirrusLogic/linux-drivers.git
  W:    https://github.com/CirrusLogic/linux-drivers/wiki
  S:    Supported
 -F:    Documentation/hwmon/wm83??
 +F:    Documentation/hwmon/wm83??.rst
  F:    Documentation/devicetree/bindings/extcon/extcon-arizona.txt
  F:    Documentation/devicetree/bindings/regulator/arizona-regulator.txt
  F:    Documentation/devicetree/bindings/mfd/arizona.txt
@@@ -17080,7 -16946,7 +17093,7 @@@ M:   Tony Luck <[email protected]
  M:    Borislav Petkov <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    arch/x86/kernel/cpu/mcheck/*
 +F:    arch/x86/kernel/cpu/mce/*
  
  X86 MICROCODE UPDATE SUPPORT
  M:    Borislav Petkov <[email protected]>
index d318bab25860fb603fdff7fbe5ece8509593801f,e549be36dffe91573c90b282dab038fd1f313f56..cbfbea49f126cd108f95cd4dda2bafa1cb4e454e
@@@ -25,6 -25,7 +25,6 @@@ config INFINIBAND_USER_MA
  
  config INFINIBAND_USER_ACCESS
        tristate "InfiniBand userspace access (verbs and CM)"
 -      select ANON_INODES
        depends on MMU
        ---help---
          Userspace InfiniBand access support.  This enables the
@@@ -93,6 -94,7 +93,7 @@@ source "drivers/infiniband/hw/mthca/Kco
  source "drivers/infiniband/hw/qib/Kconfig"
  source "drivers/infiniband/hw/cxgb3/Kconfig"
  source "drivers/infiniband/hw/cxgb4/Kconfig"
+ source "drivers/infiniband/hw/efa/Kconfig"
  source "drivers/infiniband/hw/i40iw/Kconfig"
  source "drivers/infiniband/hw/mlx4/Kconfig"
  source "drivers/infiniband/hw/mlx5/Kconfig"
index 744b6ec0acb0b2bf4323d636f21a3494e2f8fc14,2b791ce7597f20f458bd36c46dc4decc0071321c..ba01b90c04e7756391c966b900a87f52adc69e44
  #include <net/neighbour.h>
  #include <net/route.h>
  #include <net/netevent.h>
 -#include <net/addrconf.h>
 +#include <net/ipv6_stubs.h>
  #include <net/ip6_route.h>
  #include <rdma/ib_addr.h>
+ #include <rdma/ib_cache.h>
  #include <rdma/ib_sa.h>
  #include <rdma/ib.h>
  #include <rdma/rdma_netlink.h>
@@@ -86,8 -87,8 +87,8 @@@ static inline bool ib_nl_is_good_ip_res
        if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
                return false;
  
 -      ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
 -                      nlmsg_len(nlh), ib_nl_addr_policy, NULL);
 +      ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
 +                                 nlmsg_len(nlh), ib_nl_addr_policy, NULL);
        if (ret)
                return false;
  
@@@ -351,7 -352,7 +352,7 @@@ static bool has_gateway(const struct ds
  
        if (family == AF_INET) {
                rt = container_of(dst, struct rtable, dst);
 -              return rt->rt_uses_gateway;
 +              return rt->rt_gw_family == AF_INET;
        }
  
        rt6 = container_of(dst, struct rt6_info, dst);
index 85324012bf078de90f0f483e1026c11fa95a1a4b,bced945a456d3eba62af3e339e3ebd6d2ce1ff2f..98eadd3089cead9a73b54f876b7ffdfa202f441c
@@@ -116,6 -116,10 +116,10 @@@ static const struct nla_policy nldev_po
        [RDMA_NLDEV_ATTR_RES_CTXN]              = { .type = NLA_U32 },
        [RDMA_NLDEV_ATTR_LINK_TYPE]             = { .type = NLA_NUL_STRING,
                                    .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
+       [RDMA_NLDEV_SYS_ATTR_NETNS_MODE]        = { .type = NLA_U8 },
+       [RDMA_NLDEV_ATTR_DEV_PROTOCOL]          = { .type = NLA_NUL_STRING,
+                                   .len = RDMA_NLDEV_ATTR_ENTRY_STRLEN },
+       [RDMA_NLDEV_NET_NS_FD]                  = { .type = NLA_U32 },
  };
  
  static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@@ -198,6 -202,8 +202,8 @@@ static int fill_nldev_handle(struct sk_
  static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
  {
        char fw[IB_FW_VERSION_NAME_MAX];
+       int ret = 0;
+       u8 port;
  
        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;
                return -EMSGSIZE;
        if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
                return -EMSGSIZE;
-       return 0;
+       /*
+        * Link type is determined on first port and mlx4 device
+        * which can potentially have two different link type for the same
+        * IB device is considered as better to be avoided in the future,
+        */
+       port = rdma_start_port(device);
+       if (rdma_cap_opa_mad(device, port))
+               ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa");
+       else if (rdma_protocol_ib(device, port))
+               ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib");
+       else if (rdma_protocol_iwarp(device, port))
+               ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw");
+       else if (rdma_protocol_roce(device, port))
+               ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce");
+       else if (rdma_protocol_usnic(device, port))
+               ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL,
+                                    "usnic");
+       return ret;
  }
  
  static int fill_port_info(struct sk_buff *msg,
@@@ -292,8 -316,7 +316,8 @@@ static int fill_res_info_entry(struct s
  {
        struct nlattr *entry_attr;
  
 -      entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
 +      entry_attr = nla_nest_start_noflag(msg,
 +                                         RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
        if (!entry_attr)
                return -EMSGSIZE;
  
@@@ -328,7 -351,7 +352,7 @@@ static int fill_res_info(struct sk_buf
        if (fill_nldev_handle(msg, device))
                return -EMSGSIZE;
  
 -      table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
 +      table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
        if (!table_attr)
                return -EMSGSIZE;
  
@@@ -608,14 -631,14 +632,14 @@@ static int nldev_get_doit(struct sk_buf
        u32 index;
        int err;
  
 -      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, extack);
 +      err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
  
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
@@@ -653,13 -676,13 +677,13 @@@ static int nldev_set_doit(struct sk_buf
        u32 index;
        int err;
  
 -      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
 -                        extack);
 +      err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
                nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
                            IB_DEVICE_NAME_MAX);
                err = ib_device_rename(device, name);
+               goto done;
        }
  
+       if (tb[RDMA_NLDEV_NET_NS_FD]) {
+               u32 ns_fd;
+               ns_fd = nla_get_u32(tb[RDMA_NLDEV_NET_NS_FD]);
+               err = ib_device_set_netns_put(skb, device, ns_fd);
+               goto put_done;
+       }
+ done:
        ib_device_put(device);
+ put_done:
        return err;
  }
  
@@@ -707,7 -741,7 +742,7 @@@ static int nldev_get_dumpit(struct sk_b
  {
        /*
         * There is no need to take lock, because
-        * we are relying on ib_core's lists_rwsem
+        * we are relying on ib_core's locking.
         */
        return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
  }
@@@ -722,15 -756,15 +757,15 @@@ static int nldev_port_get_doit(struct s
        u32 port;
        int err;
  
 -      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, extack);
 +      err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (err ||
            !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
            !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
@@@ -778,13 -812,13 +813,13 @@@ static int nldev_port_get_dumpit(struc
        int err;
        unsigned int p;
  
 -      err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, NULL);
 +      err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, NULL);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;
  
        ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(ifindex);
+       device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
        if (!device)
                return -EINVAL;
  
@@@ -833,13 -867,13 +868,13 @@@ static int nldev_res_get_doit(struct sk
        u32 index;
        int ret;
  
 -      ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, extack);
 +      ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
@@@ -887,7 -921,6 +922,6 @@@ static int _nldev_res_get_dumpit(struc
                nlmsg_cancel(skb, nlh);
                goto out;
        }
        nlmsg_end(skb, nlh);
  
        idx++;
@@@ -982,13 -1015,13 +1016,13 @@@ static int res_get_common_doit(struct s
        struct sk_buff *msg;
        int ret;
  
 -      ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, extack);
 +      ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
@@@ -1071,8 -1104,8 +1105,8 @@@ static int res_get_common_dumpit(struc
        u32 index, port = 0;
        bool filled = false;
  
 -      err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, NULL);
 +      err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, NULL);
        /*
         * Right now, we are expecting the device index to get res information,
         * but it is possible to extend this code to return all devices in
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
                goto err;
        }
  
 -      table_attr = nla_nest_start(skb, fe->nldev_attr);
 +      table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
        if (!table_attr) {
                ret = -EMSGSIZE;
                goto err;
  
                filled = true;
  
 -              entry_attr = nla_nest_start(skb, fe->entry);
 +              entry_attr = nla_nest_start_noflag(skb, fe->entry);
                if (!entry_attr) {
                        ret = -EMSGSIZE;
                        rdma_restrack_put(res);
@@@ -1250,8 -1283,8 +1284,8 @@@ static int nldev_newlink(struct sk_buf
        char type[IFNAMSIZ];
        int err;
  
 -      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, extack);
 +      err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
            !tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
                return -EINVAL;
@@@ -1294,13 -1327,13 +1328,13 @@@ static int nldev_dellink(struct sk_buf
        u32 index;
        int err;
  
 -      err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 -                        nldev_policy, extack);
 +      err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
 +                                   nldev_policy, extack);
        if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
                return -EINVAL;
  
        index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-       device = ib_device_get_by_index(index);
+       device = ib_device_get_by_index(sock_net(skb->sk), index);
        if (!device)
                return -EINVAL;
  
        return 0;
  }
  
+ static int nldev_get_sys_get_dumpit(struct sk_buff *skb,
+                                   struct netlink_callback *cb)
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       struct nlmsghdr *nlh;
+       int err;
+       err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+                         nldev_policy, NULL);
+       if (err)
+               return err;
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                       RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+                                        RDMA_NLDEV_CMD_SYS_GET),
+                       0, 0);
+       err = nla_put_u8(skb, RDMA_NLDEV_SYS_ATTR_NETNS_MODE,
+                        (u8)ib_devices_shared_netns);
+       if (err) {
+               nlmsg_cancel(skb, nlh);
+               return err;
+       }
+       nlmsg_end(skb, nlh);
+       return skb->len;
+ }
+ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+                                 struct netlink_ext_ack *extack)
+ {
+       struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+       u8 enable;
+       int err;
+       err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+                         nldev_policy, extack);
+       if (err || !tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE])
+               return -EINVAL;
+       enable = nla_get_u8(tb[RDMA_NLDEV_SYS_ATTR_NETNS_MODE]);
+       /* Only 0 and 1 are supported */
+       if (enable > 1)
+               return -EINVAL;
+       err = rdma_compatdev_set(enable);
+       return err;
+ }
  static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
        [RDMA_NLDEV_CMD_GET] = {
                .doit = nldev_get_doit,
                .doit = nldev_res_get_pd_doit,
                .dump = nldev_res_get_pd_dumpit,
        },
+       [RDMA_NLDEV_CMD_SYS_GET] = {
+               .dump = nldev_get_sys_get_dumpit,
+       },
+       [RDMA_NLDEV_CMD_SYS_SET] = {
+               .doit = nldev_set_sys_set_doit,
+               .flags = RDMA_NL_ADMIN_PERM,
+       },
  };
  
  void __init nldev_init(void)
index bb534959abf0a575c3e69da9f6389a92d9d1c2b1,114f890ab425854339bf0c57cd8a5393d668dc82..7d8071c7e56428c22f04341881fc02c4cb92106c
@@@ -40,7 -40,7 +40,7 @@@
  #include <linux/slab.h>
  #include <linux/dma-mapping.h>
  #include <linux/kref.h>
- #include <linux/idr.h>
+ #include <linux/xarray.h>
  #include <linux/workqueue.h>
  #include <uapi/linux/if_ether.h>
  #include <rdma/ib_pack.h>
@@@ -183,8 -183,7 +183,7 @@@ static struct ib_client sa_client = 
        .remove = ib_sa_remove_one
  };
  
- static DEFINE_SPINLOCK(idr_lock);
- static DEFINE_IDR(query_idr);
+ static DEFINE_XARRAY_FLAGS(queries, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
  
  static DEFINE_SPINLOCK(tid_lock);
  static u32 tid;
@@@ -1028,8 -1027,8 +1027,8 @@@ int ib_nl_handle_set_timeout(struct sk_
            !(NETLINK_CB(skb).sk))
                return -EPERM;
  
 -      ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
 -                      nlmsg_len(nlh), ib_nl_policy, NULL);
 +      ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
 +                                 nlmsg_len(nlh), ib_nl_policy, NULL);
        attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
        if (ret || !attr)
                goto settimeout_out;
@@@ -1080,8 -1079,8 +1079,8 @@@ static inline int ib_nl_is_good_resolve
        if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
                return 0;
  
 -      ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
 -                      nlmsg_len(nlh), ib_nl_policy, NULL);
 +      ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
 +                                 nlmsg_len(nlh), ib_nl_policy, NULL);
        if (ret)
                return 0;
  
@@@ -1180,14 -1179,14 +1179,14 @@@ void ib_sa_cancel_query(int id, struct 
        struct ib_mad_agent *agent;
        struct ib_mad_send_buf *mad_buf;
  
-       spin_lock_irqsave(&idr_lock, flags);
-       if (idr_find(&query_idr, id) != query) {
-               spin_unlock_irqrestore(&idr_lock, flags);
+       xa_lock_irqsave(&queries, flags);
+       if (xa_load(&queries, id) != query) {
+               xa_unlock_irqrestore(&queries, flags);
                return;
        }
        agent = query->port->agent;
        mad_buf = query->mad_buf;
-       spin_unlock_irqrestore(&idr_lock, flags);
+       xa_unlock_irqrestore(&queries, flags);
  
        /*
         * If the query is still on the netlink request list, schedule
@@@ -1363,21 -1362,14 +1362,14 @@@ static void init_mad(struct ib_sa_quer
  static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
                    gfp_t gfp_mask)
  {
-       bool preload = gfpflags_allow_blocking(gfp_mask);
        unsigned long flags;
        int ret, id;
  
-       if (preload)
-               idr_preload(gfp_mask);
-       spin_lock_irqsave(&idr_lock, flags);
-       id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
-       spin_unlock_irqrestore(&idr_lock, flags);
-       if (preload)
-               idr_preload_end();
-       if (id < 0)
-               return id;
+       xa_lock_irqsave(&queries, flags);
+       ret = __xa_alloc(&queries, &id, query, xa_limit_32b, gfp_mask);
+       xa_unlock_irqrestore(&queries, flags);
+       if (ret < 0)
+               return ret;
  
        query->mad_buf->timeout_ms  = timeout_ms;
        query->mad_buf->context[0] = query;
  
        ret = ib_post_send_mad(query->mad_buf, NULL);
        if (ret) {
-               spin_lock_irqsave(&idr_lock, flags);
-               idr_remove(&query_idr, id);
-               spin_unlock_irqrestore(&idr_lock, flags);
+               xa_lock_irqsave(&queries, flags);
+               __xa_erase(&queries, id);
+               xa_unlock_irqrestore(&queries, flags);
        }
  
        /*
@@@ -2188,9 -2180,9 +2180,9 @@@ static void send_handler(struct ib_mad_
                        break;
                }
  
-       spin_lock_irqsave(&idr_lock, flags);
-       idr_remove(&query_idr, query->id);
-       spin_unlock_irqrestore(&idr_lock, flags);
+       xa_lock_irqsave(&queries, flags);
+       __xa_erase(&queries, query->id);
+       xa_unlock_irqrestore(&queries, flags);
  
        free_mad(query);
        if (query->client)
@@@ -2475,5 -2467,5 +2467,5 @@@ void ib_sa_cleanup(void
        destroy_workqueue(ib_nl_wq);
        mcast_cleanup();
        ib_unregister_client(&sa_client);
-       idr_destroy(&query_idr);
+       WARN_ON(!xa_empty(&queries));
  }
index 65c3230f56631925402ea74f61fec32977a18ab6,94fac8fda75facfd9f125d0188330b18cfaf5a06..8e7da2d41fd80f11ca7e1ae7bc49d70887f689f5
@@@ -42,7 -42,7 +42,7 @@@
  #include <linux/file.h>
  #include <linux/mount.h>
  #include <linux/cdev.h>
- #include <linux/idr.h>
+ #include <linux/xarray.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
  
@@@ -125,23 -125,22 +125,22 @@@ static struct ib_client ucm_client = 
        .remove = ib_ucm_remove_one
  };
  
- static DEFINE_MUTEX(ctx_id_mutex);
- static DEFINE_IDR(ctx_id_table);
+ static DEFINE_XARRAY_ALLOC(ctx_id_table);
  static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
  
  static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
  {
        struct ib_ucm_context *ctx;
  
-       mutex_lock(&ctx_id_mutex);
-       ctx = idr_find(&ctx_id_table, id);
+       xa_lock(&ctx_id_table);
+       ctx = xa_load(&ctx_id_table, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        else
                atomic_inc(&ctx->ref);
-       mutex_unlock(&ctx_id_mutex);
+       xa_unlock(&ctx_id_table);
  
        return ctx;
  }
@@@ -194,10 -193,7 +193,7 @@@ static struct ib_ucm_context *ib_ucm_ct
        ctx->file = file;
        INIT_LIST_HEAD(&ctx->events);
  
-       mutex_lock(&ctx_id_mutex);
-       ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
-       mutex_unlock(&ctx_id_mutex);
-       if (ctx->id < 0)
+       if (xa_alloc(&ctx_id_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
                goto error;
  
        list_add_tail(&ctx->file_list, &file->ctxs);
@@@ -514,9 -510,7 +510,7 @@@ static ssize_t ib_ucm_create_id(struct 
  err2:
        ib_destroy_cm_id(ctx->cm_id);
  err1:
-       mutex_lock(&ctx_id_mutex);
-       idr_remove(&ctx_id_table, ctx->id);
-       mutex_unlock(&ctx_id_mutex);
+       xa_erase(&ctx_id_table, ctx->id);
        kfree(ctx);
        return result;
  }
@@@ -536,15 -530,15 +530,15 @@@ static ssize_t ib_ucm_destroy_id(struc
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
  
-       mutex_lock(&ctx_id_mutex);
-       ctx = idr_find(&ctx_id_table, cmd.id);
+       xa_lock(&ctx_id_table);
+       ctx = xa_load(&ctx_id_table, cmd.id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        else
-               idr_remove(&ctx_id_table, ctx->id);
-       mutex_unlock(&ctx_id_mutex);
+               __xa_erase(&ctx_id_table, ctx->id);
+       xa_unlock(&ctx_id_table);
  
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
@@@ -1175,7 -1169,7 +1169,7 @@@ static int ib_ucm_open(struct inode *in
        file->filp = filp;
        file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev);
  
 -      return nonseekable_open(inode, filp);
 +      return stream_open(inode, filp);
  }
  
  static int ib_ucm_close(struct inode *inode, struct file *filp)
                                 struct ib_ucm_context, file_list);
                mutex_unlock(&file->file_mutex);
  
-               mutex_lock(&ctx_id_mutex);
-               idr_remove(&ctx_id_table, ctx->id);
-               mutex_unlock(&ctx_id_mutex);
+               xa_erase(&ctx_id_table, ctx->id);
                ib_destroy_cm_id(ctx->cm_id);
                ib_ucm_cleanup_events(ctx);
                kfree(ctx);
@@@ -1352,7 -1343,7 +1343,7 @@@ static void __exit ib_ucm_cleanup(void
        class_remove_file(&cm_class, &class_attr_abi_version.attr);
        unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_ucm_dev, IB_UCM_NUM_DYNAMIC_MINOR);
-       idr_destroy(&ctx_id_table);
+       WARN_ON(!xa_empty(&ctx_id_table));
  }
  
  module_init(ib_ucm_init);
index b58b07c03cfb6487ca7173b7d6371cbd3e53b4c8,56aa342061100c64c4760ff9ba494e1d996df94e..671f07ba1fad66e8300d93c9b85a65091bcbc9ae
@@@ -129,6 -129,9 +129,9 @@@ struct ib_umad_packet 
        struct ib_user_mad mad;
  };
  
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/ib_umad.h>
  static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
  static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
                                   IB_UMAD_NUM_FIXED_MINOR;
@@@ -334,6 -337,9 +337,9 @@@ static ssize_t copy_recv_mad(struct ib_
                                return -EFAULT;
                }
        }
+       trace_ib_umad_read_recv(file, &packet->mad.hdr, &recv_buf->mad->mad_hdr);
        return hdr_size(file) + packet->length;
  }
  
@@@ -353,6 -359,9 +359,9 @@@ static ssize_t copy_send_mad(struct ib_
        if (copy_to_user(buf, packet->mad.data, packet->length))
                return -EFAULT;
  
+       trace_ib_umad_read_send(file, &packet->mad.hdr,
+                               (struct ib_mad_hdr *)&packet->mad.data);
        return size;
  }
  
@@@ -508,6 -517,9 +517,9 @@@ static ssize_t ib_umad_write(struct fil
  
        mutex_lock(&file->mutex);
  
+       trace_ib_umad_write(file, &packet->mad.hdr,
+                           (struct ib_mad_hdr *)&packet->mad.data);
        agent = __get_agent(file, packet->mad.hdr.id);
        if (!agent) {
                ret = -EINVAL;
@@@ -968,6 -980,11 +980,11 @@@ static int ib_umad_open(struct inode *i
                goto out;
        }
  
+       if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
+               ret = -EPERM;
+               goto out;
+       }
        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
  
        list_add_tail(&file->port_list, &port->file_list);
  
 -      nonseekable_open(inode, filp);
 +      stream_open(inode, filp);
  out:
        mutex_unlock(&port->file_mutex);
        return ret;
@@@ -1061,6 -1078,11 +1078,11 @@@ static int ib_umad_sm_open(struct inod
                }
        }
  
+       if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
+               ret = -EPERM;
+               goto err_up_sem;
+       }
        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
        if (ret)
                goto err_up_sem;
index 8b43dd96d3b20e030956fbf441b03041ae0e99f3,d01a2f861119f30768551d099902af6ff5b8db46..84a5e9a6d483e8933502f76e1004267fef509063
@@@ -723,7 -723,7 +723,7 @@@ static ssize_t ib_uverbs_write(struct f
                         * then the command request structure starts
                         * with a '__aligned u64 response' member.
                         */
-                       ret = get_user(response, (const u64 *)buf);
+                       ret = get_user(response, (const u64 __user *)buf);
                        if (ret)
                                goto out_unlock;
  
@@@ -895,7 -895,7 +895,7 @@@ static vm_fault_t rdma_umap_fault(struc
  
        /* Read only pages can just use the system zero page. */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
 -              vmf->page = ZERO_PAGE(vmf->vm_start);
 +              vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }
@@@ -926,43 -926,32 +926,32 @@@ static const struct vm_operations_struc
        .fault = rdma_umap_fault,
  };
  
- static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
-                                                struct vm_area_struct *vma,
-                                                unsigned long size)
+ /*
+  * Map IO memory into a process. This is to be called by drivers as part of
+  * their mmap() functions if they wish to send something like PCI-E BAR memory
+  * to userspace.
+  */
+ int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+                     unsigned long pfn, unsigned long size, pgprot_t prot)
  {
        struct ib_uverbs_file *ufile = ucontext->ufile;
        struct rdma_umap_priv *priv;
  
        if (!(vma->vm_flags & VM_SHARED))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
  
        if (vma->vm_end - vma->vm_start != size)
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
  
        /* Driver is using this wrong, must be called by ib_uverbs_mmap */
        if (WARN_ON(!vma->vm_file ||
                    vma->vm_file->private_data != ufile))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
        lockdep_assert_held(&ufile->device->disassociate_srcu);
  
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
-               return ERR_PTR(-ENOMEM);
-       return priv;
- }
- /*
-  * Map IO memory into a process. This is to be called by drivers as part of
-  * their mmap() functions if they wish to send something like PCI-E BAR memory
-  * to userspace.
-  */
- int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
-                     unsigned long pfn, unsigned long size, pgprot_t prot)
- {
-       struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
-       if (IS_ERR(priv))
-               return PTR_ERR(priv);
+               return -ENOMEM;
  
        vma->vm_page_prot = prot;
        if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
  }
  EXPORT_SYMBOL(rdma_user_mmap_io);
  
- /*
-  * The page case is here for a slightly different reason, the driver expects
-  * to be able to free the page it is sharing to user space when it destroys
-  * its ucontext, which means we need to zap the user space references.
-  *
-  * We could handle this differently by providing an API to allocate a shared
-  * page and then only freeing the shared page when the last ufile is
-  * destroyed.
-  */
- int rdma_user_mmap_page(struct ib_ucontext *ucontext,
-                       struct vm_area_struct *vma, struct page *page,
-                       unsigned long size)
- {
-       struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
-       if (IS_ERR(priv))
-               return PTR_ERR(priv);
-       if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
-                           vma->vm_page_prot)) {
-               kfree(priv);
-               return -EAGAIN;
-       }
-       rdma_umap_priv_init(priv, vma);
-       return 0;
- }
- EXPORT_SYMBOL(rdma_user_mmap_page);
  void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
  {
        struct rdma_umap_priv *priv, *next_priv;
                 * will only be one mm, so no big deal.
                 */
                down_read(&mm->mmap_sem);
 +              if (!mmget_still_valid(mm))
 +                      goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
                                     vma->vm_end - vma->vm_start);
                }
                mutex_unlock(&ufile->umap_lock);
 +      skip_mm:
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
@@@ -1094,6 -1051,11 +1054,11 @@@ static int ib_uverbs_open(struct inode 
                goto err;
        }
  
+       if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
+               ret = -EPERM;
+               goto err;
+       }
        /* In case IB device supports disassociate ucontext, there is no hard
         * dependency between uverbs device and its low level device.
         */
  
        setup_ufile_idr_uobject(file);
  
 -      return nonseekable_open(inode, filp);
 +      return stream_open(inode, filp);
  
  err_module:
        module_put(ib_dev->owner);
index addefae16c9c9ec1a21a403ed108b7e6da1ed86b,ec2df39c2f602d5e23ae3dc14ede77e2392368ba..310105d4e3de04e6169afcb5947923b81f89696e
@@@ -4104,6 -4104,9 +4104,9 @@@ def_access_ibp_counter(seq_naks)
  
  static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
  [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
+ [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
+ [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
+ [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
  [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
                        CNTR_NORMAL),
  [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
@@@ -8365,6 -8368,7 +8368,6 @@@ static inline void clear_recv_intr(stru
        struct hfi1_devdata *dd = rcd->dd;
        u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
  
 -      mmiowb();       /* make sure everything before is written */
        write_csr(dd, addr, rcd->imask);
        /* force the above write on the chip and get a value back */
        (void)read_csr(dd, addr);
@@@ -11802,10 -11806,12 +11805,10 @@@ void update_usrhead(struct hfi1_ctxtdat
                        << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
                write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
        }
 -      mmiowb();
        reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
                (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
                        << RCV_HDR_HEAD_HEAD_SHIFT);
        write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
 -      mmiowb();
  }
  
  u32 hdrqempty(struct hfi1_ctxtdata *rcd)
@@@ -13294,15 -13300,18 +13297,18 @@@ static int set_up_context_variables(str
        /*
         * The RMT entries are currently allocated as shown below:
         * 1. QOS (0 to 128 entries);
-        * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
+        * 2. FECN (num_kernel_context - 1 + num_user_contexts +
+        *    num_vnic_contexts);
         * 3. VNIC (num_vnic_contexts).
-        * It should be noted that PSM FECN oversubscribe num_vnic_contexts
+        * It should be noted that FECN oversubscribe num_vnic_contexts
         * entries of RMT because both VNIC and PSM could allocate any receive
         * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
         * and PSM FECN must reserve an RMT entry for each possible PSM receive
         * context.
         */
        rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
+       if (HFI1_CAP_IS_KSET(TID_RDMA))
+               rmt_count += num_kernel_contexts - 1;
        if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
                user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
                dd_dev_err(dd,
        init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
  }
  
- static void init_user_fecn_handling(struct hfi1_devdata *dd,
-                                   struct rsm_map_table *rmt)
+ static void init_fecn_handling(struct hfi1_devdata *dd,
+                              struct rsm_map_table *rmt)
  {
        struct rsm_rule_data rrd;
        u64 reg;
-       int i, idx, regoff, regidx;
+       int i, idx, regoff, regidx, start;
        u8 offset;
        u32 total_cnt;
  
+       if (HFI1_CAP_IS_KSET(TID_RDMA))
+               /* Exclude context 0 */
+               start = 1;
+       else
+               start = dd->first_dyn_alloc_ctxt;
+       total_cnt = dd->num_rcv_contexts - start;
        /* there needs to be enough room in the map table */
-       total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
        if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
-               dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
+               dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
                return;
        }
  
        /*
         * RSM will extract the destination context as an index into the
         * map table.  The destination contexts are a sequential block
-        * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
+        * in the range start...num_rcv_contexts-1 (inclusive).
         * Map entries are accessed as offset + extracted value.  Adjust
         * the added offset so this sequence can be placed anywhere in
         * the table - as long as the entries themselves do not wrap.
         * There are only enough bits in offset for the table size, so
         * start with that to allow for a "negative" offset.
         */
-       offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
-                                               (int)dd->first_dyn_alloc_ctxt);
+       offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
  
-       for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
-                               i < dd->num_rcv_contexts; i++, idx++) {
+       for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
+            i++, idx++) {
                /* replace with identity mapping */
                regoff = (idx % 8) * 8;
                regidx = idx / 8;
@@@ -14437,7 -14452,7 +14449,7 @@@ static void init_rxe(struct hfi1_devdat
        rmt = alloc_rsm_map_table(dd);
        /* set up QOS, including the QPN map table */
        init_qos(dd, rmt);
-       init_user_fecn_handling(dd, rmt);
+       init_fecn_handling(dd, rmt);
        complete_rsm_map_table(dd, rmt);
        /* record number of used rsm map entries for vnic */
        dd->vnic.rmt_start = rmt->used;
@@@ -14663,8 -14678,8 +14675,8 @@@ void hfi1_start_cleanup(struct hfi1_dev
   */
  static int init_asic_data(struct hfi1_devdata *dd)
  {
-       unsigned long flags;
-       struct hfi1_devdata *tmp, *peer = NULL;
+       unsigned long index;
+       struct hfi1_devdata *peer;
        struct hfi1_asic_data *asic_data;
        int ret = 0;
  
        if (!asic_data)
                return -ENOMEM;
  
-       spin_lock_irqsave(&hfi1_devs_lock, flags);
+       xa_lock_irq(&hfi1_dev_table);
        /* Find our peer device */
-       list_for_each_entry(tmp, &hfi1_dev_list, list) {
-               if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
-                   dd->unit != tmp->unit) {
-                       peer = tmp;
+       xa_for_each(&hfi1_dev_table, index, peer) {
+               if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
+                   dd->unit != peer->unit)
                        break;
-               }
        }
  
        if (peer) {
                mutex_init(&dd->asic_data->asic_resource_mutex);
        }
        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-       spin_unlock_irqrestore(&hfi1_devs_lock, flags);
+       xa_unlock_irq(&hfi1_dev_table);
  
        /* first one through - set up i2c devices */
        if (!peer)
index 2b07032dbddaacb213742cee71c503c858e77da4,4d5683919b1fbc3bc5fbde4855830cb3f24733ed..b49e60e8397d2fdc7be9a10afc132c2e4b5d808d
@@@ -162,12 -162,12 +162,12 @@@ static void deallocate_vnic_ctxt(struc
  
  void hfi1_vnic_setup(struct hfi1_devdata *dd)
  {
-       idr_init(&dd->vnic.vesw_idr);
+       xa_init(&dd->vnic.vesws);
  }
  
  void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
  {
-       idr_destroy(&dd->vnic.vesw_idr);
+       WARN_ON(!xa_empty(&dd->vnic.vesws));
  }
  
  #define SUM_GRP_COUNTERS(stats, qstats, x_grp) do {            \
@@@ -423,7 -423,8 +423,7 @@@ tx_finish
  
  static u16 hfi1_vnic_select_queue(struct net_device *netdev,
                                  struct sk_buff *skb,
 -                                struct net_device *sb_dev,
 -                                select_queue_fallback_t fallback)
 +                                struct net_device *sb_dev)
  {
        struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
        struct opa_vnic_skb_mdata *mdata;
@@@ -533,7 -534,7 +533,7 @@@ void hfi1_vnic_bypass_rcv(struct hfi1_p
        l4_type = hfi1_16B_get_l4(packet->ebuf);
        if (likely(l4_type == OPA_16B_L4_ETHR)) {
                vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
-               vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);
+               vinfo = xa_load(&dd->vnic.vesws, vesw_id);
  
                /*
                 * In case of invalid vesw id, count the error on
                 */
                if (unlikely(!vinfo)) {
                        struct hfi1_vnic_vport_info *vinfo_tmp;
-                       int id_tmp = 0;
+                       unsigned long index = 0;
  
-                       vinfo_tmp =  idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
+                       vinfo_tmp = xa_find(&dd->vnic.vesws, &index, ULONG_MAX,
+                                       XA_PRESENT);
                        if (vinfo_tmp) {
                                spin_lock(&vport_cntr_lock);
                                vinfo_tmp->stats[0].netstats.rx_nohandler++;
@@@ -597,8 -599,7 +598,7 @@@ static int hfi1_vnic_up(struct hfi1_vni
        if (!vinfo->vesw_id)
                return -EINVAL;
  
-       rc = idr_alloc(&dd->vnic.vesw_idr, vinfo, vinfo->vesw_id,
-                      vinfo->vesw_id + 1, GFP_NOWAIT);
+       rc = xa_insert(&dd->vnic.vesws, vinfo->vesw_id, vinfo, GFP_KERNEL);
        if (rc < 0)
                return rc;
  
@@@ -624,7 -625,7 +624,7 @@@ static void hfi1_vnic_down(struct hfi1_
        clear_bit(HFI1_VNIC_UP, &vinfo->flags);
        netif_carrier_off(vinfo->netdev);
        netif_tx_disable(vinfo->netdev);
-       idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);
+       xa_erase(&dd->vnic.vesws, vinfo->vesw_id);
  
        /* ensure irqs see the change */
        msix_vnic_synchronize_irq(dd);
index c8555f7704d84e45c82197a06eba22e7692c1caf,26d4ed447bea1fe05f06ea4f405dd6ee7150e85b..4c5d0f160c106f8974621bc0c459de2462e51300
@@@ -730,7 -730,7 +730,7 @@@ static int hns_roce_v1_rsv_lp_qp(struc
        /* Reserved cq for loop qp */
        cq_init_attr.cqe                = HNS_ROCE_MIN_WQE_NUM * 2;
        cq_init_attr.comp_vector        = 0;
-       cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
+       cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL);
        if (IS_ERR(cq)) {
                dev_err(dev, "Create cq for reserved loop qp failed!");
                return -ENOMEM;
                goto alloc_mem_failed;
  
        pd->device  = ibdev;
-       ret = hns_roce_alloc_pd(pd, NULL, NULL);
+       ret = hns_roce_alloc_pd(pd, NULL);
        if (ret)
                goto alloc_pd_failed;
  
  create_lp_qp_failed:
        for (i -= 1; i >= 0; i--) {
                hr_qp = free_mr->mr_free_qp[i];
-               if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
+               if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
                        dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
        }
  
-       hns_roce_dealloc_pd(pd);
+       hns_roce_dealloc_pd(pd, NULL);
  
  alloc_pd_failed:
        kfree(pd);
  
  alloc_mem_failed:
-       if (hns_roce_ib_destroy_cq(cq))
+       if (hns_roce_ib_destroy_cq(cq, NULL))
                dev_err(dev, "Destroy cq for create_lp_qp failed!\n");
  
        return ret;
@@@ -888,17 -888,17 +888,17 @@@ static void hns_roce_v1_release_lp_qp(s
                if (!hr_qp)
                        continue;
  
-               ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
+               ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
                if (ret)
                        dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
                                i, ret);
        }
  
-       ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
+       ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
        if (ret)
                dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
  
-       hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
+       hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
  }
  
  static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
@@@ -1096,7 -1096,7 +1096,7 @@@ free_work
  }
  
  static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
-                               struct hns_roce_mr *mr)
+                               struct hns_roce_mr *mr, struct ib_udata *udata)
  {
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_mr_free_work *mr_work;
@@@ -1511,38 -1511,6 +1511,6 @@@ static int hns_roce_v1_reset(struct hns
        return ret;
  }
  
- static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
- {
-       struct device *dev = &hr_dev->pdev->dev;
-       struct hns_roce_v1_priv *priv;
-       struct hns_roce_des_qp *des_qp;
-       priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-       des_qp = &priv->des_qp;
-       des_qp->requeue_flag = 1;
-       des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
-       if (!des_qp->qp_wq) {
-               dev_err(dev, "Create destroy qp workqueue failed!\n");
-               return -ENOMEM;
-       }
-       return 0;
- }
- static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
- {
-       struct hns_roce_v1_priv *priv;
-       struct hns_roce_des_qp *des_qp;
-       priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-       des_qp = &priv->des_qp;
-       des_qp->requeue_flag = 0;
-       flush_workqueue(des_qp->qp_wq);
-       destroy_workqueue(des_qp->qp_wq);
- }
  static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
  {
        int i = 0;
@@@ -1661,12 -1629,6 +1629,6 @@@ static int hns_roce_v1_init(struct hns_
                goto error_failed_tptr_init;
        }
  
-       ret = hns_roce_des_qp_init(hr_dev);
-       if (ret) {
-               dev_err(dev, "des qp init failed!\n");
-               goto error_failed_des_qp_init;
-       }
        ret = hns_roce_free_mr_init(hr_dev);
        if (ret) {
                dev_err(dev, "free mr init failed!\n");
        return 0;
  
  error_failed_free_mr_init:
-       hns_roce_des_qp_free(hr_dev);
- error_failed_des_qp_init:
        hns_roce_tptr_free(hr_dev);
  
  error_failed_tptr_init:
@@@ -1698,7 -1657,6 +1657,6 @@@ static void hns_roce_v1_exit(struct hns
  {
        hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
        hns_roce_free_mr_free(hr_dev);
-       hns_roce_des_qp_free(hr_dev);
        hns_roce_tptr_free(hr_dev);
        hns_roce_bt_free(hr_dev);
        hns_roce_raq_free(hr_dev);
@@@ -1750,6 -1708,8 +1708,6 @@@ static int hns_roce_v1_post_mbox(struc
  
        writel(val, hcr + 5);
  
 -      mmiowb();
 -
        return 0;
  }
  
@@@ -3642,307 -3602,22 +3600,22 @@@ static int hns_roce_v1_query_qp(struct 
                hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
  }
  
- static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
-                                     u32 *old_send, u32 *old_retry,
-                                     u32 *tsp_st, u32 *success_flags)
- {
-       __le32 *old_send_tmp, *old_retry_tmp;
-       u32 sdb_retry_cnt;
-       u32 sdb_send_ptr;
-       u32 cur_cnt, old_cnt;
-       __le32 tmp, tmp1;
-       u32 send_ptr;
-       sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
-       sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
-       tmp = cpu_to_le32(sdb_send_ptr);
-       tmp1 = cpu_to_le32(sdb_retry_cnt);
-       cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-                                ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
-                 roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
-                                ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-       old_send_tmp = (__le32 *)old_send;
-       old_retry_tmp = (__le32 *)old_retry;
-       if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
-               old_cnt = roce_get_field(*old_send_tmp,
-                                        ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-                                        ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
-                         roce_get_field(*old_retry_tmp,
-                                        ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
-                                        ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-               if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
-                       *success_flags = 1;
-       } else {
-               old_cnt = roce_get_field(*old_send_tmp,
-                                        ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-                                        ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
-               if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
-                       *success_flags = 1;
-               } else {
-                       send_ptr = roce_get_field(*old_send_tmp,
-                                           ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-                                           ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
-                                  roce_get_field(tmp1,
-                                           ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
-                                           ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
-                       roce_set_field(*old_send_tmp,
-                                      ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-                                      ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
-                                      send_ptr);
-               }
-       }
- }
- static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
-                                     struct hns_roce_qp *hr_qp,
-                                     u32 sdb_issue_ptr,
-                                     u32 *sdb_inv_cnt,
-                                     u32 *wait_stage)
- {
-       struct device *dev = &hr_dev->pdev->dev;
-       u32 sdb_send_ptr, old_send;
-       __le32 sdb_issue_ptr_tmp;
-       __le32 sdb_send_ptr_tmp;
-       u32 success_flags = 0;
-       unsigned long end;
-       u32 old_retry;
-       u32 inv_cnt;
-       u32 tsp_st;
-       __le32 tmp;
-       if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
-           *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
-               dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
-                       hr_qp->qpn, *wait_stage);
-               return -EINVAL;
-       }
-       /* Calculate the total timeout for the entire verification process */
-       end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;
-       if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
-               /* Query db process status, until hw process completely */
-               sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
-               while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
-                                           ROCEE_SDB_PTR_CMP_BITS)) {
-                       if (!time_before(jiffies, end)) {
-                               dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
-                                       hr_qp->qpn, sdb_issue_ptr,
-                                       sdb_send_ptr);
-                               return 0;
-                       }
-                       msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
-                       sdb_send_ptr = roce_read(hr_dev,
-                                                ROCEE_SDB_SEND_PTR_REG);
-               }
-               sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr);
-               sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr);
-               if (roce_get_field(sdb_issue_ptr_tmp,
-                                  ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
-                                  ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
-                   roce_get_field(sdb_send_ptr_tmp,
-                                  ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
-                                  ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
-                       old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
-                       old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
-                       do {
-                               tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
-                               tmp = cpu_to_le32(tsp_st);
-                               if (roce_get_bit(tmp,
-                                       ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
-                                       *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
-                                       return 0;
-                               }
-                               if (!time_before(jiffies, end)) {
-                                       dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
-                                                    "issue 0x%x send 0x%x.\n",
-                                               hr_qp->qpn,
-                                               le32_to_cpu(sdb_issue_ptr_tmp),
-                                               le32_to_cpu(sdb_send_ptr_tmp));
-                                       return 0;
-                               }
-                               msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
-                               hns_roce_check_sdb_status(hr_dev, &old_send,
-                                                         &old_retry, &tsp_st,
-                                                         &success_flags);
-                       } while (!success_flags);
-               }
-               *wait_stage = HNS_ROCE_V1_DB_STAGE2;
-               /* Get list pointer */
-               *sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
-               dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
-                       hr_qp->qpn, *sdb_inv_cnt);
-       }
-       if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
-               /* Query db's list status, until hw reversal */
-               inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
-               while (roce_hw_index_cmp_lt(inv_cnt,
-                                           *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
-                                           ROCEE_SDB_CNT_CMP_BITS)) {
-                       if (!time_before(jiffies, end)) {
-                               dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
-                                       hr_qp->qpn, inv_cnt);
-                               return 0;
-                       }
-                       msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
-                       inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
-               }
-               *wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
-       }
-       return 0;
- }
- static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
-                               struct hns_roce_qp *hr_qp,
-                               struct hns_roce_qp_work *qp_work_entry,
-                               int *is_timeout)
- {
-       struct device *dev = &hr_dev->pdev->dev;
-       u32 sdb_issue_ptr;
-       int ret;
-       if (hr_qp->state != IB_QPS_RESET) {
-               /* Set qp to ERR, waiting for hw complete processing all dbs */
-               ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
-                                           IB_QPS_ERR);
-               if (ret) {
-                       dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
-                               hr_qp->qpn);
-                       return ret;
-               }
-               /* Record issued doorbell */
-               sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
-               qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
-               qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;
-               /* Query db process status, until hw process completely */
-               ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
-                                                &qp_work_entry->sdb_inv_cnt,
-                                                &qp_work_entry->db_wait_stage);
-               if (ret) {
-                       dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
-                               hr_qp->qpn);
-                       return ret;
-               }
-               if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
-                       qp_work_entry->sche_cnt = 0;
-                       *is_timeout = 1;
-                       return 0;
-               }
-               /* Modify qp to reset before destroying qp */
-               ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
-                                           IB_QPS_RESET);
-               if (ret) {
-                       dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
-                               hr_qp->qpn);
-                       return ret;
-               }
-       }
-       return 0;
- }
- static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
- {
-       struct hns_roce_qp_work *qp_work_entry;
-       struct hns_roce_v1_priv *priv;
-       struct hns_roce_dev *hr_dev;
-       struct hns_roce_qp *hr_qp;
-       struct device *dev;
-       unsigned long qpn;
-       int ret;
-       qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
-       hr_dev = to_hr_dev(qp_work_entry->ib_dev);
-       dev = &hr_dev->pdev->dev;
-       priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-       hr_qp = qp_work_entry->qp;
-       qpn = hr_qp->qpn;
-       dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
-       qp_work_entry->sche_cnt++;
-       /* Query db process status, until hw process completely */
-       ret = check_qp_db_process_status(hr_dev, hr_qp,
-                                        qp_work_entry->sdb_issue_ptr,
-                                        &qp_work_entry->sdb_inv_cnt,
-                                        &qp_work_entry->db_wait_stage);
-       if (ret) {
-               dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
-                       qpn);
-               return;
-       }
-       if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
-           priv->des_qp.requeue_flag) {
-               queue_work(priv->des_qp.qp_wq, work);
-               return;
-       }
-       /* Modify qp to reset before destroying qp */
-       ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
-                                   IB_QPS_RESET);
-       if (ret) {
-               dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
-               return;
-       }
-       hns_roce_qp_remove(hr_dev, hr_qp);
-       hns_roce_qp_free(hr_dev, hr_qp);
-       if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
-               /* RC QP, release QPN */
-               hns_roce_release_range_qp(hr_dev, qpn, 1);
-               kfree(hr_qp);
-       } else
-               kfree(hr_to_hr_sqp(hr_qp));
-       kfree(qp_work_entry);
-       dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
- }
- int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
+ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
  {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-       struct device *dev = &hr_dev->pdev->dev;
-       struct hns_roce_qp_work qp_work_entry;
-       struct hns_roce_qp_work *qp_work;
-       struct hns_roce_v1_priv *priv;
        struct hns_roce_cq *send_cq, *recv_cq;
-       bool is_user = ibqp->uobject;
-       int is_timeout = 0;
        int ret;
  
-       ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
-       if (ret) {
-               dev_err(dev, "QP reset state check failed(%d)!\n", ret);
+       ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
+       if (ret)
                return ret;
-       }
  
        send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
        recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
  
        hns_roce_lock_cqs(send_cq, recv_cq);
-       if (!is_user) {
+       if (!udata) {
                __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
                                       to_hr_srq(hr_qp->ibqp.srq) : NULL);
                if (send_cq != recv_cq)
        }
        hns_roce_unlock_cqs(send_cq, recv_cq);
  
-       if (!is_timeout) {
-               hns_roce_qp_remove(hr_dev, hr_qp);
-               hns_roce_qp_free(hr_dev, hr_qp);
+       hns_roce_qp_remove(hr_dev, hr_qp);
+       hns_roce_qp_free(hr_dev, hr_qp);
  
-               /* RC QP, release QPN */
-               if (hr_qp->ibqp.qp_type == IB_QPT_RC)
-                       hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
-       }
+       /* RC QP, release QPN */
+       if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+               hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
  
        hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
  
-       if (is_user)
+       if (udata)
                ib_umem_release(hr_qp->umem);
        else {
                kfree(hr_qp->sq.wrid);
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
        }
  
-       if (!is_timeout) {
-               if (hr_qp->ibqp.qp_type == IB_QPT_RC)
-                       kfree(hr_qp);
-               else
-                       kfree(hr_to_hr_sqp(hr_qp));
-       } else {
-               qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
-               if (!qp_work)
-                       return -ENOMEM;
-               INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
-               qp_work->ib_dev = &hr_dev->ib_dev;
-               qp_work->qp             = hr_qp;
-               qp_work->db_wait_stage  = qp_work_entry.db_wait_stage;
-               qp_work->sdb_issue_ptr  = qp_work_entry.sdb_issue_ptr;
-               qp_work->sdb_inv_cnt    = qp_work_entry.sdb_inv_cnt;
-               qp_work->sche_cnt       = qp_work_entry.sche_cnt;
-               priv = (struct hns_roce_v1_priv *)hr_dev->priv;
-               queue_work(priv->des_qp.qp_wq, &qp_work->work);
-               dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
-       }
+       if (hr_qp->ibqp.qp_type == IB_QPT_RC)
+               kfree(hr_qp);
+       else
+               kfree(hr_to_hr_sqp(hr_qp));
        return 0;
  }
  
- static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
+ static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
  {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
index 60cf9f03e9414e98e97f325cc7f0b937af36bcc0,af777d549bd9940990264089e46eab4779c4884a..8db2817a249e94a6e3d246b955c605f8153b0068
  
  void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
  {
-       struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;
  
-       spin_lock(&qp_table->lock);
+       xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);
-       spin_unlock(&qp_table->lock);
+       xa_unlock(&hr_dev->qp_table_xa);
  
        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
@@@ -147,29 -144,20 +144,20 @@@ EXPORT_SYMBOL_GPL(to_hns_roce_state)
  static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
  {
-       struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+       struct xarray *xa = &hr_dev->qp_table_xa;
        int ret;
  
        if (!qpn)
                return -EINVAL;
  
        hr_qp->qpn = qpn;
-       spin_lock_irq(&qp_table->lock);
-       ret = radix_tree_insert(&hr_dev->qp_table_tree,
-                               hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
-       spin_unlock_irq(&qp_table->lock);
-       if (ret) {
-               dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
-               goto err_put_irrl;
-       }
        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);
  
-       return 0;
- err_put_irrl:
+       ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
+                               hr_qp, GFP_KERNEL));
+       if (ret)
+               dev_err(hr_dev->dev, "QPC xa_store failed\n");
  
        return ret;
  }
@@@ -220,17 -208,9 +208,9 @@@ static int hns_roce_qp_alloc(struct hns
                }
        }
  
-       spin_lock_irq(&qp_table->lock);
-       ret = radix_tree_insert(&hr_dev->qp_table_tree,
-                               hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
-       spin_unlock_irq(&qp_table->lock);
-       if (ret) {
-               dev_err(dev, "QPC radix_tree_insert failed\n");
+       ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
+       if (ret)
                goto err_put_sccc;
-       }
-       atomic_set(&hr_qp->refcount, 1);
-       init_completion(&hr_qp->free);
  
        return 0;
  
@@@ -255,13 -235,12 +235,12 @@@ err_out
  
  void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
  {
-       struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
+       struct xarray *xa = &hr_dev->qp_table_xa;
        unsigned long flags;
  
-       spin_lock_irqsave(&qp_table->lock, flags);
-       radix_tree_delete(&hr_dev->qp_table_tree,
-                         hr_qp->qpn & (hr_dev->caps.num_qps - 1));
-       spin_unlock_irqrestore(&qp_table->lock, flags);
+       xa_lock_irqsave(xa, flags);
+       __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
+       xa_unlock_irqrestore(xa, flags);
  }
  EXPORT_SYMBOL_GPL(hns_roce_qp_remove);
  
@@@ -533,7 -512,7 +512,7 @@@ static int hns_roce_set_kernel_sq_size(
  
  static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
  {
 -      if (attr->qp_type == IB_QPT_XRC_TGT)
 +      if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;
  
        return 1;
@@@ -1154,8 -1133,7 +1133,7 @@@ int hns_roce_init_qp_table(struct hns_r
        int ret;
  
        mutex_init(&qp_table->scc_mutex);
-       spin_lock_init(&qp_table->lock);
-       INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+       xa_init(&hr_dev->qp_table_xa);
  
        /* In hw v1, a port include two SQP, six ports total 12 */
        if (hr_dev->caps.max_sq_sg <= 2)
index 9426936460f8e19e1458e0b758b2c7b3ad88b6f1,bb1c6eb31b3211bf16977e9659fa18762a5ad9ba..5221c0794d1d0a88ce5ccff5be3b6b10c0363947
@@@ -1041,11 -1041,11 +1041,11 @@@ static int create_qp_common(struct mlx4
                        goto err_mtt;
  
                if (qp_has_rq(init_attr)) {
-                       err = mlx4_ib_db_map_user(
-                               context, udata,
-                               (src == MLX4_IB_QP_SRC) ? ucmd.qp.db_addr :
+                       err = mlx4_ib_db_map_user(udata,
+                                                 (src == MLX4_IB_QP_SRC) ?
+                                                         ucmd.qp.db_addr :
                                                          ucmd.wq.db_addr,
-                               &qp->db);
+                                                 &qp->db);
                        if (err)
                                goto err_mtt;
                }
@@@ -1338,7 -1338,8 +1338,8 @@@ static void destroy_qp_rss(struct mlx4_
  }
  
  static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
-                             enum mlx4_ib_source_type src, bool is_user)
+                             enum mlx4_ib_source_type src,
+                             struct ib_udata *udata)
  {
        struct mlx4_ib_cq *send_cq, *recv_cq;
        unsigned long flags;
        list_del(&qp->qps_list);
        list_del(&qp->cq_send_list);
        list_del(&qp->cq_recv_list);
-       if (!is_user) {
+       if (!udata) {
                __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
                                 qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL);
                if (send_cq != recv_cq)
                if (qp->flags & MLX4_IB_QP_NETIF)
                        mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
                else if (src == MLX4_IB_RWQ_SRC)
-                       mlx4_ib_release_wqn(to_mucontext(
-                                           qp->ibwq.uobject->context), qp, 1);
+                       mlx4_ib_release_wqn(
+                               rdma_udata_to_drv_context(
+                                       udata,
+                                       struct mlx4_ib_ucontext,
+                                       ibucontext),
+                               qp, 1);
                else
                        mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
        }
  
        mlx4_mtt_cleanup(dev->dev, &qp->mtt);
  
-       if (is_user) {
+       if (udata) {
                if (qp->rq.wqe_cnt) {
-                       struct mlx4_ib_ucontext *mcontext = !src ?
-                               to_mucontext(qp->ibqp.uobject->context) :
-                               to_mucontext(qp->ibwq.uobject->context);
+                       struct mlx4_ib_ucontext *mcontext =
+                               rdma_udata_to_drv_context(
+                                       udata,
+                                       struct mlx4_ib_ucontext,
+                                       ibucontext);
                        mlx4_ib_db_unmap_user(mcontext, &qp->db);
                }
                ib_umem_release(qp->umem);
@@@ -1594,7 -1602,7 +1602,7 @@@ struct ib_qp *mlx4_ib_create_qp(struct 
        return ibqp;
  }
  
- static int _mlx4_ib_destroy_qp(struct ib_qp *qp)
+ static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
  {
        struct mlx4_ib_dev *dev = to_mdev(qp->device);
        struct mlx4_ib_qp *mqp = to_mqp(qp);
        if (qp->rwq_ind_tbl) {
                destroy_qp_rss(dev, mqp);
        } else {
-               destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, qp->uobject);
+               destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
        }
  
        if (is_sqp(dev, mqp))
        return 0;
  }
  
- int mlx4_ib_destroy_qp(struct ib_qp *qp)
+ int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
  {
        struct mlx4_ib_qp *mqp = to_mqp(qp);
  
                        ib_destroy_qp(sqp->roce_v2_gsi);
        }
  
-       return _mlx4_ib_destroy_qp(qp);
+       return _mlx4_ib_destroy_qp(qp, udata);
  }
  
  static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type)
@@@ -2240,8 -2248,10 +2248,10 @@@ static int __mlx4_ib_modify_qp(void *sr
  
                if (is_eth) {
                        gid_attr = attr->ah_attr.grh.sgid_attr;
-                       vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
-                       memcpy(smac, gid_attr->ndev->dev_addr, ETH_ALEN);
+                       err = rdma_read_gid_l2_fields(gid_attr, &vlan,
+                                                     &smac[0]);
+                       if (err)
+                               goto out;
                }
  
                if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path,
@@@ -3744,6 -3754,12 +3754,6 @@@ out
                writel_relaxed(qp->doorbell_qpn,
                        to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);
  
 -              /*
 -               * Make sure doorbells don't leak out of SQ spinlock
 -               * and reach the HCA out of order.
 -               */
 -              mmiowb();
 -
                stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1);
  
                qp->sq_next_wqe = ind;
@@@ -4238,7 -4254,7 +4248,7 @@@ int mlx4_ib_modify_wq(struct ib_wq *ibw
        return err;
  }
  
- int mlx4_ib_destroy_wq(struct ib_wq *ibwq)
+ int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
  {
        struct mlx4_ib_dev *dev = to_mdev(ibwq->device);
        struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq);
        if (qp->counter_index)
                mlx4_ib_free_qp_counter(dev, qp);
  
-       destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, 1);
+       destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata);
  
        kfree(qp);
  
index 1aaa2056d188c221431821303f1f8c5013e5570f,687f99172037bee7d2ecf0b061b79a58149cd1dc..abac70ad5c7c46db82856e7522059611470773de
@@@ -156,6 -156,34 +156,34 @@@ static int get_port_state(struct ib_dev
        return ret;
  }
  
+ static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
+                                          struct net_device *ndev,
+                                          u8 *port_num)
+ {
+       struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+       struct net_device *rep_ndev;
+       struct mlx5_ib_port *port;
+       int i;
+       for (i = 0; i < dev->num_ports; i++) {
+               port  = &dev->port[i];
+               if (!port->rep)
+                       continue;
+               read_lock(&port->roce.netdev_lock);
+               rep_ndev = mlx5_ib_get_rep_netdev(esw,
+                                                 port->rep->vport);
+               if (rep_ndev == ndev) {
+                       read_unlock(&port->roce.netdev_lock);
+                       *port_num = i + 1;
+                       return &port->roce;
+               }
+               read_unlock(&port->roce.netdev_lock);
+       }
+       return NULL;
+ }
  static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
  {
  
        switch (event) {
        case NETDEV_REGISTER:
+               /* Should already be registered during the load */
+               if (ibdev->is_rep)
+                       break;
                write_lock(&roce->netdev_lock);
-               if (ibdev->rep) {
-                       struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
-                       struct net_device *rep_ndev;
-                       rep_ndev = mlx5_ib_get_rep_netdev(esw,
-                                                         ibdev->rep->vport);
-                       if (rep_ndev == ndev)
-                               roce->netdev = ndev;
-               } else if (ndev->dev.parent == mdev->device) {
 -              if (ndev->dev.parent == &mdev->pdev->dev)
++              if (ndev->dev.parent == mdev->device)
                        roce->netdev = ndev;
-               }
                write_unlock(&roce->netdev_lock);
                break;
  
        case NETDEV_UNREGISTER:
+               /* In case of reps, ib device goes away before the netdevs */
                write_lock(&roce->netdev_lock);
                if (roce->netdev == ndev)
                        roce->netdev = NULL;
                        dev_put(lag_ndev);
                }
  
+               if (ibdev->is_rep)
+                       roce = mlx5_get_rep_roce(ibdev, ndev, &port_num);
+               if (!roce)
+                       return NOTIFY_DONE;
                if ((upper == ndev || (!upper && ndev == roce->netdev))
                    && ibdev->ib_active) {
                        struct ib_event ibev = { };
@@@ -257,11 -284,11 +284,11 @@@ static struct net_device *mlx5_ib_get_n
  
        /* Ensure ndev does not disappear before we invoke dev_hold()
         */
-       read_lock(&ibdev->roce[port_num - 1].netdev_lock);
-       ndev = ibdev->roce[port_num - 1].netdev;
+       read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
+       ndev = ibdev->port[port_num - 1].roce.netdev;
        if (ndev)
                dev_hold(ndev);
-       read_unlock(&ibdev->roce[port_num - 1].netdev_lock);
+       read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
  
  out:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
@@@ -479,9 -506,14 +506,14 @@@ static int mlx5_query_port_roce(struct 
  
        /* Possible bad flows are checked before filling out props so in case
         * of an error it will still be zeroed out.
+        * Use native port in case of reps
         */
-       err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
-                                  mdev_port_num);
+       if (dev->is_rep)
+               err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+                                          1);
+       else
+               err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+                                          mdev_port_num);
        if (err)
                goto out;
        ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
@@@ -542,52 -574,22 +574,22 @@@ out
        return err;
  }
  
- struct mlx5_ib_vlan_info {
-       u16 vlan_id;
-       bool vlan;
- };
- static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
- {
-       struct mlx5_ib_vlan_info *vlan_info = data;
-       if (is_vlan_dev(lower_dev)) {
-               vlan_info->vlan = true;
-               vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev);
-       }
-       /* We are interested only in first level vlan device, so
-        * always return 1 to stop iterating over next level devices.
-        */
-       return 1;
- }
  static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
                         unsigned int index, const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
  {
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
-       struct mlx5_ib_vlan_info vlan_info = { };
+       u16 vlan_id = 0xffff;
        u8 roce_version = 0;
        u8 roce_l3_type = 0;
        u8 mac[ETH_ALEN];
+       int ret;
  
        if (gid) {
                gid_type = attr->gid_type;
-               ether_addr_copy(mac, attr->ndev->dev_addr);
-               if (is_vlan_dev(attr->ndev)) {
-                       vlan_info.vlan = true;
-                       vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev);
-               } else {
-                       /* If the netdev is upper device and if it's lower
-                        * lower device is vlan device, consider vlan id of
-                        * the lower vlan device for this gid entry.
-                        */
-                       rcu_read_lock();
-                       netdev_walk_all_lower_dev_rcu(attr->ndev,
-                                       get_lower_dev_vlan, &vlan_info);
-                       rcu_read_unlock();
-               }
+               ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+               if (ret)
+                       return ret;
        }
  
        switch (gid_type) {
  
        return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
                                      roce_l3_type, gid->raw, mac,
-                                     vlan_info.vlan, vlan_info.vlan_id,
+                                     vlan_id < VLAN_CFI_MASK, vlan_id,
                                      port_num);
  }
  
@@@ -1407,7 -1409,9 +1409,9 @@@ static int mlx5_ib_rep_query_port(struc
  {
        int ret;
  
-       /* Only link layer == ethernet is valid for representors */
+       /* Only link layer == ethernet is valid for representors
+        * and we always use port 1
+        */
        ret = mlx5_query_port_roce(ibdev, port, props);
        if (ret || !props)
                return ret;
@@@ -1954,11 -1958,11 +1958,11 @@@ static int mlx5_ib_alloc_ucontext(struc
        print_lib_caps(dev, context->lib_caps);
  
        if (dev->lag_active) {
-               u8 port = mlx5_core_native_port_num(dev->mdev);
+               u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
  
                atomic_set(&context->tx_port_affinity,
                           atomic_add_return(
-                                  1, &dev->roce[port].tx_port_affinity));
+                                  1, &dev->port[port].roce.tx_port_affinity));
        }
  
        return 0;
@@@ -2060,21 -2064,22 +2064,22 @@@ static int mlx5_ib_mmap_clock_info_page
                                        struct vm_area_struct *vma,
                                        struct mlx5_ib_ucontext *context)
  {
-       if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+       if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
+           !(vma->vm_flags & VM_SHARED))
                return -EINVAL;
  
        if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
                return -EOPNOTSUPP;
  
-       if (vma->vm_flags & VM_WRITE)
+       if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;
        vma->vm_flags &= ~VM_MAYWRITE;
  
-       if (!dev->mdev->clock_info_page)
+       if (!dev->mdev->clock_info)
                return -EOPNOTSUPP;
  
-       return rdma_user_mmap_page(&context->ibucontext, vma,
-                                  dev->mdev->clock_info_page, PAGE_SIZE);
+       return vm_insert_page(vma, vma->vm_start,
+                             virt_to_page(dev->mdev->clock_info));
  }
  
  static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
@@@ -2259,89 -2264,200 +2264,200 @@@ static int mlx5_ib_mmap(struct ib_ucont
        return 0;
  }
  
- struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
-                              struct ib_ucontext *context,
-                              struct ib_dm_alloc_attr *attr,
-                              struct uverbs_attr_bundle *attrs)
+ static inline int check_dm_type_support(struct mlx5_ib_dev *dev,
+                                       u32 type)
  {
-       u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
-       struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
-       phys_addr_t memic_addr;
-       struct mlx5_ib_dm *dm;
+       switch (type) {
+       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+               if (!MLX5_CAP_DEV_MEM(dev->mdev, memic))
+                       return -EOPNOTSUPP;
+               break;
+       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+               if (!capable(CAP_SYS_RAWIO) ||
+                   !capable(CAP_NET_RAW))
+                       return -EPERM;
+               if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+                     MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, sw_owner)))
+                       return -EOPNOTSUPP;
+               break;
+       }
+       return 0;
+ }
+ static int handle_alloc_dm_memic(struct ib_ucontext *ctx,
+                                struct mlx5_ib_dm *dm,
+                                struct ib_dm_alloc_attr *attr,
+                                struct uverbs_attr_bundle *attrs)
+ {
+       struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
        u64 start_offset;
        u32 page_idx;
        int err;
  
-       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
-       if (!dm)
-               return ERR_PTR(-ENOMEM);
-       mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
-                   attr->length, act_size, attr->alignment);
+       dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
  
-       err = mlx5_cmd_alloc_memic(memic, &memic_addr,
-                                  act_size, attr->alignment);
+       err = mlx5_cmd_alloc_memic(dm_db, &dm->dev_addr,
+                                  dm->size, attr->alignment);
        if (err)
-               goto err_free;
+               return err;
  
-       start_offset = memic_addr & ~PAGE_MASK;
-       page_idx = (memic_addr - memic->dev->bar_addr -
-                   MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
+       page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
+                   MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
                    PAGE_SHIFT;
  
+       err = uverbs_copy_to(attrs,
+                            MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                            &page_idx, sizeof(page_idx));
+       if (err)
+               goto err_dealloc;
+       start_offset = dm->dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs,
                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                goto err_dealloc;
  
+       bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
+                  DIV_ROUND_UP(dm->size, PAGE_SIZE));
+       return 0;
+ err_dealloc:
+       mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
+       return err;
+ }
+ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+                                 struct mlx5_ib_dm *dm,
+                                 struct ib_dm_alloc_attr *attr,
+                                 struct uverbs_attr_bundle *attrs,
+                                 int type)
+ {
+       struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+       u64 act_size;
+       int err;
+       /* Allocation size must a multiple of the basic block size
+        * and a power of 2.
+        */
+       act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+       act_size = roundup_pow_of_two(act_size);
+       dm->size = act_size;
+       err = mlx5_cmd_alloc_sw_icm(dm_db, type, act_size,
+                                   to_mucontext(ctx)->devx_uid, &dm->dev_addr,
+                                   &dm->icm_dm.obj_id);
+       if (err)
+               return err;
        err = uverbs_copy_to(attrs,
-                            MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                            &page_idx, sizeof(page_idx));
+                            MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+                            &dm->dev_addr, sizeof(dm->dev_addr));
        if (err)
-               goto err_dealloc;
+               mlx5_cmd_dealloc_sw_icm(dm_db, type, dm->size,
+                                       to_mucontext(ctx)->devx_uid,
+                                       dm->dev_addr, dm->icm_dm.obj_id);
+       return err;
+ }
+ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+                              struct ib_ucontext *context,
+                              struct ib_dm_alloc_attr *attr,
+                              struct uverbs_attr_bundle *attrs)
+ {
+       struct mlx5_ib_dm *dm;
+       enum mlx5_ib_uapi_dm_type type;
+       int err;
  
-       bitmap_set(to_mucontext(context)->dm_pages, page_idx,
-                  DIV_ROUND_UP(act_size, PAGE_SIZE));
+       err = uverbs_get_const_default(&type, attrs,
+                                      MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+                                      MLX5_IB_UAPI_DM_TYPE_MEMIC);
+       if (err)
+               return ERR_PTR(err);
  
-       dm->dev_addr = memic_addr;
+       mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+                   type, attr->length, attr->alignment);
+       err = check_dm_type_support(to_mdev(ibdev), type);
+       if (err)
+               return ERR_PTR(err);
+       dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+       if (!dm)
+               return ERR_PTR(-ENOMEM);
+       dm->type = type;
+       switch (type) {
+       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+               err = handle_alloc_dm_memic(context, dm,
+                                           attr,
+                                           attrs);
+               break;
+       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+               err = handle_alloc_dm_sw_icm(context, dm, attr, attrs, type);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+       }
+       if (err)
+               goto err_free;
  
        return &dm->ibdm;
  
- err_dealloc:
-       mlx5_cmd_dealloc_memic(memic, memic_addr,
-                              act_size);
  err_free:
        kfree(dm);
        return ERR_PTR(err);
  }
  
- int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
+ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
  {
-       struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
+       struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+               &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+       struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
        struct mlx5_ib_dm *dm = to_mdm(ibdm);
-       u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
        u32 page_idx;
        int ret;
  
-       ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
-       if (ret)
-               return ret;
+       switch (dm->type) {
+       case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+               ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
+               if (ret)
+                       return ret;
  
-       page_idx = (dm->dev_addr - memic->dev->bar_addr -
-                   MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
-                   PAGE_SHIFT;
-       bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
-                    page_idx,
-                    DIV_ROUND_UP(act_size, PAGE_SIZE));
+               page_idx = (dm->dev_addr -
+                           pci_resource_start(dm_db->dev->pdev, 0) -
+                           MLX5_CAP64_DEV_MEM(dm_db->dev,
+                                              memic_bar_start_addr)) >>
+                          PAGE_SHIFT;
+               bitmap_clear(ctx->dm_pages, page_idx,
+                            DIV_ROUND_UP(dm->size, PAGE_SIZE));
+               break;
+       case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+       case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+               ret = mlx5_cmd_dealloc_sw_icm(dm_db, dm->type, dm->size,
+                                             ctx->devx_uid, dm->dev_addr,
+                                             dm->icm_dm.obj_id);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
  
        kfree(dm);
  
        return 0;
  }
  
- static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-                           struct ib_udata *udata)
+ static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
  {
        struct mlx5_ib_pd *pd = to_mpd(ibpd);
        struct ib_device *ibdev = ibpd->device;
        u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
        u16 uid = 0;
+       struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+               udata, struct mlx5_ib_ucontext, ibucontext);
  
-       uid = context ? to_mucontext(context)->devx_uid : 0;
+       uid = context ? context->devx_uid : 0;
        MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
        MLX5_SET(alloc_pd_in, in, uid, uid);
        err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
  
        pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
        pd->uid = uid;
-       if (context) {
+       if (udata) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
        return 0;
  }
  
- static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
+ static void mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
  {
        struct mlx5_ib_dev *mdev = to_mdev(pd->device);
        struct mlx5_ib_pd *mpd = to_mpd(pd);
@@@ -3151,10 -3269,10 +3269,10 @@@ static struct mlx5_ib_flow_prio *get_fl
                if (ft_type == MLX5_IB_FT_RX) {
                        fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
                        prio = &dev->flow_db->prios[priority];
-                       if (!dev->rep &&
+                       if (!dev->is_rep &&
                            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
-                       if (!dev->rep &&
+                       if (!dev->is_rep &&
                            MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                        reformat_l3_tunnel_to_l2))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
                                                              log_max_ft_size));
                        fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
                        prio = &dev->flow_db->egress_prios[priority];
-                       if (!dev->rep &&
+                       if (!dev->is_rep &&
                            MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
                                flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
                }
        if (!ns)
                return ERR_PTR(-ENOTSUPP);
  
-       if (num_entries > max_table_size)
-               return ERR_PTR(-ENOMEM);
+       max_table_size = min_t(int, num_entries, max_table_size);
  
        ft = prio->flow_table;
        if (!ft)
-               return _get_prio(ns, prio, priority, num_entries, num_groups,
+               return _get_prio(ns, prio, priority, max_table_size, num_groups,
                                 flags);
  
        return prio;
@@@ -3370,7 -3487,7 +3487,7 @@@ static struct mlx5_ib_flow_handler *_cr
        if (!is_valid_attr(dev->mdev, flow_attr))
                return ERR_PTR(-EINVAL);
  
-       if (dev->rep && is_egress)
+       if (dev->is_rep && is_egress)
                return ERR_PTR(-EINVAL);
  
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!flow_is_multicast_only(flow_attr))
                set_underlay_qp(dev, spec, underlay_qpn);
  
-       if (dev->rep) {
+       if (dev->is_rep) {
                void *misc;
  
+               if (!dev->port[flow_attr->port - 1].rep) {
+                       err = -EINVAL;
+                       goto free;
+               }
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
                MLX5_SET(fte_match_set_misc, misc, source_port,
-                        dev->rep->vport);
+                        dev->port[flow_attr->port - 1].rep->vport);
                misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
                MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@@ -3769,11 -3890,16 +3890,16 @@@ _get_flow_table(struct mlx5_ib_dev *dev
                bool mcast)
  {
        struct mlx5_flow_namespace *ns = NULL;
-       struct mlx5_ib_flow_prio *prio;
-       int max_table_size;
+       struct mlx5_ib_flow_prio *prio = NULL;
+       int max_table_size = 0;
        u32 flags = 0;
        int priority;
  
+       if (mcast)
+               priority = MLX5_IB_FLOW_MCAST_PRIO;
+       else
+               priority = ib_prio_to_core_prio(fs_matcher->priority, false);
        if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
                max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                        log_max_ft_size));
                if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
                                              reformat_l3_tunnel_to_l2))
                        flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
-       } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
-               max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
-                                       log_max_ft_size));
+       } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
+               max_table_size = BIT(
+                       MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
                if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
                        flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+       } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
+               max_table_size = BIT(
+                       MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+               priority = FDB_BYPASS_PATH;
        }
  
-       if (max_table_size < MLX5_FS_MAX_ENTRIES)
-               return ERR_PTR(-ENOMEM);
-       if (mcast)
-               priority = MLX5_IB_FLOW_MCAST_PRIO;
-       else
-               priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+       max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
  
        ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
        if (!ns)
  
        if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
                prio = &dev->flow_db->prios[priority];
-       else
+       else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS)
                prio = &dev->flow_db->egress_prios[priority];
+       else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+               prio = &dev->flow_db->fdb;
+       if (!prio)
+               return ERR_PTR(-EINVAL);
  
        if (prio->flow_table)
                return prio;
  
-       return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
+       return _get_prio(ns, prio, priority, max_table_size,
                         MLX5_FS_MAX_TYPES, flags);
  }
  
@@@ -4356,13 -4485,9 +4485,13 @@@ static void delay_drop_handler(struct w
  static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
                                 struct ib_event *ibev)
  {
 +      u8 port = (eqe->data.port.port >> 4) & 0xf;
 +
        switch (eqe->sub_type) {
        case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
 -              schedule_work(&ibdev->delay_drop.delay_drop_work);
 +              if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
 +                                          IB_LINK_LAYER_ETHERNET)
 +                      schedule_work(&ibdev->delay_drop.delay_drop_work);
                break;
        default: /* do nothing */
                return;
@@@ -4509,7 -4634,7 +4638,7 @@@ static int set_has_smi_cap(struct mlx5_
        int err;
        int port;
  
-       for (port = 1; port <= dev->num_ports; port++) {
+       for (port = 1; port <= ARRAY_SIZE(dev->mdev->port_caps); port++) {
                dev->mdev->port_caps[port - 1].has_smi = false;
                if (MLX5_CAP_GEN(dev->mdev, port_type) ==
                    MLX5_CAP_PORT_TYPE_IB) {
@@@ -4540,7 -4665,7 +4669,7 @@@ static void get_ext_port_caps(struct ml
                mlx5_query_ext_port_caps(dev, port);
  }
  
- static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+ static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
  {
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        if (!dprops)
                goto out;
  
-       err = set_has_smi_cap(dev);
-       if (err)
-               goto out;
        err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
        if (err) {
                mlx5_ib_warn(dev, "query_device failed %d\n", err);
        return err;
  }
  
+ static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
+ {
+       /* For representors use port 1, is this is the only native
+        * port
+        */
+       if (dev->is_rep)
+               return __get_port_caps(dev, 1);
+       return __get_port_caps(dev, port);
+ }
  static void destroy_umrc_res(struct mlx5_ib_dev *dev)
  {
        int err;
                mlx5_ib_warn(dev, "mr cache cleanup failed\n");
  
        if (dev->umrc.qp)
-               mlx5_ib_destroy_qp(dev->umrc.qp);
+               mlx5_ib_destroy_qp(dev->umrc.qp, NULL);
        if (dev->umrc.cq)
                ib_free_cq(dev->umrc.cq);
        if (dev->umrc.pd)
@@@ -4701,7 -4832,7 +4836,7 @@@ static int create_umr_res(struct mlx5_i
        return 0;
  
  error_4:
-       mlx5_ib_destroy_qp(qp);
+       mlx5_ib_destroy_qp(qp, NULL);
        dev->umrc.qp = NULL;
  
  error_3:
@@@ -4752,11 -4883,11 +4887,11 @@@ static int create_dev_resources(struct 
        devr->p0->uobject = NULL;
        atomic_set(&devr->p0->usecnt, 0);
  
-       ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
+       ret = mlx5_ib_alloc_pd(devr->p0, NULL);
        if (ret)
                goto error0;
  
-       devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
+       devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
        if (IS_ERR(devr->c0)) {
                ret = PTR_ERR(devr->c0);
                goto error1;
        devr->c0->cq_context    = NULL;
        atomic_set(&devr->c0->usecnt, 0);
  
-       devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+       devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
        if (IS_ERR(devr->x0)) {
                ret = PTR_ERR(devr->x0);
                goto error2;
        mutex_init(&devr->x0->tgt_qp_mutex);
        INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
  
-       devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+       devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
        if (IS_ERR(devr->x1)) {
                ret = PTR_ERR(devr->x1);
                goto error3;
        attr.ext.cq = devr->c0;
        attr.ext.xrc.xrcd = devr->x0;
  
-       devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
-       if (IS_ERR(devr->s0)) {
-               ret = PTR_ERR(devr->s0);
+       devr->s0 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+       if (!devr->s0) {
+               ret = -ENOMEM;
                goto error4;
        }
        devr->s0->device        = &dev->ib_dev;
        devr->s0->pd            = devr->p0;
-       devr->s0->uobject       = NULL;
-       devr->s0->event_handler = NULL;
-       devr->s0->srq_context   = NULL;
        devr->s0->srq_type      = IB_SRQT_XRC;
        devr->s0->ext.xrc.xrcd  = devr->x0;
        devr->s0->ext.cq        = devr->c0;
+       ret = mlx5_ib_create_srq(devr->s0, &attr, NULL);
+       if (ret)
+               goto err_create;
        atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
        atomic_inc(&devr->s0->ext.cq->usecnt);
        atomic_inc(&devr->p0->usecnt);
        attr.attr.max_sge = 1;
        attr.attr.max_wr = 1;
        attr.srq_type = IB_SRQT_BASIC;
-       devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
-       if (IS_ERR(devr->s1)) {
-               ret = PTR_ERR(devr->s1);
+       devr->s1 = rdma_zalloc_drv_obj(ibdev, ib_srq);
+       if (!devr->s1) {
+               ret = -ENOMEM;
                goto error5;
        }
        devr->s1->device        = &dev->ib_dev;
        devr->s1->pd            = devr->p0;
-       devr->s1->uobject       = NULL;
-       devr->s1->event_handler = NULL;
-       devr->s1->srq_context   = NULL;
        devr->s1->srq_type      = IB_SRQT_BASIC;
        devr->s1->ext.cq        = devr->c0;
+       ret = mlx5_ib_create_srq(devr->s1, &attr, NULL);
+       if (ret)
+               goto error6;
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s1->usecnt, 0);
  
  
        return 0;
  
+ error6:
+       kfree(devr->s1);
  error5:
-       mlx5_ib_destroy_srq(devr->s0);
+       mlx5_ib_destroy_srq(devr->s0, NULL);
+ err_create:
+       kfree(devr->s0);
  error4:
-       mlx5_ib_dealloc_xrcd(devr->x1);
+       mlx5_ib_dealloc_xrcd(devr->x1, NULL);
  error3:
-       mlx5_ib_dealloc_xrcd(devr->x0);
+       mlx5_ib_dealloc_xrcd(devr->x0, NULL);
  error2:
-       mlx5_ib_destroy_cq(devr->c0);
+       mlx5_ib_destroy_cq(devr->c0, NULL);
  error1:
-       mlx5_ib_dealloc_pd(devr->p0);
+       mlx5_ib_dealloc_pd(devr->p0, NULL);
  error0:
        kfree(devr->p0);
        return ret;
  
  static void destroy_dev_resources(struct mlx5_ib_resources *devr)
  {
-       struct mlx5_ib_dev *dev =
-               container_of(devr, struct mlx5_ib_dev, devr);
        int port;
  
-       mlx5_ib_destroy_srq(devr->s1);
-       mlx5_ib_destroy_srq(devr->s0);
-       mlx5_ib_dealloc_xrcd(devr->x0);
-       mlx5_ib_dealloc_xrcd(devr->x1);
-       mlx5_ib_destroy_cq(devr->c0);
-       mlx5_ib_dealloc_pd(devr->p0);
+       mlx5_ib_destroy_srq(devr->s1, NULL);
+       kfree(devr->s1);
+       mlx5_ib_destroy_srq(devr->s0, NULL);
+       kfree(devr->s0);
+       mlx5_ib_dealloc_xrcd(devr->x0, NULL);
+       mlx5_ib_dealloc_xrcd(devr->x1, NULL);
+       mlx5_ib_destroy_cq(devr->c0, NULL);
+       mlx5_ib_dealloc_pd(devr->p0, NULL);
        kfree(devr->p0);
  
        /* Make sure no change P_Key work items are still executing */
-       for (port = 0; port < dev->num_ports; ++port)
+       for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
                cancel_work_sync(&devr->ports[port].pkey_change_work);
  }
  
@@@ -5015,10 -5155,10 +5159,10 @@@ static int mlx5_add_netdev_notifier(str
  {
        int err;
  
-       dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
-       err = register_netdevice_notifier(&dev->roce[port_num].nb);
+       dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+       err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
        if (err) {
-               dev->roce[port_num].nb.notifier_call = NULL;
+               dev->port[port_num].roce.nb.notifier_call = NULL;
                return err;
        }
  
  
  static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
  {
-       if (dev->roce[port_num].nb.notifier_call) {
-               unregister_netdevice_notifier(&dev->roce[port_num].nb);
-               dev->roce[port_num].nb.notifier_call = NULL;
+       if (dev->port[port_num].roce.nb.notifier_call) {
+               unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+               dev->port[port_num].roce.nb.notifier_call = NULL;
        }
  }
  
@@@ -5578,7 -5718,7 +5722,7 @@@ static void mlx5_ib_unbind_slave_port(s
                mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
                            port_num + 1);
  
-       ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
+       ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
  }
  
  /* The mlx5_ib_multiport_mutex should be held when calling this function */
@@@ -5679,8 -5819,7 +5823,8 @@@ static int mlx5_ib_init_multiport_maste
                        }
  
                        if (bound) {
 -                              dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
 +                              dev_dbg(mpi->mdev->device,
 +                                      "removing port from unaffiliated list.\n");
                                mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
                                list_del(&mpi->list);
                                break;
@@@ -5738,7 -5877,10 +5882,10 @@@ ADD_UVERBS_ATTRIBUTES_SIMPLE
                            UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16),
-                           UA_MANDATORY));
+                           UA_OPTIONAL),
+       UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+                            enum mlx5_ib_uapi_dm_type,
+                            UA_OPTIONAL));
  
  ADD_UVERBS_ATTRIBUTES_SIMPLE(
        mlx5_ib_flow_action,
@@@ -5829,35 -5971,58 +5976,58 @@@ static struct ib_counters *mlx5_ib_crea
        return &mcounters->ibcntrs;
  }
  
- void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
  {
+       struct mlx5_core_dev *mdev = dev->mdev;
        mlx5_ib_cleanup_multiport_master(dev);
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                srcu_barrier(&dev->mr_srcu);
                cleanup_srcu_struct(&dev->mr_srcu);
        }
-       kfree(dev->port);
+       WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+       WARN_ON(dev->dm.steering_sw_icm_alloc_blocks &&
+               !bitmap_empty(
+                       dev->dm.steering_sw_icm_alloc_blocks,
+                       BIT(MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size) -
+                           MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
+       kfree(dev->dm.steering_sw_icm_alloc_blocks);
+       WARN_ON(dev->dm.header_modify_sw_icm_alloc_blocks &&
+               !bitmap_empty(dev->dm.header_modify_sw_icm_alloc_blocks,
+                             BIT(MLX5_CAP_DEV_MEM(
+                                         mdev, log_header_modify_sw_icm_size) -
+                                 MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev))));
+       kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
  }
  
- int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
  {
        struct mlx5_core_dev *mdev = dev->mdev;
+       u64 header_modify_icm_blocks = 0;
+       u64 steering_icm_blocks = 0;
        int err;
        int i;
  
-       dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
-                           GFP_KERNEL);
-       if (!dev->port)
-               return -ENOMEM;
        for (i = 0; i < dev->num_ports; i++) {
                spin_lock_init(&dev->port[i].mp.mpi_lock);
-               rwlock_init(&dev->roce[i].netdev_lock);
+               rwlock_init(&dev->port[i].roce.netdev_lock);
+               dev->port[i].roce.dev = dev;
+               dev->port[i].roce.native_port_num = i + 1;
+               dev->port[i].roce.last_port_state = IB_PORT_DOWN;
        }
  
        err = mlx5_ib_init_multiport_master(dev);
        if (err)
-               goto err_free_port;
+               return err;
+       err = set_has_smi_cap(dev);
+       if (err)
+               return err;
  
        if (!mlx5_core_mp_enabled(mdev)) {
                for (i = 1; i <= dev->num_ports; i++) {
        dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
        dev->ib_dev.phys_port_cnt       = dev->num_ports;
        dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
 -      dev->ib_dev.dev.parent          = &mdev->pdev->dev;
 +      dev->ib_dev.dev.parent          = mdev->device;
  
        mutex_init(&dev->cap_mask_mutex);
        INIT_LIST_HEAD(&dev->qp_list);
        spin_lock_init(&dev->reset_flow_resource_lock);
  
-       spin_lock_init(&dev->memic.memic_lock);
-       dev->memic.dev = mdev;
+       if (MLX5_CAP_GEN_64(mdev, general_obj_types) &
+           MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) {
+               if (MLX5_CAP64_DEV_MEM(mdev, steering_sw_icm_start_address)) {
+                       steering_icm_blocks =
+                               BIT(MLX5_CAP_DEV_MEM(mdev,
+                                                    log_steering_sw_icm_size) -
+                                   MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
+                       dev->dm.steering_sw_icm_alloc_blocks =
+                               kcalloc(BITS_TO_LONGS(steering_icm_blocks),
+                                       sizeof(unsigned long), GFP_KERNEL);
+                       if (!dev->dm.steering_sw_icm_alloc_blocks)
+                               goto err_mp;
+               }
+               if (MLX5_CAP64_DEV_MEM(mdev,
+                                      header_modify_sw_icm_start_address)) {
+                       header_modify_icm_blocks = BIT(
+                               MLX5_CAP_DEV_MEM(
+                                       mdev, log_header_modify_sw_icm_size) -
+                               MLX5_LOG_SW_ICM_BLOCK_SIZE(mdev));
+                       dev->dm.header_modify_sw_icm_alloc_blocks =
+                               kcalloc(BITS_TO_LONGS(header_modify_icm_blocks),
+                                       sizeof(unsigned long), GFP_KERNEL);
+                       if (!dev->dm.header_modify_sw_icm_alloc_blocks)
+                               goto err_dm;
+               }
+       }
+       spin_lock_init(&dev->dm.lock);
+       dev->dm.dev = mdev;
  
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                err = init_srcu_struct(&dev->mr_srcu);
                if (err)
-                       goto err_mp;
+                       goto err_dm;
        }
  
        return 0;
+ err_dm:
+       kfree(dev->dm.steering_sw_icm_alloc_blocks);
+       kfree(dev->dm.header_modify_sw_icm_alloc_blocks);
  err_mp:
        mlx5_ib_cleanup_multiport_master(dev);
  
- err_free_port:
-       kfree(dev->port);
        return -ENOMEM;
  }
  
@@@ -5916,20 -6113,6 +6118,6 @@@ static int mlx5_ib_stage_flow_db_init(s
        return 0;
  }
  
- int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
- {
-       struct mlx5_ib_dev *nic_dev;
-       nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
-       if (!nic_dev)
-               return -EINVAL;
-       dev->flow_db = nic_dev->flow_db;
-       return 0;
- }
  static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
  {
        kfree(dev->flow_db);
@@@ -5989,7 -6172,10 +6177,10 @@@ static const struct ib_device_ops mlx5_
        .req_notify_cq = mlx5_ib_arm_cq,
        .rereg_user_mr = mlx5_ib_rereg_user_mr,
        .resize_cq = mlx5_ib_resize_cq,
+       INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+       INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
  };
  
@@@ -6025,7 -6211,7 +6216,7 @@@ static const struct ib_device_ops mlx5_
        .reg_dm_mr = mlx5_ib_reg_dm_mr,
  };
  
- int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
  {
        struct mlx5_core_dev *mdev = dev->mdev;
        int err;
                ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
        }
  
-       if (MLX5_CAP_DEV_MEM(mdev, memic))
+       if (MLX5_CAP_DEV_MEM(mdev, memic) ||
+           MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+           MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
                ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
  
        if (mlx5_accel_ipsec_device_caps(dev->mdev) &
@@@ -6131,7 -6319,7 +6324,7 @@@ static const struct ib_device_ops mlx5_
        .query_port = mlx5_ib_rep_query_port,
  };
  
- int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
  {
        ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
        return 0;
@@@ -6149,13 -6337,6 +6342,6 @@@ static const struct ib_device_ops mlx5_
  static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
  {
        u8 port_num;
-       int i;
-       for (i = 0; i < dev->num_ports; i++) {
-               dev->roce[i].dev = dev;
-               dev->roce[i].native_port_num = i + 1;
-               dev->roce[i].last_port_state = IB_PORT_DOWN;
-       }
  
        dev->ib_dev.uverbs_ex_cmd_mask |=
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
  
        port_num = mlx5_core_native_port_num(dev->mdev) - 1;
  
+       /* Register only for native ports */
        return mlx5_add_netdev_notifier(dev, port_num);
  }
  
@@@ -6177,7 -6359,7 +6364,7 @@@ static void mlx5_ib_stage_common_roce_c
        mlx5_remove_netdev_notifier(dev, port_num);
  }
  
- int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
  {
        struct mlx5_core_dev *mdev = dev->mdev;
        enum rdma_link_layer ll;
        return err;
  }
  
- void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
  {
        mlx5_ib_stage_common_roce_cleanup(dev);
  }
@@@ -6240,12 -6422,12 +6427,12 @@@ static void mlx5_ib_stage_roce_cleanup(
        }
  }
  
- int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
  {
        return create_dev_resources(&dev->devr);
  }
  
- void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
  {
        destroy_dev_resources(&dev->devr);
  }
@@@ -6267,7 -6449,7 +6454,7 @@@ static const struct ib_device_ops mlx5_
        .get_hw_stats = mlx5_ib_get_hw_stats,
  };
  
- int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
  {
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
                ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
        return 0;
  }
  
- void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
  {
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
                mlx5_ib_dealloc_counters(dev);
@@@ -6308,7 -6490,7 +6495,7 @@@ static void mlx5_ib_stage_uar_cleanup(s
        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
  }
  
- int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
  {
        int err;
  
        return err;
  }
  
- void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
  {
        mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
  }
  
- int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
  {
        const char *name;
  
        return ib_register_device(&dev->ib_dev, name);
  }
  
- void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
  {
        destroy_umrc_res(dev);
  }
  
- void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
  {
        ib_unregister_device(&dev->ib_dev);
  }
  
- int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
  {
        return create_umr_res(dev);
  }
@@@ -6406,6 -6588,9 +6593,9 @@@ void __mlx5_ib_remove(struct mlx5_ib_de
                if (profile->stage[stage].cleanup)
                        profile->stage[stage].cleanup(dev);
        }
+       kfree(dev->port);
+       ib_dealloc_device(&dev->ib_dev);
  }
  
  void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@@ -6527,6 -6712,9 +6717,9 @@@ const struct mlx5_ib_profile uplink_rep
        STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                     NULL,
                     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+       STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+                    mlx5_ib_stage_devx_init,
+                    mlx5_ib_stage_devx_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
                     mlx5_ib_stage_ib_reg_init,
                     mlx5_ib_stage_ib_reg_cleanup),
@@@ -6568,8 -6756,7 +6761,8 @@@ static void *mlx5_ib_add_slave_port(str
  
        if (!bound) {
                list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
 -              dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
 +              dev_dbg(mdev->device,
 +                      "no suitable IB device found to bind to, added to unaffiliated list.\n");
        }
        mutex_unlock(&mlx5_ib_multiport_mutex);
  
@@@ -6581,12 -6768,14 +6774,14 @@@ static void *mlx5_ib_add(struct mlx5_co
        enum rdma_link_layer ll;
        struct mlx5_ib_dev *dev;
        int port_type_cap;
+       int num_ports;
  
        printk_once(KERN_INFO "%s", mlx5_version);
  
        if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
-               mlx5_ib_register_vport_reps(mdev);
+               if (!mlx5_core_mp_enabled(mdev))
+                       mlx5_ib_register_vport_reps(mdev);
                return mdev;
        }
  
        if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
                return mlx5_ib_add_slave_port(mdev);
  
+       num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+                       MLX5_CAP_GEN(mdev, num_vhca_ports));
        dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
        if (!dev)
                return NULL;
+       dev->port = kcalloc(num_ports, sizeof(*dev->port),
+                            GFP_KERNEL);
+       if (!dev->port) {
+               ib_dealloc_device((struct ib_device *)dev);
+               return NULL;
+       }
  
        dev->mdev = mdev;
-       dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
-                            MLX5_CAP_GEN(mdev, num_vhca_ports));
+       dev->num_ports = num_ports;
  
        return __mlx5_ib_add(dev, &pf_profile);
  }
@@@ -6629,8 -6825,6 +6831,6 @@@ static void mlx5_ib_remove(struct mlx5_
  
        dev = context;
        __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
-       ib_dealloc_device((struct ib_device *)dev);
  }
  
  static struct mlx5_interface mlx5_ib_interface = {
index 581144e224e24442c645bb979a1bcd91fe15bd09,dac58c652876cfdef0c8ec05dd5b8bfef131cb2d..f6623c77443ab07d2ddece4ceb7ec6c360aa9ea8
@@@ -92,6 -92,7 +92,7 @@@ struct mlx5_modify_raw_qp_param 
        struct mlx5_rate_limit rl;
  
        u8 rq_q_ctr_id;
+       u16 port;
  };
  
  static void get_cqs(enum ib_qp_type qp_type,
@@@ -777,14 -778,17 +778,17 @@@ err_umem
  }
  
  static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
-                           struct mlx5_ib_rwq *rwq)
+                           struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
  {
-       struct mlx5_ib_ucontext *context;
+       struct mlx5_ib_ucontext *context =
+               rdma_udata_to_drv_context(
+                       udata,
+                       struct mlx5_ib_ucontext,
+                       ibucontext);
  
        if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
                atomic_dec(&dev->delay_drop.rqs_cnt);
  
-       context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &rwq->db);
        if (rwq->umem)
                ib_umem_release(rwq->umem);
@@@ -983,11 -987,15 +987,15 @@@ err_bfreg
  }
  
  static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
-                           struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
+                           struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base,
+                           struct ib_udata *udata)
  {
-       struct mlx5_ib_ucontext *context;
+       struct mlx5_ib_ucontext *context =
+               rdma_udata_to_drv_context(
+                       udata,
+                       struct mlx5_ib_ucontext,
+                       ibucontext);
  
-       context = to_mucontext(pd->uobject->context);
        mlx5_ib_db_unmap_user(context, &qp->db);
        if (base->ubuffer.umem)
                ib_umem_release(base->ubuffer.umem);
@@@ -1206,11 -1214,11 +1214,11 @@@ static void destroy_raw_packet_qp_tis(s
        mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
  }
  
- static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
-                                      struct mlx5_ib_sq *sq)
+ static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
  {
        if (sq->flow_rule)
                mlx5_del_flow_rules(sq->flow_rule);
+       sq->flow_rule = NULL;
  }
  
  static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
        if (err)
                goto err_umem;
  
-       err = create_flow_rule_vport_sq(dev, sq);
-       if (err)
-               goto err_flow;
        return 0;
  
- err_flow:
-       mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
  err_umem:
        ib_umem_release(sq->ubuffer.umem);
        sq->ubuffer.umem = NULL;
  static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
                                     struct mlx5_ib_sq *sq)
  {
-       destroy_flow_rule_vport_sq(dev, sq);
+       destroy_flow_rule_vport_sq(sq);
        mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
        ib_umem_release(sq->ubuffer.umem);
  }
@@@ -1402,7 -1403,8 +1403,8 @@@ static void destroy_raw_packet_qp_tir(s
  static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
                                    struct mlx5_ib_rq *rq, u32 tdn,
                                    u32 *qp_flags_en,
-                                   struct ib_pd *pd)
+                                   struct ib_pd *pd,
+                                   u32 *out, int outlen)
  {
        u8 lb_flag = 0;
        u32 *in;
        if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
                lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
  
-       if (dev->rep) {
+       if (dev->is_rep) {
                lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
                *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
        }
  
        MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
  
-       err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
+       err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
  
+       rq->tirn = MLX5_GET(create_tir_out, out, tirn);
        if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
                err = mlx5_ib_enable_lb(dev, false, true);
  
@@@ -1463,6 -1466,7 +1466,7 @@@ static int create_raw_packet_qp(struct 
        int err;
        u32 tdn = mucontext->tdn;
        u16 uid = to_mpd(pd)->uid;
+       u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
  
        if (qp->sq.wqe_cnt) {
                err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
                if (err)
                        goto err_destroy_sq;
  
-               err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd);
+               err = create_raw_packet_qp_tir(
+                       dev, rq, tdn, &qp->flags_en, pd, out,
+                       MLX5_ST_SZ_BYTES(create_tir_out));
                if (err)
                        goto err_destroy_rq;
  
                        resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
                        resp->tirn = rq->tirn;
                        resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+                       if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
+                               resp->tir_icm_addr = MLX5_GET(
+                                       create_tir_out, out, icm_address_31_0);
+                               resp->tir_icm_addr |=
+                                       (u64)MLX5_GET(create_tir_out, out,
+                                                     icm_address_39_32)
+                                       << 32;
+                               resp->tir_icm_addr |=
+                                       (u64)MLX5_GET(create_tir_out, out,
+                                                     icm_address_63_40)
+                                       << 40;
+                               resp->comp_mask |=
+                                       MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+                       }
                }
        }
  
@@@ -1577,8 -1597,10 +1597,10 @@@ static int create_rss_raw_qp_tir(struc
                udata, struct mlx5_ib_ucontext, ibucontext);
        struct mlx5_ib_create_qp_resp resp = {};
        int inlen;
+       int outlen;
        int err;
        u32 *in;
+       u32 *out;
        void *tirc;
        void *hfso;
        u32 selected_fields = 0;
                return -EOPNOTSUPP;
        }
  
-       if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+       if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->is_rep) {
                lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
                qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
        }
        }
  
        inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-       in = kvzalloc(inlen, GFP_KERNEL);
+       outlen = MLX5_ST_SZ_BYTES(create_tir_out);
+       in = kvzalloc(inlen + outlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
  
+       out = in + MLX5_ST_SZ_DW(create_tir_in);
        MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
        MLX5_SET(tirc, tirc, disp_type,
        MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
  
  create_tir:
-       err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+       err = mlx5_core_create_tir_out(dev->mdev, in, inlen, out, outlen);
  
+       qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
        if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
                err = mlx5_ib_enable_lb(dev, false, true);
  
        if (mucontext->devx_uid) {
                resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
                resp.tirn = qp->rss_qp.tirn;
+               if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner)) {
+                       resp.tir_icm_addr =
+                               MLX5_GET(create_tir_out, out, icm_address_31_0);
+                       resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
+                                                          icm_address_39_32)
+                                            << 32;
+                       resp.tir_icm_addr |= (u64)MLX5_GET(create_tir_out, out,
+                                                          icm_address_63_40)
+                                            << 40;
+                       resp.comp_mask |=
+                               MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+               }
        }
  
        err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
@@@ -2287,7 -2324,7 +2324,7 @@@ static int create_qp_common(struct mlx5
  
  err_create:
        if (qp->create_type == MLX5_QP_USER)
-               destroy_qp_user(dev, pd, qp, base);
+               destroy_qp_user(dev, pd, qp, base, udata);
        else if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
  
@@@ -2398,7 -2435,8 +2435,8 @@@ static int modify_raw_packet_qp(struct 
                                const struct mlx5_modify_raw_qp_param *raw_qp_param,
                                u8 lag_tx_affinity);
  
- static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+                             struct ib_udata *udata)
  {
        struct mlx5_ib_cq *send_cq, *recv_cq;
        struct mlx5_ib_qp_base *base;
        if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
        else if (qp->create_type == MLX5_QP_USER)
-               destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
+               destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base, udata);
  }
  
  static const char *ib_qp_type_str(enum ib_qp_type type)
@@@ -2735,7 -2773,7 +2773,7 @@@ static int mlx5_ib_destroy_dct(struct m
        return 0;
  }
  
- int mlx5_ib_destroy_qp(struct ib_qp *qp)
+ int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
  {
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);
        if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
                return mlx5_ib_destroy_dct(mqp);
  
-       destroy_qp_common(dev, mqp);
+       destroy_qp_common(dev, mqp, udata);
  
        kfree(mqp);
  
@@@ -2964,6 -3002,11 +3002,11 @@@ static enum mlx5_qp_optpar opt_mask[MLX
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX     |
                                          MLX5_QP_OPTPAR_Q_KEY          |
                                          MLX5_QP_OPTPAR_PRI_PORT,
+                       [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE           |
+                                         MLX5_QP_OPTPAR_RAE            |
+                                         MLX5_QP_OPTPAR_RWE            |
+                                         MLX5_QP_OPTPAR_PKEY_INDEX     |
+                                         MLX5_QP_OPTPAR_PRI_PORT,
                },
                [MLX5_QP_STATE_RTR] = {
                        [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH  |
                                          MLX5_QP_OPTPAR_RWE            |
                                          MLX5_QP_OPTPAR_PM_STATE,
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+                                         MLX5_QP_OPTPAR_RRE            |
+                                         MLX5_QP_OPTPAR_RAE            |
+                                         MLX5_QP_OPTPAR_RWE            |
+                                         MLX5_QP_OPTPAR_PM_STATE       |
+                                         MLX5_QP_OPTPAR_RNR_TIMEOUT,
                },
        },
        [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY          |
                                          MLX5_QP_OPTPAR_SRQN           |
                                          MLX5_QP_OPTPAR_CQN_RCV,
+                       [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE           |
+                                         MLX5_QP_OPTPAR_RAE            |
+                                         MLX5_QP_OPTPAR_RWE            |
+                                         MLX5_QP_OPTPAR_RNR_TIMEOUT    |
+                                         MLX5_QP_OPTPAR_PM_STATE       |
+                                         MLX5_QP_OPTPAR_ALT_ADDR_PATH,
                },
        },
        [MLX5_QP_STATE_SQER] = {
                                           MLX5_QP_OPTPAR_RWE           |
                                           MLX5_QP_OPTPAR_RAE           |
                                           MLX5_QP_OPTPAR_RRE,
+                       [MLX5_QP_ST_XRC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT  |
+                                          MLX5_QP_OPTPAR_RWE           |
+                                          MLX5_QP_OPTPAR_RAE           |
+                                          MLX5_QP_OPTPAR_RRE,
                },
        },
  };
@@@ -3264,6 -3323,8 +3323,8 @@@ static int modify_raw_packet_qp(struct 
        }
  
        if (modify_sq) {
+               struct mlx5_flow_handle *flow_rule;
                if (tx_affinity) {
                        err = modify_raw_packet_tx_affinity(dev->mdev, sq,
                                                            tx_affinity,
                                return err;
                }
  
-               return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
-                                              raw_qp_param, qp->ibqp.pd);
+               flow_rule = create_flow_rule_vport_sq(dev, sq,
+                                                     raw_qp_param->port);
+               if (IS_ERR(flow_rule))
+                       return PTR_ERR(flow_rule);
+               err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+                                             raw_qp_param, qp->ibqp.pd);
+               if (err) {
+                       if (flow_rule)
+                               mlx5_del_flow_rules(flow_rule);
+                       return err;
+               }
+               if (flow_rule) {
+                       destroy_flow_rule_vport_sq(sq);
+                       sq->flow_rule = flow_rule;
+               }
+               return err;
        }
  
        return 0;
@@@ -3298,7 -3376,7 +3376,7 @@@ static unsigned int get_tx_affinity(str
        } else {
                tx_port_affinity =
                        (unsigned int)atomic_add_return(
-                               1, &dev->roce[port_num].tx_port_affinity) %
+                               1, &dev->port[port_num].roce.tx_port_affinity) %
                                MLX5_MAX_PORTS +
                        1;
                mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
@@@ -3403,7 -3481,7 +3481,7 @@@ static int __mlx5_ib_modify_qp(struct i
                    (ibqp->qp_type == IB_QPT_XRC_INI) ||
                    (ibqp->qp_type == IB_QPT_XRC_TGT)) {
                        if (dev->lag_active) {
-                               u8 p = mlx5_core_native_port_num(dev->mdev);
+                               u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
                                tx_affinity = get_tx_affinity(dev, pd, base, p,
                                                              udata);
                                context->flags |= cpu_to_be32(tx_affinity << 24);
                        raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
                }
  
+               if (attr_mask & IB_QP_PORT)
+                       raw_qp_param.port = attr->port_num;
                if (attr_mask & IB_QP_RATE_LIMIT) {
                        raw_qp_param.rl.rate = attr->rate_limit;
  
@@@ -4729,16 -4810,15 +4810,15 @@@ static void set_linv_wr(struct mlx5_ib_
  static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
  {
        __be32 *p = NULL;
-       u32 tidx = idx;
        int i, j;
  
        pr_debug("dump WQE index %u:\n", idx);
        for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
                if ((i & 0xf) == 0) {
-                       tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
-                       p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, tidx);
+                       p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
                        pr_debug("WQBB at %p:\n", (void *)p);
                        j = 0;
+                       idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
                }
                pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
                         be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
@@@ -5126,6 -5206,7 +5206,6 @@@ out
                /* Make sure doorbells don't leak out of SQ spinlock
                 * and reach the HCA out of order.
                 */
 -              mmiowb();
                bf->offset ^= bf->buf_size;
        }
  
@@@ -5627,8 -5708,7 +5707,7 @@@ out
  }
  
  struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
-                                         struct ib_ucontext *context,
-                                         struct ib_udata *udata)
+                                  struct ib_udata *udata)
  {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_xrcd *xrcd;
        return &xrcd->ibxrcd;
  }
  
- int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
  {
        struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
        u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
@@@ -5962,19 -6042,19 +6041,19 @@@ struct ib_wq *mlx5_ib_create_wq(struct 
  err_copy:
        mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
  err_user_rq:
-       destroy_user_rq(dev, pd, rwq);
+       destroy_user_rq(dev, pd, rwq, udata);
  err:
        kfree(rwq);
        return ERR_PTR(err);
  }
  
- int mlx5_ib_destroy_wq(struct ib_wq *wq)
+ int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
  {
        struct mlx5_ib_dev *dev = to_mdev(wq->device);
        struct mlx5_ib_rwq *rwq = to_mrwq(wq);
  
        mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
-       destroy_user_rq(dev, wq->pd, rwq);
+       destroy_user_rq(dev, wq->pd, rwq, udata);
        kfree(rwq);
  
        return 0;
index 877a6daffa98ab36e8b3ee4eb236a6ecea2db68f,97c7c0ff0f42ec023ce43b12ff1f4cf0c41d103f..c3cfea243af8c1da5338f0d14242d0cf5c2772c9
@@@ -77,7 -77,7 +77,7 @@@ struct mthca_cq_context 
        __be32 ci_db;           /* Arbel only */
        __be32 state_db;        /* Arbel only */
        u32    reserved;
- } __attribute__((packed));
+ } __packed;
  
  #define MTHCA_CQ_STATUS_OK          ( 0 << 28)
  #define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
@@@ -211,6 -211,11 +211,6 @@@ static inline void update_cons_index(st
                mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
                              dev->kar + MTHCA_CQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 -              /*
 -               * Make sure doorbells don't leak out of CQ spinlock
 -               * and reach the HCA out of order:
 -               */
 -              mmiowb();
        }
  }
  
index d65b189f20ead4e448d11039193ba0233e0c0f1c,6d3a00d28e90e2b9b8f29daa00bc45c4e77389eb..d04c245359eb06db2a946345bbc8aa23b6de4859
@@@ -115,7 -115,7 +115,7 @@@ struct mthca_qp_path 
        u8     hop_limit;
        __be32 sl_tclass_flowlabel;
        u8     rgid[16];
- } __attribute__((packed));
+ } __packed;
  
  struct mthca_qp_context {
        __be32 flags;
        __be16 rq_wqe_counter;  /* reserved on Tavor */
        __be16 sq_wqe_counter;  /* reserved on Tavor */
        u32    reserved3[18];
- } __attribute__((packed));
+ } __packed;
  
  struct mthca_qp_param {
        __be32 opt_param_mask;
        u32    reserved1;
        struct mthca_qp_context context;
        u32    reserved2[62];
- } __attribute__((packed));
+ } __packed;
  
  enum {
        MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
@@@ -1809,6 -1809,11 +1809,6 @@@ out
                              (qp->qpn << 8) | size0,
                              dev->kar + MTHCA_SEND_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 -              /*
 -               * Make sure doorbells don't leak out of SQ spinlock
 -               * and reach the HCA out of order:
 -               */
 -              mmiowb();
        }
  
        qp->sq.next_ind = ind;
@@@ -1919,6 -1924,12 +1919,6 @@@ out
        qp->rq.next_ind = ind;
        qp->rq.head    += nreq;
  
 -      /*
 -       * Make sure doorbells don't leak out of RQ spinlock and reach
 -       * the HCA out of order:
 -       */
 -      mmiowb();
 -
        spin_unlock_irqrestore(&qp->rq.lock, flags);
        return err;
  }
@@@ -2153,6 -2164,12 +2153,6 @@@ out
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
  
 -      /*
 -       * Make sure doorbells don't leak out of SQ spinlock and reach
 -       * the HCA out of order:
 -       */
 -      mmiowb();
 -
        spin_unlock_irqrestore(&qp->sq.lock, flags);
        return err;
  }
index 0010a3ed64f154b4220db3ee757cf1f44affb82c,79a43531c66dded5af20a55d3f8cc60c28bc7138..62bf986eba67b1578a1a589cfc83758e4d912850
@@@ -1407,7 -1407,7 +1407,7 @@@ static int nes_addr_resolve_neigh(struc
                if (neigh->nud_state & NUD_VALID) {
                        nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
                                  " is %pM, Gateway is 0x%08X \n", dst_ip,
 -                                neigh->ha, ntohl(rt->rt_gateway));
 +                                neigh->ha, ntohl(rt->rt_gw4));
  
                        if (arpindex >= 0) {
                                if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
@@@ -3033,7 -3033,8 +3033,8 @@@ static int nes_disconnect(struct nes_q
                /* Need to free the Last Streaming Mode Message */
                if (nesqp->ietf_frame) {
                        if (nesqp->lsmm_mr)
-                               nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr);
+                               nesibdev->ibdev.ops.dereg_mr(nesqp->lsmm_mr,
+                                                            NULL);
                        pci_free_consistent(nesdev->pcidev,
                                            nesqp->private_data_len + nesqp->ietf_frame_size,
                                            nesqp->ietf_frame, nesqp->ietf_frame_pbase);
index 8686a98e113d3fc5b24e2316c13aa9a0249c24e4,e52d8761d68133dcc376a78f0e32a022af0b7a88..3d7bde19838e7b7c6251f493af9fbb3ab7272153
@@@ -42,6 -42,7 +42,7 @@@
  #include <rdma/ib_umem.h>
  #include <rdma/ib_addr.h>
  #include <rdma/ib_cache.h>
+ #include <rdma/uverbs_ioctl.h>
  
  #include <linux/qed/common_hsi.h>
  #include "qedr_hsi_rdma.h"
@@@ -436,8 -437,7 +437,7 @@@ int qedr_mmap(struct ib_ucontext *conte
                                  vma->vm_page_prot);
  }
  
- int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
-                 struct ib_udata *udata)
+ int qedr_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
  {
        struct ib_device *ibdev = ibpd->device;
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        int rc;
  
        DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
-                (udata && context) ? "User Lib" : "Kernel");
+                udata ? "User Lib" : "Kernel");
  
        if (!dev->rdma_ctx) {
                DP_ERR(dev, "invalid RDMA context\n");
  
        pd->pd_id = pd_id;
  
-       if (udata && context) {
+       if (udata) {
                struct qedr_alloc_pd_uresp uresp = {
                        .pd_id = pd_id,
                };
+               struct qedr_ucontext *context = rdma_udata_to_drv_context(
+                       udata, struct qedr_ucontext, ibucontext);
  
                rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (rc) {
                        return rc;
                }
  
-               pd->uctx = get_qedr_ucontext(context);
+               pd->uctx = context;
                pd->uctx->pd = pd;
        }
  
        return 0;
  }
  
- void qedr_dealloc_pd(struct ib_pd *ibpd)
+ void qedr_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
  {
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
        struct qedr_pd *pd = get_qedr_pd(ibpd);
@@@ -773,6 -775,9 +775,6 @@@ static void doorbell_cq(struct qedr_cq 
        cq->db.data.agg_flags = flags;
        cq->db.data.value = cpu_to_le32(cons);
        writeq(cq->db.raw, cq->db_addr);
 -
 -      /* Make sure write would stick */
 -      mmiowb();
  }
  
  int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
  
  struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
                             const struct ib_cq_init_attr *attr,
-                            struct ib_ucontext *ib_ctx, struct ib_udata *udata)
+                            struct ib_udata *udata)
  {
-       struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
+       struct qedr_ucontext *ctx = rdma_udata_to_drv_context(
+               udata, struct qedr_ucontext, ibucontext);
        struct qed_rdma_destroy_cq_out_params destroy_oparams;
        struct qed_rdma_destroy_cq_in_params destroy_iparams;
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        cq->sig = QEDR_CQ_MAGIC_NUMBER;
        spin_lock_init(&cq->cq_lock);
  
-       if (ib_ctx) {
+       if (udata) {
                rc = qedr_copy_cq_uresp(dev, cq, udata);
                if (rc)
                        goto err3;
@@@ -959,7 -965,7 +962,7 @@@ int qedr_resize_cq(struct ib_cq *ibcq, 
  #define QEDR_DESTROY_CQ_MAX_ITERATIONS                (10)
  #define QEDR_DESTROY_CQ_ITER_DURATION         (10)
  
- int qedr_destroy_cq(struct ib_cq *ibcq)
+ int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
  {
        struct qedr_dev *dev = get_qedr_dev(ibcq->device);
        struct qed_rdma_destroy_cq_out_params oparams;
  
        dev->ops->common->chain_free(dev->cdev, &cq->pbl);
  
-       if (ibcq->uobject && ibcq->uobject->context) {
+       if (udata) {
                qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
                ib_umem_release(cq->q.umem);
        }
@@@ -1044,10 -1050,13 +1047,13 @@@ static inline int get_gid_info_from_tab
        enum rdma_network_type nw_type;
        const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
        u32 ipv4_addr;
+       int ret;
        int i;
  
        gid_attr = grh->sgid_attr;
-       qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+       ret = rdma_read_gid_l2_fields(gid_attr, &qp_params->vlan_id, NULL);
+       if (ret)
+               return ret;
  
        nw_type = rdma_gid_attr_network_type(gid_attr);
        switch (nw_type) {
@@@ -1261,7 -1270,7 +1267,7 @@@ static void qedr_set_roce_db_info(struc
        }
  }
  
- static int qedr_check_srq_params(struct ib_pd *ibpd, struct qedr_dev *dev,
+ static int qedr_check_srq_params(struct qedr_dev *dev,
                                 struct ib_srq_init_attr *attrs,
                                 struct ib_udata *udata)
  {
@@@ -1377,38 -1386,28 +1383,28 @@@ err0
        return rc;
  }
  
- static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
-                       void *ptr, u32 id);
- static void qedr_idr_remove(struct qedr_dev *dev,
-                           struct qedr_idr *qidr, u32 id);
- struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
-                              struct ib_srq_init_attr *init_attr,
-                              struct ib_udata *udata)
+ int qedr_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+                   struct ib_udata *udata)
  {
        struct qed_rdma_destroy_srq_in_params destroy_in_params;
        struct qed_rdma_create_srq_in_params in_params = {};
-       struct qedr_dev *dev = get_qedr_dev(ibpd->device);
+       struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
        struct qed_rdma_create_srq_out_params out_params;
-       struct qedr_pd *pd = get_qedr_pd(ibpd);
+       struct qedr_pd *pd = get_qedr_pd(ibsrq->pd);
        struct qedr_create_srq_ureq ureq = {};
        u64 pbl_base_addr, phy_prod_pair_addr;
        struct qedr_srq_hwq_info *hw_srq;
        u32 page_cnt, page_size;
-       struct qedr_srq *srq;
+       struct qedr_srq *srq = get_qedr_srq(ibsrq);
        int rc = 0;
  
        DP_DEBUG(dev, QEDR_MSG_QP,
                 "create SRQ called from %s (pd %p)\n",
                 (udata) ? "User lib" : "kernel", pd);
  
-       rc = qedr_check_srq_params(ibpd, dev, init_attr, udata);
+       rc = qedr_check_srq_params(dev, init_attr, udata);
        if (rc)
-               return ERR_PTR(-EINVAL);
-       srq = kzalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq)
-               return ERR_PTR(-ENOMEM);
+               return -EINVAL;
  
        srq->dev = dev;
        hw_srq = &srq->hw_srq;
                        goto err2;
        }
  
-       rc = qedr_idr_add(dev, &dev->srqidr, srq, srq->srq_id);
+       rc = xa_insert_irq(&dev->srqs, srq->srq_id, srq, GFP_KERNEL);
        if (rc)
                goto err2;
  
        DP_DEBUG(dev, QEDR_MSG_SRQ,
                 "create srq: created srq with srq_id=0x%0x\n", srq->srq_id);
-       return &srq->ibsrq;
+       return 0;
  
  err2:
        destroy_in_params.srq_id = srq->srq_id;
@@@ -1482,18 -1481,16 +1478,16 @@@ err1
        else
                qedr_free_srq_kernel_params(srq);
  err0:
-       kfree(srq);
-       return ERR_PTR(-EFAULT);
+       return -EFAULT;
  }
  
int qedr_destroy_srq(struct ib_srq *ibsrq)
void qedr_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
  {
        struct qed_rdma_destroy_srq_in_params in_params = {};
        struct qedr_dev *dev = get_qedr_dev(ibsrq->device);
        struct qedr_srq *srq = get_qedr_srq(ibsrq);
  
-       qedr_idr_remove(dev, &dev->srqidr, srq->srq_id);
+       xa_erase_irq(&dev->srqs, srq->srq_id);
        in_params.srq_id = srq->srq_id;
        dev->ops->rdma_destroy_srq(dev->rdma_ctx, &in_params);
  
        DP_DEBUG(dev, QEDR_MSG_SRQ,
                 "destroy srq: destroyed srq with srq_id=0x%0x\n",
                 srq->srq_id);
-       kfree(srq);
-       return 0;
  }
  
  int qedr_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
@@@ -1593,29 -1587,6 +1584,6 @@@ static inline void qedr_qp_user_print(s
                 qp->usq.buf_len, qp->urq.buf_addr, qp->urq.buf_len);
  }
  
- static int qedr_idr_add(struct qedr_dev *dev, struct qedr_idr *qidr,
-                       void *ptr, u32 id)
- {
-       int rc;
-       idr_preload(GFP_KERNEL);
-       spin_lock_irq(&qidr->idr_lock);
-       rc = idr_alloc(&qidr->idr, ptr, id, id + 1, GFP_ATOMIC);
-       spin_unlock_irq(&qidr->idr_lock);
-       idr_preload_end();
-       return rc < 0 ? rc : 0;
- }
- static void qedr_idr_remove(struct qedr_dev *dev, struct qedr_idr *qidr, u32 id)
- {
-       spin_lock_irq(&qidr->idr_lock);
-       idr_remove(&qidr->idr, id);
-       spin_unlock_irq(&qidr->idr_lock);
- }
  static inline void
  qedr_iwarp_populate_user_qp(struct qedr_dev *dev,
                            struct qedr_qp *qp,
@@@ -1985,7 -1956,7 +1953,7 @@@ struct ib_qp *qedr_create_qp(struct ib_
        qp->ibqp.qp_num = qp->qp_id;
  
        if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
-               rc = qedr_idr_add(dev, &dev->qpidr, qp, qp->qp_id);
+               rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
                if (rc)
                        goto err;
        }
@@@ -2081,6 -2052,8 +2049,6 @@@ static int qedr_update_qp_state(struct 
  
                        if (rdma_protocol_roce(&dev->ibdev, 1)) {
                                writel(qp->rq.db_data.raw, qp->rq.db);
 -                              /* Make sure write takes effect */
 -                              mmiowb();
                        }
                        break;
                case QED_ROCE_QP_STATE_ERR:
@@@ -2493,7 -2466,8 +2461,8 @@@ err
        return rc;
  }
  
- static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp)
+ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
+                                 struct ib_udata *udata)
  {
        int rc = 0;
  
                        return rc;
        }
  
-       if (qp->ibqp.uobject && qp->ibqp.uobject->context)
+       if (udata)
                qedr_cleanup_user(dev, qp);
        else
                qedr_cleanup_kernel(dev, qp);
        return 0;
  }
  
- int qedr_destroy_qp(struct ib_qp *ibqp)
+ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
  {
        struct qedr_qp *qp = get_qedr_qp(ibqp);
        struct qedr_dev *dev = qp->dev;
        if (qp->qp_type == IB_QPT_GSI)
                qedr_destroy_gsi_qp(dev);
  
-       qedr_free_qp_resources(dev, qp);
+       qedr_free_qp_resources(dev, qp, udata);
  
        if (atomic_dec_and_test(&qp->refcnt) &&
            rdma_protocol_iwarp(&dev->ibdev, 1)) {
-               qedr_idr_remove(dev, &dev->qpidr, qp->qp_id);
+               xa_erase_irq(&dev->qps, qp->qp_id);
                kfree(qp);
        }
        return rc;
  }
  
struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
-                            u32 flags, struct ib_udata *udata)
int qedr_create_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr, u32 flags,
+                  struct ib_udata *udata)
  {
-       struct qedr_ah *ah;
-       ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
-       if (!ah)
-               return ERR_PTR(-ENOMEM);
+       struct qedr_ah *ah = get_qedr_ah(ibah);
  
        rdma_copy_ah_attr(&ah->attr, attr);
  
-       return &ah->ibah;
+       return 0;
  }
  
int qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
void qedr_destroy_ah(struct ib_ah *ibah, u32 flags)
  {
        struct qedr_ah *ah = get_qedr_ah(ibah);
  
        rdma_destroy_ah_attr(&ah->attr);
-       kfree(ah);
-       return 0;
  }
  
  static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
@@@ -2734,7 -2702,7 +2697,7 @@@ err0
        return ERR_PTR(rc);
  }
  
- int qedr_dereg_mr(struct ib_mr *ib_mr)
+ int qedr_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
  {
        struct qedr_mr *mr = get_qedr_mr(ib_mr);
        struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
@@@ -2826,8 -2794,8 +2789,8 @@@ err0
        return ERR_PTR(rc);
  }
  
- struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
-                           enum ib_mr_type mr_type, u32 max_num_sg)
+ struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+                           u32 max_num_sg, struct ib_udata *udata)
  {
        struct qedr_mr *mr;
  
@@@ -3497,6 -3465,9 +3460,6 @@@ int qedr_post_send(struct ib_qp *ibqp, 
        smp_wmb();
        writel(qp->sq.db_data.raw, qp->sq.db);
  
 -      /* Make sure write sticks */
 -      mmiowb();
 -
        spin_unlock_irqrestore(&qp->q_lock, flags);
  
        return rc;
@@@ -3687,8 -3658,12 +3650,8 @@@ int qedr_post_recv(struct ib_qp *ibqp, 
  
                writel(qp->rq.db_data.raw, qp->rq.db);
  
 -              /* Make sure write sticks */
 -              mmiowb();
 -
                if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
                        writel(qp->rq.iwarp_db2_data.raw, qp->rq.iwarp_db2);
 -                      mmiowb();       /* for second doorbell */
                }
  
                wr = wr->next;
index ac6a84f11ad082d389542a3418d977dfe3f5d168,5f4aa36e5ca4ea977d82c6db673d5cfd1617e3d1..dd4843379f51de80f3d0219f13fec4005445abe3
@@@ -3793,6 -3793,7 +3793,6 @@@ static void qib_7322_put_tid(struct qib
                pa = chippa;
        }
        writeq(pa, tidptr);
 -      mmiowb();
  }
  
  /**
@@@ -4439,8 -4440,10 +4439,8 @@@ static void qib_update_7322_usrhead(str
                adjust_rcv_timeout(rcd, npkts);
        if (updegr)
                qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
 -      mmiowb();
        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
        qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 -      mmiowb();
  }
  
  static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
@@@ -6137,7 -6140,7 +6137,7 @@@ static void set_no_qsfp_atten(struct qi
  static int setup_txselect(const char *str, const struct kernel_param *kp)
  {
        struct qib_devdata *dd;
-       unsigned long val;
+       unsigned long index, val;
        char *n;
  
        if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
        }
        strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
  
-       list_for_each_entry(dd, &qib_dev_list, list)
+       xa_for_each(&qib_dev_table, index, dd)
                if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
                        set_no_qsfp_atten(dd, 1);
        return 0;
index 5a39b323c52e9e82b5c455c1723964d4d98a169f,9510392531056d4f44506c613394fefe844d0f10..5a27246db883c4a62e94eb339dc990fa2c1d210b
@@@ -56,6 -56,7 +56,6 @@@
  
  enum {
        MLX5_BOARD_ID_LEN = 64,
 -      MLX5_MAX_NAME_LEN = 16,
  };
  
  enum {
@@@ -512,13 -513,8 +512,13 @@@ struct mlx5_rl_table 
        struct mlx5_rl_entry   *rl_entry;
  };
  
 +struct mlx5_core_roce {
 +      struct mlx5_flow_table *ft;
 +      struct mlx5_flow_group *fg;
 +      struct mlx5_flow_handle *allow_rule;
 +};
 +
  struct mlx5_priv {
 -      char                    name[MLX5_MAX_NAME_LEN];
        struct mlx5_eq_table    *eq_table;
  
        /* pages stuff */
        struct mlx5_lag         *lag;
        struct mlx5_devcom      *devcom;
        unsigned long           pci_dev_data;
 +      struct mlx5_core_roce   roce;
        struct mlx5_fc_stats            fc_stats;
        struct mlx5_rl_table            rl_table;
  
@@@ -648,7 -643,6 +648,7 @@@ struct mlx5_fw_tracer
  struct mlx5_vxlan;
  
  struct mlx5_core_dev {
 +      struct device *device;
        struct pci_dev         *pdev;
        /* sync pci state */
        struct mutex            pci_status_mutex;
  #endif
        struct mlx5_clock        clock;
        struct mlx5_ib_clock_info  *clock_info;
-       struct page             *clock_info_page;
        struct mlx5_fw_tracer   *tracer;
  };
  
This page took 0.38626 seconds and 4 git commands to generate.