Git Repo - linux.git/commitdiff
Merge tag 'drm-next-2018-08-15' of git://anongit.freedesktop.org/drm/drm
authorLinus Torvalds <[email protected]>
Thu, 16 Aug 2018 00:39:07 +0000 (17:39 -0700)
committerLinus Torvalds <[email protected]>
Thu, 16 Aug 2018 00:39:07 +0000 (17:39 -0700)
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 4.19.

  Rob has some new hardware support for new qualcomm hw that I'll send
  along separately. This has the display part of it, the remaining pull
  is for the acceleration engine.

  This also contains a wound-wait/wait-die mutex rework, Peter has acked
  it for merging via my tree.

  Otherwise mostly the usual level of activity. Summary:

  core:
   - Wound-wait/wait-die mutex rework
   - Add writeback connector type
   - Add "content type" property for HDMI
   - Move GEM bo to drm_framebuffer
   - Initial gpu scheduler documentation
   - GPU scheduler fixes for dying processes
   - Console deferred fbcon takeover support
   - Displayport support for CEC tunneling over AUX

  panel:
   - otm8009a panel driver fixes
   - Innolux TV123WAM and G070Y2-L01 panel driver
   - Ilitek ILI9881c panel driver
   - Rocktech RK070ER9427 LCD
   - EDT ETM0700G0EDH6 and EDT ETM0700G0BDH6
   - DLC DLC0700YZG-1
   - BOE HV070WSA-100
   - newhaven, nhd-4.3-480272ef-atxl LCD
   - DataImage SCF0700C48GGU18
   - Sharp LQ035Q7DB03
   - p079zca: Refactor to support multiple panels

  tinydrm:
   - ILI9341 display panel

  New driver:
   - vkms - virtual kms driver for testing.

  i915:
   - Icelake:
        Display enablement
        DSI support
        IRQ support
        Powerwell support
   - GPU reset fixes and improvements
   - Full ppgtt support refactoring
   - PSR fixes and improvements
   - Execlist improvements
   - GuC related fixes

  amdgpu:
   - Initial amdgpu documentation
   - JPEG engine support on VCN
   - CIK uses powerplay by default
   - Move to using core PCIE functionality for gens/lanes
   - DC/Powerplay interface rework
   - Stutter mode support for RV
   - Vega12 Powerplay updates
   - GFXOFF fixes
   - GPUVM fault debugging
   - Vega12 GFXOFF
   - DC improvements
   - DC i2c/aux changes
   - UVD 7.2 fixes
   - Powerplay fixes for Polaris12, CZ/ST
   - command submission bo_list fixes

  amdkfd:
   - Raven support
   - Power management fixes

  udl:
   - Cleanups and fixes

  nouveau:
   - misc fixes and cleanups.

  msm:
   - DPU1 display controller support in sdm845
   - GPU coredump support.

  vmwgfx:
   - Atomic modesetting validation fixes
   - Support for multisample surfaces

  armada:
   - Atomic modesetting support completed.

  exynos:
   - IPPv2 fixes
   - Move g2d to component framework
   - Suspend/resume support cleanups
   - Driver cleanups

  imx:
   - CSI configuration improvements
   - Driver cleanups
   - Use atomic suspend/resume helpers
   - ipu-v3 V4L2 XRGB32/XBGR32 support

  pl111:
   - Add Nomadik LCDC variant

  v3d:
   - GPU scheduler jobs management

  sun4i:
   - R40 display engine support
   - TCON TOP driver

  mediatek:
   - MT2712 SoC support

  rockchip:
   - vop fixes

  omapdrm:
   - Workaround for DRA7 errata i932
   - Fix mm_list locking

  mali-dp:
   - Writeback implementation
   - PM improvements
   - Internal error reporting debugfs

  tilcdc:
   - Single fix for deferred probing

  hdlcd:
   - Teardown fixes

  tda998x:
   - Converted to a bridge driver.

  etnaviv:
   - Misc fixes"

* tag 'drm-next-2018-08-15' of git://anongit.freedesktop.org/drm/drm: (1506 commits)
  drm/amdgpu/sriov: give 8s for recover vram under RUNTIME
  drm/scheduler: fix param documentation
  drm/i2c: tda998x: correct PLL divider calculation
  drm/i2c: tda998x: get rid of private fill_modes function
  drm/i2c: tda998x: move mode_valid() to bridge
  drm/i2c: tda998x: register bridge outside of component helper
  drm/i2c: tda998x: cleanup from previous changes
  drm/i2c: tda998x: allocate tda998x_priv inside tda998x_create()
  drm/i2c: tda998x: convert to bridge driver
  drm/scheduler: fix timeout worker setup for out of order job completions
  drm/amd/display: display connected to dp-1 does not light up
  drm/amd/display: update clk for various HDMI color depths
  drm/amd/display: program display clock on cache match
  drm/amd/display: Add NULL check for enabling dp ss
  drm/amd/display: add vbios table check for enabling dp ss
  drm/amd/display: Don't share clk source between DP and HDMI
  drm/amd/display: Fix DP HBR2 Eye Diagram Pattern on Carrizo
  drm/amd/display: Use calculated disp_clk_khz value for dce110
  drm/amd/display: Implement custom degamma lut on dcn
  drm/amd/display: Destroy aux_engines only once
  ...

13 files changed:
1  2 
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
MAINTAINERS
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/xen/gntdev-dmabuf.c
include/linux/pci.h
kernel/locking/locktorture.c
kernel/printk/printk.c

index 5eaffca2e28c23c01f88cd82e74d096c6bdfd03e,d22237a88eae7e439c10e50496ca8a82ecb80601..dfc743219bd88e4ab858470fac7d77752ff261a3
@@@ -43,6 -43,8 +43,6 @@@ Optional properties
    the master link of the 2-DSI panel.
  - qcom,sync-dual-dsi: Boolean value indicating if the DSI controller is
    driving a 2-DSI panel whose 2 links need receive command simultaneously.
 -- interrupt-parent: phandle to the MDP block if the interrupt signal is routed
 -  through MDP block
  - pinctrl-names: the pin control state names; should contain "default"
  - pinctrl-0: the default pinctrl state (active)
  - pinctrl-n: the "sleep" pinctrl state
@@@ -119,6 -121,20 +119,20 @@@ Required properties
  Optional properties:
  - qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
    regulator is wanted.
+ - qcom,mdss-mdp-transfer-time-us:     Specifies the dsi transfer time for command mode
+                                       panels in microseconds. Driver uses this number to adjust
+                                       the clock rate according to the expected transfer time.
+                                       Increasing this value would slow down the mdp processing
+                                       and can result in slower performance.
+                                       Decreasing this value can speed up the mdp processing,
+                                       but this can also impact power consumption.
+                                       As a rule this time should not be higher than the time
+                                       that would be expected with the processing at the
+                                       dsi link rate since anyways this would be the maximum
+                                       transfer time that could be achieved.
+                                       If ping pong split is enabled, this time should not be higher
+                                       than two times the dsi link rate time.
+                                       If the property is not specified, then the default value is 14000 us.
  
  [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
  [2] Documentation/devicetree/bindings/graph.txt
@@@ -169,6 -185,8 +183,8 @@@ Example
                qcom,master-dsi;
                qcom,sync-dual-dsi;
  
+               qcom,mdss-mdp-transfer-time-us = <12000>;
                pinctrl-names = "default", "sleep";
                pinctrl-0 = <&dsi_active>;
                pinctrl-1 = <&dsi_suspend>;
index 41f0b97eb933e6197a93b9a4b388f716a0c3856b,2afaa633ffc893e4e994d84876033d858ef4d1cd..f32b79814dd75f93fea2cb25ca794df207b4bbdb
@@@ -8,6 -8,7 +8,7 @@@ abracon  Abracon Corporatio
  actions       Actions Semiconductor Co., Ltd.
  active-semi   Active-Semi International Inc
  ad    Avionic Design GmbH
+ adafruit      Adafruit Industries, LLC
  adapteva      Adapteva, Inc.
  adaptrum      Adaptrum, Inc.
  adh   AD Holdings Plc.
@@@ -41,7 -42,6 +42,7 @@@ arrow Arrow Electronic
  artesyn       Artesyn Embedded Technologies Inc.
  asahi-kasei   Asahi Kasei Corp.
  aspeed        ASPEED Technology Inc.
 +asus  AsusTek Computer Inc.
  atlas Atlas Scientific LLC
  atmel Atmel Corporation
  auo   AU Optronics Corporation
@@@ -54,7 -54,6 +55,7 @@@ axentia       Axentia Technologies A
  axis  Axis Communications AB
  bananapi BIPAI KEJI LIMITED
  bhf   Beckhoff Automation GmbH & Co. KG
 +bitmain       Bitmain Technologies
  boe   BOE Technology Group Co., Ltd.
  bosch Bosch Sensortec GmbH
  boundary      Boundary Devices Inc.
@@@ -87,6 -86,7 +88,7 @@@ cubietech     Cubietech, Ltd
  cypress       Cypress Semiconductor Corporation
  cznic CZ.NIC, z.s.p.o.
  dallas        Maxim Integrated Products (formerly Dallas Semiconductor)
+ dataimage     DataImage, Inc.
  davicom       DAVICOM Semiconductor, Inc.
  delta Delta Electronics, Inc.
  denx  Denx Software Engineering
@@@ -95,6 -95,7 +97,7 @@@ dh    DH electronics Gmb
  digi  Digi International Inc.
  digilent      Diglent, Inc.
  dioo  Dioo Microcircuit Co., Ltd
+ dlc   DLC Display Co., Ltd.
  dlg   Dialog Semiconductor
  dlink D-Link Corporation
  dmo   Data Modul AG
@@@ -190,6 -191,7 +193,7 @@@ keymile    Keymile Gmb
  khadas        Khadas
  kiebackpeter    Kieback & Peter GmbH
  kinetic Kinetic Technologies
+ kingdisplay   King & Display Technology Co., Ltd.
  kingnovel     Kingnovel Technology Co., Ltd.
  koe   Kaohsiung Opto-Electronics Inc.
  kosagi        Sutajio Ko-Usagi PTE Ltd.
@@@ -397,7 -399,6 +401,7 @@@ v3 V3 Semiconducto
  variscite     Variscite Ltd.
  via   VIA Technologies, Inc.
  virtio        Virtual I/O Device Specification, developed by the OASIS consortium
 +vitesse       Vitesse Semiconductor Corporation
  vivante       Vivante Corporation
  vocore VoCore Studio
  voipac        Voipac Technologies s.r.o.
@@@ -415,7 -416,6 +419,7 @@@ xes        Extreme Engineering Solutions (X-ES
  xillybus      Xillybus Ltd.
  xlnx  Xilinx
  xunlong       Shenzhen Xunlong Software CO.,Limited
 +ysoft Y Soft Corporation a.s.
  zarlink       Zarlink Semiconductor
  zeitec        ZEITEC Semiconductor Co., LTD.
  zidoo Shenzhen Zidoo Technology Co., Ltd.
diff --combined MAINTAINERS
index 967ce8cdd1cc930cec5b916871fbf466c1d97023,9b2bf134964e7a42b8ca332d2869a470dab0877d..5ca346e6140b4a6f2cfe5c335d01454be6cfdc2e
@@@ -367,12 -367,6 +367,12 @@@ L:       [email protected]
  S:    Maintained
  F:    drivers/acpi/arm64
  
 +ACPI I2C MULTI INSTANTIATE DRIVER
 +M:    Hans de Goede <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/platform/x86/i2c-multi-instantiate.c
 +
  ACPI PMIC DRIVERS
  M:    "Rafael J. Wysocki" <[email protected]>
  M:    Len Brown <[email protected]>
@@@ -734,6 -728,14 +734,14 @@@ S:       Supporte
  F:    drivers/crypto/ccp/
  F:    include/linux/ccp.h
  
+ AMD DISPLAY CORE
+ M:    Harry Wentland <[email protected]>
+ M:    Leo Li <[email protected]>
+ L:    [email protected]
+ T:    git git://people.freedesktop.org/~agd5f/linux
+ S:    Supported
+ F:    drivers/gpu/drm/amd/display/
  AMD FAM15H PROCESSOR POWER MONITORING DRIVER
  M:    Huang Rui <[email protected]>
  L:    [email protected]
@@@ -783,6 -785,14 +791,14 @@@ F:       drivers/gpu/drm/amd/include/vi_struc
  F:    drivers/gpu/drm/amd/include/v9_structs.h
  F:    include/uapi/linux/kfd_ioctl.h
  
+ AMD POWERPLAY
+ M:    Rex Zhu <[email protected]>
+ M:    Evan Quan <[email protected]>
+ L:    [email protected]
+ S:    Supported
+ F:    drivers/gpu/drm/amd/powerplay/
+ T:    git git://people.freedesktop.org/~agd5f/linux
  AMD SEATTLE DEVICE TREE SUPPORT
  M:    Brijesh Singh <[email protected]>
  M:    Suravee Suthikulpanit <[email protected]>
@@@ -2270,7 -2280,6 +2286,7 @@@ L:      [email protected]
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git
  S:    Maintained
  F:    arch/arm64/
 +X:    arch/arm64/boot/dts/
  F:    Documentation/arm64/
  
  AS3645A LED FLASH CONTROLLER DRIVER
@@@ -4407,12 -4416,6 +4423,12 @@@ X:    Documentation/sp
  X:    Documentation/media
  T:    git git://git.lwn.net/linux.git docs-next
  
 +DOCUMENTATION/ITALIAN
 +M:    Federico Vaga <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/translations/it_IT
 +
  DONGWOON DW9714 LENS VOICE COIL DRIVER
  M:    Sakari Ailus <[email protected]>
  L:    [email protected]
@@@ -4897,7 -4900,8 +4913,8 @@@ F:      Documentation/gpu/xen-front.rs
  
  DRM TTM SUBSYSTEM
  M:    Christian Koenig <[email protected]>
- M:    Roger He <[email protected]>
+ M:    Huang Rui <[email protected]>
+ M:    Junwei Zhang <[email protected]>
  T:    git git://people.freedesktop.org/~agd5f/linux
  S:    Maintained
  L:    [email protected]
@@@ -5457,7 -5461,6 +5474,7 @@@ F:      drivers/iommu/exynos-iommu.
  
  EZchip NPS platform support
  M:    Vineet Gupta <[email protected]>
 +M:    Ofer Levi <[email protected]>
  S:    Supported
  F:    arch/arc/plat-eznps
  F:    arch/arc/boot/dts/eznps.dts
@@@ -5943,7 -5946,7 +5960,7 @@@ F:      Documentation/dev-tools/gcov.rs
  
  GDB KERNEL DEBUGGING HELPER SCRIPTS
  M:    Jan Kiszka <[email protected]>
 -M:    Kieran Bingham <k[email protected]>
 +M:    Kieran Bingham <k[email protected]>
  S:    Supported
  F:    scripts/gdb/
  
@@@ -7041,7 -7044,7 +7058,7 @@@ M:      Guenter Roeck <[email protected]
  L:    [email protected]
  S:    Maintained
  F:    Documentation/hwmon/ina209
 -F:    Documentation/devicetree/bindings/i2c/ina209.txt
 +F:    Documentation/devicetree/bindings/hwmon/ina2xx.txt
  F:    drivers/hwmon/ina209.c
  
  INA2XX HARDWARE MONITOR DRIVER
@@@ -7364,7 -7367,7 +7381,7 @@@ M:      Megha Dey <[email protected]
  R:    Tim Chen <[email protected]>
  L:    [email protected]
  S:    Supported
 -F:    arch/x86/crypto/sha*-mb
 +F:    arch/x86/crypto/sha*-mb/
  F:    crypto/mcryptd.c
  
  INTEL TELEMETRY DRIVER
@@@ -7999,7 -8002,7 +8016,7 @@@ F:      lib/test_kmod.
  F:    tools/testing/selftests/kmod/
  
  KPROBES
 -M:    Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
 +M:    Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
  M:    Anil S Keshavamurthy <[email protected]>
  M:    "David S. Miller" <[email protected]>
  M:    Masami Hiramatsu <[email protected]>
@@@ -8330,18 -8333,17 +8347,18 @@@ M:   Jade Alglave <[email protected]
  M:    Luc Maranget <[email protected]>
  M:    "Paul E. McKenney" <[email protected]>
  R:    Akira Yokosawa <[email protected]>
 +R:    Daniel Lustig <[email protected]>
  L:    [email protected]
 +L:    [email protected]
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
  F:    tools/memory-model/
 +F:    Documentation/atomic_bitops.txt
 +F:    Documentation/atomic_t.txt
 +F:    Documentation/core-api/atomic_ops.rst
 +F:    Documentation/core-api/refcount-vs-atomic.rst
  F:    Documentation/memory-barriers.txt
  
 -LINUX SECURITY MODULE (LSM) FRAMEWORK
 -M:    Chris Wright <[email protected]>
 -L:    [email protected]
 -S:    Supported
 -
  LIS3LV02D ACCELEROMETER DRIVER
  M:    Eric Piel <[email protected]>
  S:    Maintained
@@@ -9002,14 -9004,6 +9019,14 @@@ F:    include/uapi/linux/meye.
  F:    include/uapi/linux/ivtv*
  F:    include/uapi/linux/uvcvideo.h
  
 +MEDIATEK BLUETOOTH DRIVER
 +M:    Sean Wang <[email protected]>
 +L:    [email protected]
 +L:    [email protected] (moderated for non-subscribers)
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
 +F:    drivers/bluetooth/btmtkuart.c
 +
  MEDIATEK CIR DRIVER
  M:    Sean Wang <[email protected]>
  S:    Maintained
@@@ -9182,7 -9176,6 +9199,7 @@@ S:      Supporte
  W:    http://www.mellanox.com
  Q:    http://patchwork.ozlabs.org/project/netdev/list/
  F:    drivers/net/ethernet/mellanox/mlxsw/
 +F:    tools/testing/selftests/drivers/net/mlxsw/
  
  MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
  M:    [email protected]
@@@ -9370,6 -9363,7 +9387,6 @@@ F:      drivers/media/platform/atmel/atmel-i
  F:    devicetree/bindings/media/atmel-isc.txt
  
  MICROCHIP / ATMEL NAND DRIVER
 -M:    Wenyou Yang <[email protected]>
  M:    Josh Wu <[email protected]>
  L:    [email protected]
  S:    Supported
@@@ -11280,7 -11274,7 +11297,7 @@@ F:   Documentation/devicetree/bindings/pi
  
  PIN CONTROLLER - INTEL
  M:    Mika Westerberg <[email protected]>
 -M:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
 +M:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  S:    Maintained
  F:    drivers/pinctrl/intel/
  
@@@ -12061,9 -12055,9 +12078,9 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    Documentation/RCU/
  X:    Documentation/RCU/torture.txt
  F:    include/linux/rcu*
 -X:    include/linux/srcu.h
 +X:    include/linux/srcu*.h
  F:    kernel/rcu/
 -X:    kernel/torture.c
 +X:    kernel/rcu/srcu*.c
  
  REAL TIME CLOCK (RTC) SUBSYSTEM
  M:    Alessandro Zummo <[email protected]>
@@@ -12088,13 -12082,6 +12105,13 @@@ S: Maintaine
  F:    sound/soc/codecs/rt*
  F:    include/sound/rt*.h
  
 +REALTEK RTL83xx SMI DSA ROUTER CHIPS
 +M:    Linus Walleij <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
 +F:    drivers/net/dsa/realtek-smi*
 +F:    drivers/net/dsa/rtl83*
 +
  REGISTER MAP ABSTRACTION
  M:    Mark Brown <[email protected]>
  L:    [email protected]
@@@ -12202,8 -12189,6 +12219,8 @@@ S:   Maintaine
  F:    Documentation/rfkill.txt
  F:    Documentation/ABI/stable/sysfs-class-rfkill
  F:    net/rfkill/
 +F:    include/linux/rfkill.h
 +F:    include/uapi/linux/rfkill.h
  
  RHASHTABLE
  M:    Thomas Graf <[email protected]>
@@@ -12211,9 -12196,7 +12228,9 @@@ M:   Herbert Xu <[email protected]
  L:    [email protected]
  S:    Maintained
  F:    lib/rhashtable.c
 +F:    lib/test_rhashtable.c
  F:    include/linux/rhashtable.h
 +F:    include/linux/rhashtable-types.h
  
  RICOH R5C592 MEMORYSTICK DRIVER
  M:    Maxim Levitsky <[email protected]>
@@@ -12435,6 -12418,7 +12452,6 @@@ F:   drivers/pci/hotplug/s390_pci_hpc.
  
  S390 VFIO-CCW DRIVER
  M:    Cornelia Huck <[email protected]>
 -M:    Dong Jia Shi <[email protected]>
  M:    Halil Pasic <[email protected]>
  L:    [email protected]
  L:    [email protected]
@@@ -12819,7 -12803,6 +12836,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  W:    http://kernsec.org/
  S:    Supported
  F:    security/
 +X:    security/selinux/
  
  SELINUX SECURITY MODULE
  M:    Paul Moore <[email protected]>
  W:    http://www.rdrop.com/users/paulmck/RCU/
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 -F:    include/linux/srcu.h
 -F:    kernel/rcu/srcu.c
 +F:    include/linux/srcu*.h
 +F:    kernel/rcu/srcu*.c
  
  SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
  M:    Srinivas Kandagatla <[email protected]>
  S:    Maintained
  F:    drivers/block/skd*[ch]
  
 +STI AUDIO (ASoC) DRIVERS
 +M:    Arnaud Pouliquen <[email protected]>
 +L:    [email protected] (moderated for non-subscribers)
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
 +F:    sound/soc/sti/
 +
  STI CEC DRIVER
  M:    Benjamin Gaignard <[email protected]>
  S:    Maintained
@@@ -13628,14 -13604,6 +13645,14 @@@ T: git git://linuxtv.org/media_tree.gi
  S:    Maintained
  F:    drivers/media/usb/stk1160/
  
 +STM32 AUDIO (ASoC) DRIVERS
 +M:    Olivier Moysan <[email protected]>
 +M:    Arnaud Pouliquen <[email protected]>
 +L:    [email protected] (moderated for non-subscribers)
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/sound/st,stm32-*.txt
 +F:    sound/soc/stm/
 +
  STM32 TIMER/LPTIMER DRIVERS
  M:    Fabrice Gasnier <[email protected]>
  S:    Maintained
@@@ -14106,13 -14074,6 +14123,13 @@@ M: Laxman Dewangan <[email protected]
  S:    Supported
  F:    drivers/input/keyboard/tegra-kbc.c
  
 +TEGRA NAND DRIVER
 +M:    Stefan Agner <[email protected]>
 +M:    Lucas Stach <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/mtd/nvidia-tegra20-nand.txt
 +F:    drivers/mtd/nand/raw/tegra_nand.c
 +
  TEGRA PWM DRIVER
  M:    Thierry Reding <[email protected]>
  S:    Supported
@@@ -14493,7 -14454,6 +14510,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    Documentation/RCU/torture.txt
  F:    kernel/torture.c
  F:    kernel/rcu/rcutorture.c
 +F:    kernel/rcu/rcuperf.c
  F:    kernel/locking/locktorture.c
  
  TOSHIBA ACPI EXTRAS DRIVER
index dd3ff2f2cdce00cb761ffe5be5bd15b0c915c38f,6437b878724a15cbc0e29e7df0808e4deef7a908..85c2d407a52e1a5476b3269d13655606d10478fd
@@@ -429,18 -429,6 +429,18 @@@ static void adv7511_hpd_work(struct wor
        else
                status = connector_status_disconnected;
  
 +      /*
 +       * The bridge resets its registers on unplug. So when we get a plug
 +       * event and we're already supposed to be powered, cycle the bridge to
 +       * restore its state.
 +       */
 +      if (status == connector_status_connected &&
 +          adv7511->connector.status == connector_status_disconnected &&
 +          adv7511->powered) {
 +              regcache_mark_dirty(adv7511->regmap);
 +              adv7511_power_on(adv7511);
 +      }
 +
        if (adv7511->connector.status != status) {
                adv7511->connector.status = status;
                if (status == connector_status_disconnected)
@@@ -613,7 -601,7 +613,7 @@@ static int adv7511_get_modes(struct adv
                __adv7511_power_off(adv7511);
  
  
-       drm_mode_connector_update_edid_property(connector, edid);
+       drm_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid);
  
        adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
@@@ -872,7 -860,7 +872,7 @@@ static int adv7511_bridge_attach(struc
        }
        drm_connector_helper_add(&adv->connector,
                                 &adv7511_connector_helper_funcs);
-       drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+       drm_connector_attach_encoder(&adv->connector, bridge->encoder);
  
        if (adv->type == ADV7533)
                ret = adv7533_attach_dsi(adv);
index 81e32199d3ef4962751e2c010938b80ec8d29110,866a2cc72ef68458e1003135902c0bb913eda4f7..80be74df7ba66355163368f9f2b3eaeaf967a0d8
@@@ -30,6 -30,7 +30,7 @@@
  #include <drm/drm_plane_helper.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_writeback.h>
  #include <linux/dma-fence.h>
  
  #include "drm_crtc_helper_internal.h"
@@@ -120,7 -121,7 +121,7 @@@ static int handle_conflicting_encoders(
                        new_encoder = drm_atomic_helper_best_encoder(connector);
  
                if (new_encoder) {
-                       if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+                       if (encoder_mask & drm_encoder_mask(new_encoder)) {
                                DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
                                        new_encoder->base.id, new_encoder->name,
                                        connector->base.id, connector->name);
                                return -EINVAL;
                        }
  
-                       encoder_mask |= 1 << drm_encoder_index(new_encoder);
+                       encoder_mask |= drm_encoder_mask(new_encoder);
                }
        }
  
                        continue;
  
                encoder = connector->state->best_encoder;
-               if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
+               if (!encoder || !(encoder_mask & drm_encoder_mask(encoder)))
                        continue;
  
                if (!disable_conflicting_encoders) {
@@@ -222,7 -223,7 +223,7 @@@ set_best_encoder(struct drm_atomic_stat
                        crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  
                        crtc_state->encoder_mask &=
-                               ~(1 << drm_encoder_index(conn_state->best_encoder));
+                               ~drm_encoder_mask(conn_state->best_encoder);
                }
        }
  
                        crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
  
                        crtc_state->encoder_mask |=
-                               1 << drm_encoder_index(encoder);
+                               drm_encoder_mask(encoder);
                }
        }
  
@@@ -644,7 -645,7 +645,7 @@@ drm_atomic_helper_check_modeset(struct 
                if (ret)
                        return ret;
  
-               connectors_mask += BIT(i);
+               connectors_mask |= BIT(i);
        }
  
        /*
@@@ -1172,6 -1173,27 +1173,27 @@@ void drm_atomic_helper_commit_modeset_d
  }
  EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
  
+ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
+                                               struct drm_atomic_state *old_state)
+ {
+       struct drm_connector *connector;
+       struct drm_connector_state *new_conn_state;
+       int i;
+       for_each_new_connector_in_state(old_state, connector, new_conn_state, i) {
+               const struct drm_connector_helper_funcs *funcs;
+               funcs = connector->helper_private;
+               if (!funcs->atomic_commit)
+                       continue;
+               if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
+                       WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+                       funcs->atomic_commit(connector, new_conn_state);
+               }
+       }
+ }
  /**
   * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
   * @dev: DRM device
@@@ -1251,6 -1273,8 +1273,8 @@@ void drm_atomic_helper_commit_modeset_e
  
                drm_bridge_enable(encoder->bridge);
        }
+       drm_atomic_helper_commit_writebacks(dev, old_state);
  }
  EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
  
@@@ -1426,6 -1450,8 +1450,8 @@@ void drm_atomic_helper_commit_tail(stru
  
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
  
+       drm_atomic_helper_fake_vblank(old_state);
        drm_atomic_helper_commit_hw_done(old_state);
  
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@@ -1455,6 -1481,8 +1481,8 @@@ void drm_atomic_helper_commit_tail_rpm(
        drm_atomic_helper_commit_planes(dev, old_state,
                                        DRM_PLANE_COMMIT_ACTIVE_ONLY);
  
+       drm_atomic_helper_fake_vblank(old_state);
        drm_atomic_helper_commit_hw_done(old_state);
  
        drm_atomic_helper_wait_for_vblanks(dev, old_state);
@@@ -1510,9 -1538,8 +1538,9 @@@ int drm_atomic_helper_async_check(struc
  {
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
 -      struct drm_plane *plane;
 -      struct drm_plane_state *old_plane_state, *new_plane_state;
 +      struct drm_plane *plane = NULL;
 +      struct drm_plane_state *old_plane_state = NULL;
 +      struct drm_plane_state *new_plane_state = NULL;
        const struct drm_plane_helper_funcs *funcs;
        int i, n_planes = 0;
  
        if (n_planes != 1)
                return -EINVAL;
  
 -      if (!new_plane_state->crtc)
 +      if (!new_plane_state->crtc ||
 +          old_plane_state->crtc != new_plane_state->crtc)
                return -EINVAL;
  
        funcs = plane->helper_private;
@@@ -2031,6 -2057,45 +2059,45 @@@ void drm_atomic_helper_wait_for_depende
  }
  EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
  
+ /**
+  * drm_atomic_helper_fake_vblank - fake VBLANK events if needed
+  * @old_state: atomic state object with old state structures
+  *
+  * This function walks all CRTCs and fake VBLANK events on those with
+  * &drm_crtc_state.no_vblank set to true and &drm_crtc_state.event != NULL.
+  * The primary use of this function is writeback connectors working in oneshot
+  * mode and faking VBLANK events. In this case they only fake the VBLANK event
+  * when a job is queued, and any change to the pipeline that does not touch the
+  * connector is leading to timeouts when calling
+  * drm_atomic_helper_wait_for_vblanks() or
+  * drm_atomic_helper_wait_for_flip_done().
+  *
+  * This is part of the atomic helper support for nonblocking commits, see
+  * drm_atomic_helper_setup_commit() for an overview.
+  */
+ void drm_atomic_helper_fake_vblank(struct drm_atomic_state *old_state)
+ {
+       struct drm_crtc_state *new_crtc_state;
+       struct drm_crtc *crtc;
+       int i;
+       for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
+               unsigned long flags;
+               if (!new_crtc_state->no_vblank)
+                       continue;
+               spin_lock_irqsave(&old_state->dev->event_lock, flags);
+               if (new_crtc_state->event) {
+                       drm_crtc_send_vblank_event(crtc,
+                                                  new_crtc_state->event);
+                       new_crtc_state->event = NULL;
+               }
+               spin_unlock_irqrestore(&old_state->dev->event_lock, flags);
+       }
+ }
+ EXPORT_SYMBOL(drm_atomic_helper_fake_vblank);
  /**
   * drm_atomic_helper_commit_hw_done - setup possible nonblocking commit
   * @old_state: atomic state object with old state structures
@@@ -2322,11 -2387,13 +2389,13 @@@ drm_atomic_helper_commit_planes_on_crtc
        const struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_crtc *crtc = old_crtc_state->crtc;
        struct drm_atomic_state *old_state = old_crtc_state->state;
+       struct drm_crtc_state *new_crtc_state =
+               drm_atomic_get_new_crtc_state(old_state, crtc);
        struct drm_plane *plane;
        unsigned plane_mask;
  
        plane_mask = old_crtc_state->plane_mask;
-       plane_mask |= crtc->state->plane_mask;
+       plane_mask |= new_crtc_state->plane_mask;
  
        crtc_funcs = crtc->helper_private;
        if (crtc_funcs && crtc_funcs->atomic_begin)
        drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
                struct drm_plane_state *old_plane_state =
                        drm_atomic_get_old_plane_state(old_state, plane);
+               struct drm_plane_state *new_plane_state =
+                       drm_atomic_get_new_plane_state(old_state, plane);
                const struct drm_plane_helper_funcs *plane_funcs;
  
                plane_funcs = plane->helper_private;
                if (!old_plane_state || !plane_funcs)
                        continue;
  
-               WARN_ON(plane->state->crtc && plane->state->crtc != crtc);
+               WARN_ON(new_plane_state->crtc &&
+                       new_plane_state->crtc != crtc);
  
-               if (drm_atomic_plane_disabling(old_plane_state, plane->state) &&
+               if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
                    plane_funcs->atomic_disable)
                        plane_funcs->atomic_disable(plane, old_plane_state);
-               else if (plane->state->crtc ||
-                        drm_atomic_plane_disabling(old_plane_state, plane->state))
+               else if (new_plane_state->crtc ||
+                        drm_atomic_plane_disabling(old_plane_state, new_plane_state))
                        plane_funcs->atomic_update(plane, old_plane_state);
        }
  
@@@ -2797,7 -2867,7 +2869,7 @@@ static int update_output_state(struct d
   * resets the "link-status" property to GOOD, to force any link
   * re-training. The SETCRTC ioctl does not define whether an update does
   * need a full modeset or just a plane update, hence we're allowed to do
-  * that. See also drm_mode_connector_set_link_status_property().
+  * that. See also drm_connector_set_link_status_property().
   *
   * Returns:
   * Returns 0 on success, negative errno numbers on failure.
@@@ -2916,7 -2986,6 +2988,6 @@@ static int __drm_atomic_helper_disable_
        struct drm_plane *plane;
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
-       unsigned plane_mask = 0;
        int ret, i;
  
        state = drm_atomic_state_alloc(dev);
                        goto free;
  
                drm_atomic_set_fb_for_plane(plane_state, NULL);
-               if (clean_old_fbs) {
-                       plane->old_fb = plane->fb;
-                       plane_mask |= BIT(drm_plane_index(plane));
-               }
        }
  
        ret = drm_atomic_commit(state);
  free:
-       if (plane_mask)
-               drm_atomic_clean_old_fb(dev, plane_mask, ret);
        drm_atomic_state_put(state);
        return ret;
  }
@@@ -3131,13 -3193,8 +3195,8 @@@ int drm_atomic_helper_commit_duplicated
  
        state->acquire_ctx = ctx;
  
-       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               WARN_ON(plane->crtc != new_plane_state->crtc);
-               WARN_ON(plane->fb != new_plane_state->fb);
-               WARN_ON(plane->old_fb);
+       for_each_new_plane_in_state(state, plane, new_plane_state, i)
                state->planes[i].old_state = plane->state;
-       }
  
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
                state->crtcs[i].old_state = crtc->state;
@@@ -3662,6 -3719,9 +3721,9 @@@ __drm_atomic_helper_connector_duplicate
        if (state->crtc)
                drm_connector_get(connector);
        state->commit = NULL;
+       /* Don't copy over a writeback job, they are used only once */
+       state->writeback_job = NULL;
  }
  EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
  
index b50b74053664c8c15bde326f308a0fee128245b1,c39541ed2219482c17814b9f3190b75f6be3c75d..d6c8f8fdfda5f106776e0a148e034e10e64ccbb7
@@@ -4,7 -4,6 +4,7 @@@
   * Copyright Â© 2017-2018 Intel Corporation
   */
  
 +#include <linux/irq.h>
  #include "i915_pmu.h"
  #include "intel_ringbuffer.h"
  #include "i915_drv.h"
@@@ -128,6 -127,7 +128,7 @@@ static void __i915_pmu_maybe_start_time
  {
        if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
                i915->pmu.timer_enabled = true;
+               i915->pmu.timer_last = ktime_get();
                hrtimer_start_range_ns(&i915->pmu.timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
@@@ -156,12 -156,13 +157,13 @@@ static bool grab_forcewake(struct drm_i
  }
  
  static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
add_sample(struct i915_pmu_sample *sample, u32 val)
  {
-       sample->cur += mul_u32_u32(val, unit);
+       sample->cur += val;
  }
  
- static void engines_sample(struct drm_i915_private *dev_priv)
+ static void
+ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
  {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
  
                val = !i915_seqno_passed(current_seqno, last_seqno);
  
-               update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
-                             PERIOD, val);
+               if (val)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
+                                  period_ns);
  
                if (val && (engine->pmu.enable &
                    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
                        val = 0;
                }
  
-               update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
-                             PERIOD, !!(val & RING_WAIT));
+               if (val & RING_WAIT)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
+                                  period_ns);
  
-               update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
-                             PERIOD, !!(val & RING_WAIT_SEMAPHORE));
+               if (val & RING_WAIT_SEMAPHORE)
+                       add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
+                                  period_ns);
        }
  
        if (fw)
        intel_runtime_pm_put(dev_priv);
  }
  
- static void frequency_sample(struct drm_i915_private *dev_priv)
+ static void
+ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
+ {
+       sample->cur += mul_u32_u32(val, mul);
+ }
+ static void
+ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
  {
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
                        intel_runtime_pm_put(dev_priv);
                }
  
-               update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
-                             1, intel_gpu_freq(dev_priv, val));
+               add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
+                               intel_gpu_freq(dev_priv, val),
+                               period_ns / 1000);
        }
  
        if (dev_priv->pmu.enable &
            config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
-               update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
-                             intel_gpu_freq(dev_priv,
-                                            dev_priv->gt_pm.rps.cur_freq));
+               add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ],
+                               intel_gpu_freq(dev_priv,
+                                              dev_priv->gt_pm.rps.cur_freq),
+                               period_ns / 1000);
        }
  }
  
@@@ -238,14 -251,27 +252,27 @@@ static enum hrtimer_restart i915_sample
  {
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);
+       unsigned int period_ns;
+       ktime_t now;
  
        if (!READ_ONCE(i915->pmu.timer_enabled))
                return HRTIMER_NORESTART;
  
-       engines_sample(i915);
-       frequency_sample(i915);
+       now = ktime_get();
+       period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last));
+       i915->pmu.timer_last = now;
+       /*
+        * Strictly speaking the passed in period may not be 100% accurate for
+        * all internal calculation, since some amount of time can be spent on
+        * grabbing the forcewake. However the potential error from timer call-
+        * back delay greatly dominates this so we keep it simple.
+        */
+       engines_sample(i915, period_ns);
+       frequency_sample(i915, period_ns);
+       hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));
  
-       hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
        return HRTIMER_RESTART;
  }
  
@@@ -520,12 -546,12 +547,12 @@@ static u64 __i915_pmu_event_read(struc
                case I915_PMU_ACTUAL_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
-                                  FREQUENCY);
+                                  USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_REQUESTED_FREQUENCY:
                        val =
                           div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
-                                  FREQUENCY);
+                                  USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_INTERRUPTS:
                        val = count_interrupts(i915);
index 7dd5605d94ae8ae7a515e40ad056a2709d965a2d,bb94172ffc07402461bbfdecc9906387ef8bbfd1..b725835b47efc5116b53e3f27eeb6e0fcdbb65b4
@@@ -59,6 -59,7 +59,7 @@@
   */
  
  /* DP N/M table */
+ #define LC_810M       810000
  #define LC_540M       540000
  #define LC_270M       270000
  #define LC_162M       162000
@@@ -99,6 -100,15 +100,15 @@@ static const struct dp_aud_n_m dp_aud_n
        { 128000, LC_540M, 4096, 33750 },
        { 176400, LC_540M, 3136, 18750 },
        { 192000, LC_540M, 2048, 11250 },
+       { 32000, LC_810M, 1024, 50625 },
+       { 44100, LC_810M, 784, 28125 },
+       { 48000, LC_810M, 512, 16875 },
+       { 64000, LC_810M, 2048, 50625 },
+       { 88200, LC_810M, 1568, 28125 },
+       { 96000, LC_810M, 1024, 16875 },
+       { 128000, LC_810M, 4096, 50625 },
+       { 176400, LC_810M, 3136, 28125 },
+       { 192000, LC_810M, 2048, 16875 },
  };
  
  static const struct dp_aud_n_m *
@@@ -198,13 -208,13 +208,13 @@@ static int audio_config_hdmi_get_n(cons
  }
  
  static bool intel_eld_uptodate(struct drm_connector *connector,
-                              i915_reg_t reg_eldv, uint32_t bits_eldv,
-                              i915_reg_t reg_elda, uint32_t bits_elda,
+                              i915_reg_t reg_eldv, u32 bits_eldv,
+                              i915_reg_t reg_elda, u32 bits_elda,
                               i915_reg_t reg_edid)
  {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
-       uint8_t *eld = connector->eld;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 tmp;
        int i;
  
        tmp = I915_READ(reg_eldv);
        I915_WRITE(reg_elda, tmp);
  
        for (i = 0; i < drm_eld_size(eld) / 4; i++)
-               if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+               if (I915_READ(reg_edid) != *((const u32 *)eld + i))
                        return false;
  
        return true;
@@@ -229,7 -239,7 +239,7 @@@ static void g4x_audio_codec_disable(str
                                    const struct drm_connector_state *old_conn_state)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       uint32_t eldv, tmp;
+       u32 eldv, tmp;
  
        DRM_DEBUG_KMS("Disable audio codec\n");
  
@@@ -251,12 -261,12 +261,12 @@@ static void g4x_audio_codec_enable(stru
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct drm_connector *connector = conn_state->connector;
-       uint8_t *eld = connector->eld;
-       uint32_t eldv;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 eldv;
+       u32 tmp;
        int len, i;
  
-       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+       DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
  
        tmp = I915_READ(G4X_AUD_VID_DID);
        if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
        len = min(drm_eld_size(eld) / 4, len);
        DRM_DEBUG_DRIVER("ELD size %d\n", len);
        for (i = 0; i < len; i++)
-               I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+               I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
  
        tmp = I915_READ(G4X_AUD_CNTL_ST);
        tmp |= eldv;
@@@ -393,7 -403,7 +403,7 @@@ static void hsw_audio_codec_disable(str
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
-       uint32_t tmp;
+       u32 tmp;
  
        DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
  
@@@ -426,8 -436,8 +436,8 @@@ static void hsw_audio_codec_enable(stru
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_connector *connector = conn_state->connector;
        enum pipe pipe = crtc->pipe;
-       const uint8_t *eld = connector->eld;
-       uint32_t tmp;
+       const u8 *eld = connector->eld;
+       u32 tmp;
        int len, i;
  
        DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
        /* Up to 84 bytes of hw ELD buffer */
        len = min(drm_eld_size(eld), 84);
        for (i = 0; i < len / 4; i++)
-               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+               I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((const u32 *)eld + i));
  
        /* ELD valid */
        tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
@@@ -477,7 -487,7 +487,7 @@@ static void ilk_audio_codec_disable(str
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        enum pipe pipe = crtc->pipe;
        enum port port = encoder->port;
-       uint32_t tmp, eldv;
+       u32 tmp, eldv;
        i915_reg_t aud_config, aud_cntrl_st2;
  
        DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
@@@ -524,8 -534,8 +534,8 @@@ static void ilk_audio_codec_enable(stru
        struct drm_connector *connector = conn_state->connector;
        enum pipe pipe = crtc->pipe;
        enum port port = encoder->port;
-       uint8_t *eld = connector->eld;
-       uint32_t tmp, eldv;
+       const u8 *eld = connector->eld;
+       u32 tmp, eldv;
        int len, i;
        i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
  
        /* Up to 84 bytes of hw ELD buffer */
        len = min(drm_eld_size(eld), 84);
        for (i = 0; i < len / 4; i++)
-               I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+               I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
  
        /* ELD valid */
        tmp = I915_READ(aud_cntrl_st2);
@@@ -639,12 -649,11 +649,12 @@@ void intel_audio_codec_enable(struct in
        dev_priv->av_enc_map[pipe] = encoder;
        mutex_unlock(&dev_priv->av_mutex);
  
 -      if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
 +      if (acomp && acomp->base.audio_ops &&
 +          acomp->base.audio_ops->pin_eld_notify) {
                /* audio drivers expect pipe = -1 to indicate Non-MST cases */
                if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
                        pipe = -1;
 -              acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
 +              acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
                                                 (int) port, (int) pipe);
        }
  
@@@ -682,12 -691,11 +692,12 @@@ void intel_audio_codec_disable(struct i
        dev_priv->av_enc_map[pipe] = NULL;
        mutex_unlock(&dev_priv->av_mutex);
  
 -      if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
 +      if (acomp && acomp->base.audio_ops &&
 +          acomp->base.audio_ops->pin_eld_notify) {
                /* audio drivers expect pipe = -1 to indicate Non-MST cases */
                if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
                        pipe = -1;
 -              acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
 +              acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
                                                 (int) port, (int) pipe);
        }
  
@@@ -882,7 -890,7 +892,7 @@@ static int i915_audio_component_get_eld
        return ret;
  }
  
 -static const struct i915_audio_component_ops i915_audio_component_ops = {
 +static const struct drm_audio_component_ops i915_audio_component_ops = {
        .owner          = THIS_MODULE,
        .get_power      = i915_audio_component_get_power,
        .put_power      = i915_audio_component_put_power,
@@@ -899,12 -907,12 +909,12 @@@ static int i915_audio_component_bind(st
        struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
        int i;
  
 -      if (WARN_ON(acomp->ops || acomp->dev))
 +      if (WARN_ON(acomp->base.ops || acomp->base.dev))
                return -EEXIST;
  
        drm_modeset_lock_all(&dev_priv->drm);
 -      acomp->ops = &i915_audio_component_ops;
 -      acomp->dev = i915_kdev;
 +      acomp->base.ops = &i915_audio_component_ops;
 +      acomp->base.dev = i915_kdev;
        BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
        for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
                acomp->aud_sample_rate[i] = 0;
@@@ -921,8 -929,8 +931,8 @@@ static void i915_audio_component_unbind
        struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
  
        drm_modeset_lock_all(&dev_priv->drm);
 -      acomp->ops = NULL;
 -      acomp->dev = NULL;
 +      acomp->base.ops = NULL;
 +      acomp->base.dev = NULL;
        dev_priv->audio_component = NULL;
        drm_modeset_unlock_all(&dev_priv->drm);
  }
index b4941101f21a9fed58ce30c960174194afd36860,430732720e656b0468b0914d9d1e528ecc3cf5f8..cdf19553ffacd28f1097bb2096b8cc35d4654b84
@@@ -62,7 -62,6 +62,7 @@@
  
  #include <linux/acpi.h>
  #include <linux/device.h>
 +#include <linux/irq.h>
  #include <linux/pci.h>
  #include <linux/pm_runtime.h>
  
@@@ -127,9 -126,7 +127,7 @@@ lpe_audio_platdev_create(struct drm_i91
                return platdev;
        }
  
-       pm_runtime_forbid(&platdev->dev);
-       pm_runtime_set_active(&platdev->dev);
-       pm_runtime_enable(&platdev->dev);
+       pm_runtime_no_callbacks(&platdev->dev);
  
        return platdev;
  }
index a951ec75d01f8b1f579402512a4696af2c696533,9d7a36f148cfe1d0fef1cc1d1a8fdb0aab3e0cca..cfb50fedfa2b3a49bd37198f093eff841ecd3603
@@@ -320,9 -320,6 +320,9 @@@ static int vc4_plane_setup_clipping_and
                        vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
                if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
                        vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
 +      } else {
 +              vc4_state->x_scaling[1] = VC4_SCALING_NONE;
 +              vc4_state->y_scaling[1] = VC4_SCALING_NONE;
        }
  
        vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
@@@ -470,12 -467,14 +470,14 @@@ static int vc4_plane_mode_set(struct dr
        struct drm_framebuffer *fb = state->fb;
        u32 ctl0_offset = vc4_state->dlist_count;
        const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
+       u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
        int num_planes = drm_format_num_planes(format->drm);
        bool mix_plane_alpha;
        bool covers_screen;
        u32 scl0, scl1, pitch0;
        u32 lbm_size, tiling;
        unsigned long irqflags;
+       u32 hvs_format = format->hvs;
        int ret, i;
  
        ret = vc4_plane_setup_clipping_and_scaling(state);
                scl1 = vc4_get_scl_field(state, 0);
        }
  
-       switch (fb->modifier) {
+       switch (base_format_mod) {
        case DRM_FORMAT_MOD_LINEAR:
                tiling = SCALER_CTL0_TILING_LINEAR;
                pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
                break;
        }
  
+       case DRM_FORMAT_MOD_BROADCOM_SAND64:
+       case DRM_FORMAT_MOD_BROADCOM_SAND128:
+       case DRM_FORMAT_MOD_BROADCOM_SAND256: {
+               uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+               /* Column-based NV12 or RGBA.
+                */
+               if (fb->format->num_planes > 1) {
+                       if (hvs_format != HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE) {
+                               DRM_DEBUG_KMS("SAND format only valid for NV12/21");
+                               return -EINVAL;
+                       }
+                       hvs_format = HVS_PIXEL_FORMAT_H264;
+               } else {
+                       if (base_format_mod == DRM_FORMAT_MOD_BROADCOM_SAND256) {
+                               DRM_DEBUG_KMS("SAND256 format only valid for H.264");
+                               return -EINVAL;
+                       }
+               }
+               switch (base_format_mod) {
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+                       tiling = SCALER_CTL0_TILING_64B;
+                       break;
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+                       tiling = SCALER_CTL0_TILING_128B;
+                       break;
+               case DRM_FORMAT_MOD_BROADCOM_SAND256:
+                       tiling = SCALER_CTL0_TILING_256B_OR_T;
+                       break;
+               default:
+                       break;
+               }
+               if (param > SCALER_TILE_HEIGHT_MASK) {
+                       DRM_DEBUG_KMS("SAND height too large (%d)\n", param);
+                       return -EINVAL;
+               }
+               pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
+               break;
+       }
        default:
                DRM_DEBUG_KMS("Unsupported FB tiling flag 0x%16llx",
                              (long long)fb->modifier);
        /* Control word */
        vc4_dlist_write(vc4_state,
                        SCALER_CTL0_VALID |
+                       VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
                        (format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
-                       (format->hvs << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
+                       (hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
                        VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
                        (vc4_state->is_unity ? SCALER_CTL0_UNITY : 0) |
                        VC4_SET_FIELD(scl0, SCALER_CTL0_SCL0) |
  
        /* Pitch word 1/2 */
        for (i = 1; i < num_planes; i++) {
-               vc4_dlist_write(vc4_state,
-                               VC4_SET_FIELD(fb->pitches[i], SCALER_SRC_PITCH));
+               if (hvs_format != HVS_PIXEL_FORMAT_H264) {
+                       vc4_dlist_write(vc4_state,
+                                       VC4_SET_FIELD(fb->pitches[i],
+                                                     SCALER_SRC_PITCH));
+               } else {
+                       vc4_dlist_write(vc4_state, pitch0);
+               }
        }
  
        /* Colorspace conversion words */
@@@ -813,18 -861,21 +864,21 @@@ static int vc4_prepare_fb(struct drm_pl
        struct dma_fence *fence;
        int ret;
  
-       if ((plane->state->fb == state->fb) || !state->fb)
+       if (!state->fb)
                return 0;
  
        bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
  
+       fence = reservation_object_get_excl_rcu(bo->resv);
+       drm_atomic_set_fence_for_plane(state, fence);
+       if (plane->state->fb == state->fb)
+               return 0;
        ret = vc4_bo_inc_usecnt(bo);
        if (ret)
                return ret;
  
-       fence = reservation_object_get_excl_rcu(bo->resv);
-       drm_atomic_set_fence_for_plane(state, fence);
        return 0;
  }
  
@@@ -851,7 -902,7 +905,7 @@@ static const struct drm_plane_helper_fu
  
  static void vc4_plane_destroy(struct drm_plane *plane)
  {
-       drm_plane_helper_disable(plane);
+       drm_plane_helper_disable(plane, NULL);
        drm_plane_cleanup(plane);
  }
  
@@@ -869,13 -920,32 +923,32 @@@ static bool vc4_format_mod_supported(st
        case DRM_FORMAT_BGR565:
        case DRM_FORMAT_ARGB1555:
        case DRM_FORMAT_XRGB1555:
-               return true;
+               switch (fourcc_mod_broadcom_mod(modifier)) {
+               case DRM_FORMAT_MOD_LINEAR:
+               case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+                       return true;
+               default:
+                       return false;
+               }
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_NV21:
+               switch (fourcc_mod_broadcom_mod(modifier)) {
+               case DRM_FORMAT_MOD_LINEAR:
+               case DRM_FORMAT_MOD_BROADCOM_SAND64:
+               case DRM_FORMAT_MOD_BROADCOM_SAND128:
+               case DRM_FORMAT_MOD_BROADCOM_SAND256:
+                       return true;
+               default:
+                       return false;
+               }
        case DRM_FORMAT_YUV422:
        case DRM_FORMAT_YVU422:
        case DRM_FORMAT_YUV420:
        case DRM_FORMAT_YVU420:
-       case DRM_FORMAT_NV12:
        case DRM_FORMAT_NV16:
+       case DRM_FORMAT_NV61:
        default:
                return (modifier == DRM_FORMAT_MOD_LINEAR);
        }
@@@ -903,6 -973,9 +976,9 @@@ struct drm_plane *vc4_plane_init(struc
        unsigned i;
        static const uint64_t modifiers[] = {
                DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED,
+               DRM_FORMAT_MOD_BROADCOM_SAND128,
+               DRM_FORMAT_MOD_BROADCOM_SAND64,
+               DRM_FORMAT_MOD_BROADCOM_SAND256,
                DRM_FORMAT_MOD_LINEAR,
                DRM_FORMAT_MOD_INVALID
        };
index 589fd923c5508cb5dee94b2949aace2a9af21df6,0000000000000000000000000000000000000000..cba6b586bfbdfe00e97b7e9d63b789255acfda61
mode 100644,000000..100644
--- /dev/null
@@@ -1,857 -1,0 +1,856 @@@
-                                struct device *target_dev,
 +// SPDX-License-Identifier: GPL-2.0
 +
 +/*
 + * Xen dma-buf functionality for gntdev.
 + *
 + * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 + *
 + * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 + */
 +
 +#include <linux/kernel.h>
 +#include <linux/errno.h>
 +#include <linux/dma-buf.h>
 +#include <linux/slab.h>
 +#include <linux/types.h>
 +#include <linux/uaccess.h>
 +
 +#include <xen/xen.h>
 +#include <xen/grant_table.h>
 +
 +#include "gntdev-common.h"
 +#include "gntdev-dmabuf.h"
 +
 +#ifndef GRANT_INVALID_REF
 +/*
 + * Note on usage of grant reference 0 as invalid grant reference:
 + * grant reference 0 is valid, but never exposed to a driver,
 + * because of the fact it is already in use/reserved by the PV console.
 + */
 +#define GRANT_INVALID_REF     0
 +#endif
 +
 +struct gntdev_dmabuf {
 +      struct gntdev_dmabuf_priv *priv;
 +      struct dma_buf *dmabuf;
 +      struct list_head next;
 +      int fd;
 +
 +      union {
 +              struct {
 +                      /* Exported buffers are reference counted. */
 +                      struct kref refcount;
 +
 +                      struct gntdev_priv *priv;
 +                      struct gntdev_grant_map *map;
 +              } exp;
 +              struct {
 +                      /* Granted references of the imported buffer. */
 +                      grant_ref_t *refs;
 +                      /* Scatter-gather table of the imported buffer. */
 +                      struct sg_table *sgt;
 +                      /* dma-buf attachment of the imported buffer. */
 +                      struct dma_buf_attachment *attach;
 +              } imp;
 +      } u;
 +
 +      /* Number of pages this buffer has. */
 +      int nr_pages;
 +      /* Pages of this buffer. */
 +      struct page **pages;
 +};
 +
 +struct gntdev_dmabuf_wait_obj {
 +      struct list_head next;
 +      struct gntdev_dmabuf *gntdev_dmabuf;
 +      struct completion completion;
 +};
 +
 +struct gntdev_dmabuf_attachment {
 +      struct sg_table *sgt;
 +      enum dma_data_direction dir;
 +};
 +
 +struct gntdev_dmabuf_priv {
 +      /* List of exported DMA buffers. */
 +      struct list_head exp_list;
 +      /* List of wait objects. */
 +      struct list_head exp_wait_list;
 +      /* List of imported DMA buffers. */
 +      struct list_head imp_list;
 +      /* This is the lock which protects dma_buf_xxx lists. */
 +      struct mutex lock;
 +};
 +
 +/* DMA buffer export support. */
 +
 +/* Implementation of wait for exported DMA buffer to be released. */
 +
 +static void dmabuf_exp_release(struct kref *kref);
 +
 +static struct gntdev_dmabuf_wait_obj *
 +dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
 +                      struct gntdev_dmabuf *gntdev_dmabuf)
 +{
 +      struct gntdev_dmabuf_wait_obj *obj;
 +
 +      obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 +      if (!obj)
 +              return ERR_PTR(-ENOMEM);
 +
 +      init_completion(&obj->completion);
 +      obj->gntdev_dmabuf = gntdev_dmabuf;
 +
 +      mutex_lock(&priv->lock);
 +      list_add(&obj->next, &priv->exp_wait_list);
 +      /* Put our reference and wait for gntdev_dmabuf's release to fire. */
 +      kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
 +      mutex_unlock(&priv->lock);
 +      return obj;
 +}
 +
 +static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
 +                                   struct gntdev_dmabuf_wait_obj *obj)
 +{
 +      mutex_lock(&priv->lock);
 +      list_del(&obj->next);
 +      mutex_unlock(&priv->lock);
 +      kfree(obj);
 +}
 +
 +static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
 +                                  u32 wait_to_ms)
 +{
 +      if (wait_for_completion_timeout(&obj->completion,
 +                      msecs_to_jiffies(wait_to_ms)) <= 0)
 +              return -ETIMEDOUT;
 +
 +      return 0;
 +}
 +
 +static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
 +                                     struct gntdev_dmabuf *gntdev_dmabuf)
 +{
 +      struct gntdev_dmabuf_wait_obj *obj;
 +
 +      list_for_each_entry(obj, &priv->exp_wait_list, next)
 +              if (obj->gntdev_dmabuf == gntdev_dmabuf) {
 +                      pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
 +                      complete_all(&obj->completion);
 +                      break;
 +              }
 +}
 +
 +static struct gntdev_dmabuf *
 +dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
 +
 +      mutex_lock(&priv->lock);
 +      list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
 +              if (gntdev_dmabuf->fd == fd) {
 +                      pr_debug("Found gntdev_dmabuf in the wait list\n");
 +                      kref_get(&gntdev_dmabuf->u.exp.refcount);
 +                      ret = gntdev_dmabuf;
 +                      break;
 +              }
 +      mutex_unlock(&priv->lock);
 +      return ret;
 +}
 +
 +static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
 +                                  int wait_to_ms)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf;
 +      struct gntdev_dmabuf_wait_obj *obj;
 +      int ret;
 +
 +      pr_debug("Will wait for dma-buf with fd %d\n", fd);
 +      /*
 +       * Try to find the DMA buffer: if not found means that
 +       * either the buffer has already been released or file descriptor
 +       * provided is wrong.
 +       */
 +      gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
 +      if (IS_ERR(gntdev_dmabuf))
 +              return PTR_ERR(gntdev_dmabuf);
 +
 +      /*
 +       * gntdev_dmabuf still exists and is reference count locked by us now,
 +       * so prepare to wait: allocate wait object and add it to the wait list,
 +       * so we can find it on release.
 +       */
 +      obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
 +      if (IS_ERR(obj))
 +              return PTR_ERR(obj);
 +
 +      ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
 +      dmabuf_exp_wait_obj_free(priv, obj);
 +      return ret;
 +}
 +
 +/* DMA buffer export support. */
 +
 +static struct sg_table *
 +dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
 +{
 +      struct sg_table *sgt;
 +      int ret;
 +
 +      sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
 +      if (!sgt) {
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +
 +      ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
 +                                      nr_pages << PAGE_SHIFT,
 +                                      GFP_KERNEL);
 +      if (ret)
 +              goto out;
 +
 +      return sgt;
 +
 +out:
 +      kfree(sgt);
 +      return ERR_PTR(ret);
 +}
 +
 +static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
 +                               struct dma_buf_attachment *attach)
 +{
 +      struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;
 +
 +      gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
 +                                     GFP_KERNEL);
 +      if (!gntdev_dmabuf_attach)
 +              return -ENOMEM;
 +
 +      gntdev_dmabuf_attach->dir = DMA_NONE;
 +      attach->priv = gntdev_dmabuf_attach;
 +      return 0;
 +}
 +
 +/*
 + * dma_buf_ops.detach: tear down the cached mapping (if one was created by
 + * dmabuf_exp_ops_map_dma_buf()) and free the per-attachment state.  This
 + * is where the actual dma_unmap happens; unmap_dma_buf() is a no-op.
 + */
 +static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
 +                                struct dma_buf_attachment *attach)
 +{
 +      struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
 +
 +      if (gntdev_dmabuf_attach) {
 +              struct sg_table *sgt = gntdev_dmabuf_attach->sgt;
 +
 +              if (sgt) {
 +                      /* Only unmap if map_dma_buf() actually mapped it. */
 +                      if (gntdev_dmabuf_attach->dir != DMA_NONE)
 +                              dma_unmap_sg_attrs(attach->dev, sgt->sgl,
 +                                                 sgt->nents,
 +                                                 gntdev_dmabuf_attach->dir,
 +                                                 DMA_ATTR_SKIP_CPU_SYNC);
 +                      sg_free_table(sgt);
 +              }
 +
 +              kfree(sgt);
 +              kfree(gntdev_dmabuf_attach);
 +              attach->priv = NULL;
 +      }
 +}
 +
 +/*
 + * dma_buf_ops.map_dma_buf: map the exported pages for @attach->dev.
 + * The resulting sg_table is cached in the attachment and reused on
 + * subsequent calls with the same direction; a second map with a different
 + * direction is rejected with -EBUSY.  Cleanup happens in detach().
 + */
 +static struct sg_table *
 +dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
 +                         enum dma_data_direction dir)
 +{
 +      struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
 +      struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
 +      struct sg_table *sgt;
 +
 +      pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
 +               attach->dev);
 +
 +      if (dir == DMA_NONE || !gntdev_dmabuf_attach)
 +              return ERR_PTR(-EINVAL);
 +
 +      /* Return the cached mapping when possible. */
 +      if (gntdev_dmabuf_attach->dir == dir)
 +              return gntdev_dmabuf_attach->sgt;
 +
 +      /*
 +       * Two mappings with different directions for the same attachment are
 +       * not allowed.
 +       */
 +      if (gntdev_dmabuf_attach->dir != DMA_NONE)
 +              return ERR_PTR(-EBUSY);
 +
 +      sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
 +                                gntdev_dmabuf->nr_pages);
 +      if (!IS_ERR(sgt)) {
 +              if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
 +                                    DMA_ATTR_SKIP_CPU_SYNC)) {
 +                      /* dma_map_sg_attrs() mapped 0 entries: treat as OOM. */
 +                      sg_free_table(sgt);
 +                      kfree(sgt);
 +                      sgt = ERR_PTR(-ENOMEM);
 +              } else {
 +                      gntdev_dmabuf_attach->sgt = sgt;
 +                      gntdev_dmabuf_attach->dir = dir;
 +              }
 +      }
 +      if (IS_ERR(sgt))
 +              pr_debug("Failed to map sg table for dev %p\n", attach->dev);
 +      return sgt;
 +}
 +
 +/* Intentional no-op: the cached mapping lives until detach. */
 +static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
 +                                       struct sg_table *sgt,
 +                                       enum dma_data_direction dir)
 +{
 +      /* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
 +}
 +
 +/*
 + * Final kref release for an exported gntdev_dmabuf: wake anyone blocked in
 + * the wait-released ioctl, unlink from the export list and free the object.
 + * Invoked via kref_put() in dmabuf_exp_ops_release(), which holds the
 + * dmabuf priv lock — so the list_del() here is serialized by that lock.
 + */
 +static void dmabuf_exp_release(struct kref *kref)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf =
 +              container_of(kref, struct gntdev_dmabuf, u.exp.refcount);
 +
 +      dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
 +      list_del(&gntdev_dmabuf->next);
 +      kfree(gntdev_dmabuf);
 +}
 +
 +/*
 + * Unlink @map from @priv's map list and drop its reference.  gntdev_put_map()
 + * is passed a NULL priv to signal that the map is already off the list.
 + */
 +static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
 +                                struct gntdev_grant_map *map)
 +{
 +      mutex_lock(&priv->lock);
 +      list_del(&map->next);
 +      gntdev_put_map(NULL /* already removed */, map);
 +      mutex_unlock(&priv->lock);
 +}
 +
 +/*
 + * dma_buf_ops.release: the last fd/reference to the exported buffer is
 + * gone.  Release the backing grant map, then drop the export refcount
 + * (freeing gntdev_dmabuf and signalling waiters via dmabuf_exp_release()).
 + */
 +static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
 +      struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;
 +
 +      dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
 +                            gntdev_dmabuf->u.exp.map);
 +      mutex_lock(&priv->lock);
 +      kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
 +      mutex_unlock(&priv->lock);
 +}
 +
 +/*
 + * CPU-access stubs.  The dma_buf core requires these ops to exist, but
 + * this exporter does not support kernel mapping or userspace mmap of the
 + * buffer; kmap returns NULL and mmap succeeds without mapping anything.
 + */
 +static void *dmabuf_exp_ops_kmap(struct dma_buf *dma_buf,
 +                               unsigned long page_num)
 +{
 +      /* Not implemented. */
 +      return NULL;
 +}
 +
 +static void dmabuf_exp_ops_kunmap(struct dma_buf *dma_buf,
 +                                unsigned long page_num, void *addr)
 +{
 +      /* Not implemented. */
 +}
 +
 +static int dmabuf_exp_ops_mmap(struct dma_buf *dma_buf,
 +                             struct vm_area_struct *vma)
 +{
 +      /* Not implemented. */
 +      return 0;
 +}
 +
 +/* dma_buf exporter callbacks for grant-backed buffers. */
 +static const struct dma_buf_ops dmabuf_exp_ops =  {
 +      .attach = dmabuf_exp_ops_attach,
 +      .detach = dmabuf_exp_ops_detach,
 +      .map_dma_buf = dmabuf_exp_ops_map_dma_buf,
 +      .unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
 +      .release = dmabuf_exp_ops_release,
 +      .map = dmabuf_exp_ops_kmap,
 +      .unmap = dmabuf_exp_ops_kunmap,
 +      .mmap = dmabuf_exp_ops_mmap,
 +};
 +
 +/*
 + * Parameter bundle for dmabuf_exp_from_pages().
 + * @fd is an output: the exported dma-buf file descriptor on success.
 + */
 +struct gntdev_dmabuf_export_args {
 +      struct gntdev_priv *priv;
 +      struct gntdev_grant_map *map;
 +      struct gntdev_dmabuf_priv *dmabuf_priv;
 +      struct device *dev;
 +      int count;
 +      struct page **pages;
 +      u32 fd;
 +};
 +
 +/*
 + * Export @args->pages as a new dma-buf, install it as a file descriptor
 + * and link the bookkeeping object on the export list.  On success returns
 + * 0 and stores the fd in args->fd; on failure returns a negative errno.
 + */
 +static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
 +{
 +      DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 +      struct gntdev_dmabuf *gntdev_dmabuf;
 +      int ret;
 +
 +      gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
 +      if (!gntdev_dmabuf)
 +              return -ENOMEM;
 +
 +      kref_init(&gntdev_dmabuf->u.exp.refcount);
 +
 +      gntdev_dmabuf->priv = args->dmabuf_priv;
 +      gntdev_dmabuf->nr_pages = args->count;
 +      gntdev_dmabuf->pages = args->pages;
 +      gntdev_dmabuf->u.exp.priv = args->priv;
 +      gntdev_dmabuf->u.exp.map = args->map;
 +
 +      exp_info.exp_name = KBUILD_MODNAME;
 +      /* Pin module ownership to the driver that owns the DMA device. */
 +      if (args->dev->driver && args->dev->driver->owner)
 +              exp_info.owner = args->dev->driver->owner;
 +      else
 +              exp_info.owner = THIS_MODULE;
 +      exp_info.ops = &dmabuf_exp_ops;
 +      exp_info.size = args->count << PAGE_SHIFT;
 +      exp_info.flags = O_RDWR;
 +      exp_info.priv = gntdev_dmabuf;
 +
 +      gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
 +      if (IS_ERR(gntdev_dmabuf->dmabuf)) {
 +              ret = PTR_ERR(gntdev_dmabuf->dmabuf);
 +              gntdev_dmabuf->dmabuf = NULL;
 +              goto fail;
 +      }
 +
 +      ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
 +      if (ret < 0)
 +              goto fail;
 +
 +      gntdev_dmabuf->fd = ret;
 +      args->fd = ret;
 +
 +      pr_debug("Exporting DMA buffer with fd %d\n", ret);
 +
 +      mutex_lock(&args->dmabuf_priv->lock);
 +      list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
 +      mutex_unlock(&args->dmabuf_priv->lock);
 +      return 0;
 +
 +fail:
 +      /*
 +       * NOTE(review): once dma_buf_export() succeeded, dma_buf_put() will
 +       * eventually invoke dmabuf_exp_ops_release(), whose kref_put frees
 +       * gntdev_dmabuf — making the kfree() below look like a double free
 +       * on the dma_buf_fd() failure path.  Confirm against the dma-buf
 +       * release semantics.
 +       */
 +      if (gntdev_dmabuf->dmabuf)
 +              dma_buf_put(gntdev_dmabuf->dmabuf);
 +      kfree(gntdev_dmabuf);
 +      return ret;
 +}
 +
 +/*
 + * Allocate a grant map with DMA-able backing storage for @count pages.
 + * Rejects non-positive counts and the mutually exclusive WC+COHERENT flag
 + * combination.  Returns the map or ERR_PTR(-EINVAL/-ENOMEM).
 + */
 +static struct gntdev_grant_map *
 +dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
 +                               int count)
 +{
 +      struct gntdev_grant_map *map;
 +
 +      if (unlikely(count <= 0))
 +              return ERR_PTR(-EINVAL);
 +
 +      if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
 +          (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
 +              pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
 +              return ERR_PTR(-EINVAL);
 +      }
 +
 +      map = gntdev_alloc_map(priv, count, dmabuf_flags);
 +      if (!map)
 +              return ERR_PTR(-ENOMEM);
 +
 +      /* Enforce the global mapped-pages limit before committing the map. */
 +      if (unlikely(gntdev_account_mapped_pages(count))) {
 +              pr_debug("can't map %d pages: over limit\n", count);
 +              gntdev_put_map(NULL, map);
 +              return ERR_PTR(-ENOMEM);
 +      }
 +      return map;
 +}
 +
 +/*
 + * Map @count grant references from domain @domid and export the mapped
 + * pages as a dma-buf.  On success stores the new fd in *@fd and returns 0;
 + * on failure unwinds the grant map and returns a negative errno.
 + */
 +static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
 +                              int count, u32 domid, u32 *refs, u32 *fd)
 +{
 +      struct gntdev_grant_map *map;
 +      struct gntdev_dmabuf_export_args args;
 +      int i, ret;
 +
 +      map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
 +      if (IS_ERR(map))
 +              return PTR_ERR(map);
 +
 +      for (i = 0; i < count; i++) {
 +              map->grants[i].domid = domid;
 +              map->grants[i].ref = refs[i];
 +      }
 +
 +      mutex_lock(&priv->lock);
 +      gntdev_add_map(priv, map);
 +      mutex_unlock(&priv->lock);
 +
 +      map->flags |= GNTMAP_host_map;
 +#if defined(CONFIG_X86)
 +      map->flags |= GNTMAP_device_map;
 +#endif
 +
 +      ret = gntdev_map_grant_pages(map);
 +      if (ret < 0)
 +              goto out;
 +
 +      args.priv = priv;
 +      args.map = map;
 +      args.dev = priv->dma_dev;
 +      args.dmabuf_priv = priv->dmabuf_priv;
 +      args.count = map->count;
 +      args.pages = map->pages;
 +      args.fd = -1; /* Shut up unnecessary gcc warning for i386 */
 +
 +      ret = dmabuf_exp_from_pages(&args);
 +      if (ret < 0)
 +              goto out;
 +
 +      *fd = args.fd;
 +      return 0;
 +
 +out:
 +      dmabuf_exp_remove_map(priv, map);
 +      return ret;
 +}
 +
 +/* DMA buffer import support. */
 +
 +/*
 + * Grant domain @domid read/write access to @count pages, filling @refs
 + * with the grant references.  On failure all references claimed from the
 + * private head are returned to the free pool; already-filled entries in
 + * @refs are cleaned up by the caller via dmabuf_imp_end_foreign_access().
 + */
 +static int
 +dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
 +                              int count, int domid)
 +{
 +      grant_ref_t priv_gref_head;
 +      int i, ret;
 +
 +      ret = gnttab_alloc_grant_references(count, &priv_gref_head);
 +      if (ret < 0) {
 +              pr_debug("Cannot allocate grant references, ret %d\n", ret);
 +              return ret;
 +      }
 +
 +      for (i = 0; i < count; i++) {
 +              int cur_ref;
 +
 +              cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
 +              if (cur_ref < 0) {
 +                      ret = cur_ref;
 +                      pr_debug("Cannot claim grant reference, ret %d\n", ret);
 +                      goto out;
 +              }
 +
 +              /* Last argument 0: grant is read/write, not read-only. */
 +              gnttab_grant_foreign_access_ref(cur_ref, domid,
 +                                              xen_page_to_gfn(pages[i]), 0);
 +              refs[i] = cur_ref;
 +      }
 +
 +      return 0;
 +
 +out:
 +      gnttab_free_grant_references(priv_gref_head);
 +      return ret;
 +}
 +
 +/*
 + * Revoke foreign access for every valid entry in @refs.  Safe to call on
 + * a partially initialized array: unused slots hold GRANT_INVALID_REF.
 + */
 +static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 +{
 +      int i;
 +
 +      for (i = 0; i < count; i++)
 +              if (refs[i] != GRANT_INVALID_REF)
 +                      gnttab_end_foreign_access(refs[i], 0, 0UL);
 +}
 +
 +/* Free an import-side gntdev_dmabuf and its page/ref arrays. */
 +static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
 +{
 +      kfree(gntdev_dmabuf->pages);
 +      kfree(gntdev_dmabuf->u.imp.refs);
 +      kfree(gntdev_dmabuf);
 +}
 +
 +/*
 + * Allocate an import-side gntdev_dmabuf with room for @count pages and
 + * grant refs.  All refs start as GRANT_INVALID_REF so partial teardown via
 + * dmabuf_imp_end_foreign_access() is safe.  Returns ERR_PTR(-ENOMEM) on
 + * any allocation failure.
 + */
 +static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf;
 +      int i;
 +
 +      gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
 +      if (!gntdev_dmabuf)
 +              goto fail_no_free;
 +
 +      gntdev_dmabuf->u.imp.refs = kcalloc(count,
 +                                          sizeof(gntdev_dmabuf->u.imp.refs[0]),
 +                                          GFP_KERNEL);
 +      if (!gntdev_dmabuf->u.imp.refs)
 +              goto fail;
 +
 +      gntdev_dmabuf->pages = kcalloc(count,
 +                                     sizeof(gntdev_dmabuf->pages[0]),
 +                                     GFP_KERNEL);
 +      if (!gntdev_dmabuf->pages)
 +              goto fail;
 +
 +      gntdev_dmabuf->nr_pages = count;
 +
 +      for (i = 0; i < count; i++)
 +              gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;
 +
 +      return gntdev_dmabuf;
 +
 +fail:
 +      /* dmabuf_imp_free_storage() tolerates the NULL members here. */
 +      dmabuf_imp_free_storage(gntdev_dmabuf);
 +fail_no_free:
 +      return ERR_PTR(-ENOMEM);
 +}
 +
 +/*
 + * Import the dma-buf behind @fd: attach to it, map it, convert the sg
 + * table into a page array, and grant domain @domid access to each page.
 + * On success the object is linked on priv->imp_list and returned; on
 + * failure everything is unwound and an ERR_PTR is returned.
 + */
 +static struct gntdev_dmabuf *
 +dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 +                 int fd, int count, int domid)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf, *ret;
 +      struct dma_buf *dma_buf;
 +      struct dma_buf_attachment *attach;
 +      struct sg_table *sgt;
 +      struct sg_page_iter sg_iter;
 +      int i;
 +
 +      dma_buf = dma_buf_get(fd);
 +      if (IS_ERR(dma_buf))
 +              return ERR_CAST(dma_buf);
 +
 +      gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
 +      if (IS_ERR(gntdev_dmabuf)) {
 +              ret = gntdev_dmabuf;
 +              goto fail_put;
 +      }
 +
 +      gntdev_dmabuf->priv = priv;
 +      gntdev_dmabuf->fd = fd;
 +
 +      attach = dma_buf_attach(dma_buf, dev);
 +      if (IS_ERR(attach)) {
 +              ret = ERR_CAST(attach);
 +              goto fail_free_obj;
 +      }
 +
 +      gntdev_dmabuf->u.imp.attach = attach;
 +
 +      sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 +      if (IS_ERR(sgt)) {
 +              ret = ERR_CAST(sgt);
 +              goto fail_detach;
 +      }
 +
 +      /*
 +       * Check number of pages that imported buffer has.
 +       * NOTE(review): nr_pages << PAGE_SHIFT is an int expression and can
 +       * overflow for large counts, and the message below prints the byte
 +       * size while labelling it "pages" — confirm both against the user
 +       * ABI expectations.
 +       */
 +      if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
 +              ret = ERR_PTR(-EINVAL);
 +              pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
 +                       attach->dmabuf->size, gntdev_dmabuf->nr_pages);
 +              goto fail_unmap;
 +      }
 +
 +      gntdev_dmabuf->u.imp.sgt = sgt;
 +
 +      /* Now convert sgt to array of pages and check for page validity. */
 +      i = 0;
 +      for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0) {
 +              struct page *page = sg_page_iter_page(&sg_iter);
 +              /*
 +               * Reject pages that are not backed by a valid struct page,
 +               * e.g. when the exporter hands us VRAM or other device
 +               * memory without a page-frame entry.
 +               */
 +              if (!pfn_valid(page_to_pfn(page))) {
 +                      ret = ERR_PTR(-EINVAL);
 +                      goto fail_unmap;
 +              }
 +
 +              gntdev_dmabuf->pages[i++] = page;
 +      }
 +
 +      ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
 +                                                    gntdev_dmabuf->u.imp.refs,
 +                                                    count, domid));
 +      if (IS_ERR(ret))
 +              goto fail_end_access;
 +
 +      pr_debug("Imported DMA buffer with fd %d\n", fd);
 +
 +      mutex_lock(&priv->lock);
 +      list_add(&gntdev_dmabuf->next, &priv->imp_list);
 +      mutex_unlock(&priv->lock);
 +
 +      return gntdev_dmabuf;
 +
 +fail_end_access:
 +      dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
 +fail_unmap:
 +      dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 +fail_detach:
 +      dma_buf_detach(dma_buf, attach);
 +fail_free_obj:
 +      dmabuf_imp_free_storage(gntdev_dmabuf);
 +fail_put:
 +      dma_buf_put(dma_buf);
 +      return ret;
 +}
 +
 +/*
 + * Find an imported dma-buf by its file descriptor and remove it from the
 + * import list.  Returns the unlinked object, or ERR_PTR(-ENOENT) if no
 + * import with that fd exists.
 + */
 +static struct gntdev_dmabuf *
 +dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
 +{
 +      struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
 +
 +      mutex_lock(&priv->lock);
 +      list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
 +              if (gntdev_dmabuf->fd == fd) {
 +                      pr_debug("Found gntdev_dmabuf in the import list\n");
 +                      ret = gntdev_dmabuf;
 +                      list_del(&gntdev_dmabuf->next);
 +                      break;
 +              }
 +      }
 +      mutex_unlock(&priv->lock);
 +      return ret;
 +}
 +
 +/*
 + * Undo a dmabuf_imp_to_refs() import identified by @fd: end foreign
 + * access, unmap and detach from the dma-buf, drop its reference and free
 + * the bookkeeping.  Returns 0 or -ENOENT if no such import exists.
 + */
 +static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
 +{
 +      struct gntdev_dmabuf *gntdev_dmabuf;
 +      struct dma_buf_attachment *attach;
 +      struct dma_buf *dma_buf;
 +
 +      gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
 +      if (IS_ERR(gntdev_dmabuf))
 +              return PTR_ERR(gntdev_dmabuf);
 +
 +      pr_debug("Releasing DMA buffer with fd %d\n", fd);
 +
 +      dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
 +                                    gntdev_dmabuf->nr_pages);
 +
 +      attach = gntdev_dmabuf->u.imp.attach;
 +
 +      if (gntdev_dmabuf->u.imp.sgt)
 +              dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
 +                                       DMA_BIDIRECTIONAL);
 +      /* Save the dma_buf pointer: detach frees the attachment. */
 +      dma_buf = attach->dmabuf;
 +      dma_buf_detach(attach->dmabuf, attach);
 +      dma_buf_put(dma_buf);
 +
 +      dmabuf_imp_free_storage(gntdev_dmabuf);
 +      return 0;
 +}
 +
 +/* DMA buffer IOCTL support. */
 +
 +/*
 + * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS: copy the grant refs from userspace
 + * and export them as a dma-buf fd, which is copied back into @u->fd.
 + * Rejected when ptemod is in use since that mapping mode is incompatible
 + * with dma-buf export.
 + */
 +long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
 +                                     struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
 +{
 +      struct ioctl_gntdev_dmabuf_exp_from_refs op;
 +      u32 *refs;
 +      long ret;
 +
 +      if (use_ptemod) {
 +              pr_debug("Cannot provide dma-buf: use_ptemode %d\n",
 +                       use_ptemod);
 +              return -EINVAL;
 +      }
 +
 +      if (copy_from_user(&op, u, sizeof(op)) != 0)
 +              return -EFAULT;
 +
 +      if (unlikely(op.count <= 0))
 +              return -EINVAL;
 +
 +      /*
 +       * NOTE(review): op.count has no explicit upper bound here, so a
 +       * large user-supplied count drives a large kcalloc() before the
 +       * mapped-pages limit check — presumably acceptable, but verify.
 +       */
 +      refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
 +      if (!refs)
 +              return -ENOMEM;
 +
 +      if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
 +              ret = -EFAULT;
 +              goto out;
 +      }
 +
 +      ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
 +                                 op.domid, refs, &op.fd);
 +      if (ret)
 +              goto out;
 +
 +      /* Copy the whole op back so userspace sees the new fd. */
 +      if (copy_to_user(u, &op, sizeof(op)) != 0)
 +              ret = -EFAULT;
 +
 +out:
 +      kfree(refs);
 +      return ret;
 +}
 +
 +/*
 + * IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED: block (up to op.wait_to_ms) until
 + * the exported dma-buf identified by op.fd has been released.
 + */
 +long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
 +                                         struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
 +{
 +      struct ioctl_gntdev_dmabuf_exp_wait_released op;
 +
 +      if (copy_from_user(&op, u, sizeof(op)) != 0)
 +              return -EFAULT;
 +
 +      return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
 +                                      op.wait_to_ms);
 +}
 +
 +/*
 + * IOCTL_GNTDEV_DMABUF_IMP_TO_REFS: import the dma-buf behind op.fd and
 + * copy the resulting grant references back to userspace.  If the copy-out
 + * fails, the just-created import is rolled back.
 + */
 +long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
 +                                   struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
 +{
 +      struct ioctl_gntdev_dmabuf_imp_to_refs op;
 +      struct gntdev_dmabuf *gntdev_dmabuf;
 +      long ret;
 +
 +      if (copy_from_user(&op, u, sizeof(op)) != 0)
 +              return -EFAULT;
 +
 +      if (unlikely(op.count <= 0))
 +              return -EINVAL;
 +
 +      gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
 +                                         priv->dma_dev, op.fd,
 +                                         op.count, op.domid);
 +      if (IS_ERR(gntdev_dmabuf))
 +              return PTR_ERR(gntdev_dmabuf);
 +
 +      if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
 +                       sizeof(*u->refs) * op.count) != 0) {
 +              ret = -EFAULT;
 +              goto out_release;
 +      }
 +      return 0;
 +
 +out_release:
 +      dmabuf_imp_release(priv->dmabuf_priv, op.fd);
 +      return ret;
 +}
 +
 +/*
 + * IOCTL_GNTDEV_DMABUF_IMP_RELEASE: tear down the import associated with
 + * op.fd.  Returns 0 or a negative errno from dmabuf_imp_release().
 + */
 +long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
 +                                   struct ioctl_gntdev_dmabuf_imp_release __user *u)
 +{
 +      struct ioctl_gntdev_dmabuf_imp_release op;
 +
 +      if (copy_from_user(&op, u, sizeof(op)) != 0)
 +              return -EFAULT;
 +
 +      return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
 +}
 +
 +/*
 + * Allocate and initialize the per-open dma-buf state: its lock plus the
 + * export, export-wait and import lists.  Returns ERR_PTR(-ENOMEM) on OOM.
 + */
 +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
 +{
 +      struct gntdev_dmabuf_priv *priv;
 +
 +      priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 +      if (!priv)
 +              return ERR_PTR(-ENOMEM);
 +
 +      mutex_init(&priv->lock);
 +      INIT_LIST_HEAD(&priv->exp_list);
 +      INIT_LIST_HEAD(&priv->exp_wait_list);
 +      INIT_LIST_HEAD(&priv->imp_list);
 +
 +      return priv;
 +}
 +
 +void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
 +{
 +      kfree(priv);
 +}
diff --combined include/linux/pci.h
index c133ccfa002e17362288eab00b11bc04895c6805,e04ab6265566dc3337e972cce5801a70a9ba58bf..5f13cdbc73cd70a3dbd8ba5372c3a33dae29b3d8
@@@ -261,6 -261,9 +261,9 @@@ enum pci_bus_speed 
        PCI_SPEED_UNKNOWN               = 0xff,
  };
  
+ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
  struct pci_cap_saved_data {
        u16             cap_nr;
        bool            cap_extended;
@@@ -368,6 -371,7 +371,6 @@@ struct pci_dev 
        unsigned int    transparent:1;          /* Subtractive decode bridge */
        unsigned int    multifunction:1;        /* Multi-function device */
  
 -      unsigned int    is_added:1;
        unsigned int    is_busmaster:1;         /* Is busmaster */
        unsigned int    no_msi:1;               /* May not use MSI */
        unsigned int    no_64bit_msi:1;         /* May only use 32-bit MSIs */
index 57bef4fbfb31cb65788caaa2e8ee654378aa3246,c28224347d6905b30301df5bda3f88724297b051..7d0b0ed744042d827886554951a24329879b93f0
@@@ -21,9 -21,6 +21,9 @@@
   *          Davidlohr Bueso <[email protected]>
   *    Based on kernel/rcu/torture.c.
   */
 +
 +#define pr_fmt(fmt) fmt
 +
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/kthread.h>
@@@ -60,7 -57,7 +60,7 @@@ torture_param(int, shutdown_secs, 0, "S
  torture_param(int, stat_interval, 60,
             "Number of seconds between stats printk()s");
  torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
 -torture_param(bool, verbose, true,
 +torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
  
  static char *torture_type = "spin_lock";
@@@ -368,7 -365,7 +368,7 @@@ static struct lock_torture_ops mutex_lo
  };
  
  #include <linux/ww_mutex.h>
- static DEFINE_WW_CLASS(torture_ww_class);
+ static DEFINE_WD_CLASS(torture_ww_class);
  static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
  static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
  static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
diff --combined kernel/printk/printk.c
index 9a63aeeaaf5dd7210f7afebadf8b47ae0440bb7b,3f041e7cbfc9ea5a016ec7c9cfb177280305a229..90b6ab01db59cadbaf6e92283e982cb3ae820289
@@@ -349,7 -349,7 +349,7 @@@ static int console_msg_format = MSG_FOR
   */
  
  enum log_flags {
 -      LOG_NOCONS      = 1,    /* already flushed, do not print to console */
 +      LOG_NOCONS      = 1,    /* suppress print, do not print to console */
        LOG_NEWLINE     = 2,    /* text ended with a newline */
        LOG_PREFIX      = 4,    /* text started with a prefix */
        LOG_CONT        = 8,    /* text is a fragment of a continuation line */
@@@ -1352,68 -1352,71 +1352,68 @@@ static int syslog_print_all(char __use
  {
        char *text;
        int len = 0;
 +      u64 next_seq;
 +      u64 seq;
 +      u32 idx;
  
        text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
        if (!text)
                return -ENOMEM;
  
        logbuf_lock_irq();
 -      if (buf) {
 -              u64 next_seq;
 -              u64 seq;
 -              u32 idx;
 +      /*
 +       * Find first record that fits, including all following records,
 +       * into the user-provided buffer for this dump.
 +       */
 +      seq = clear_seq;
 +      idx = clear_idx;
 +      while (seq < log_next_seq) {
 +              struct printk_log *msg = log_from_idx(idx);
  
 -              /*
 -               * Find first record that fits, including all following records,
 -               * into the user-provided buffer for this dump.
 -               */
 -              seq = clear_seq;
 -              idx = clear_idx;
 -              while (seq < log_next_seq) {
 -                      struct printk_log *msg = log_from_idx(idx);
 -
 -                      len += msg_print_text(msg, true, NULL, 0);
 -                      idx = log_next(idx);
 -                      seq++;
 -              }
 +              len += msg_print_text(msg, true, NULL, 0);
 +              idx = log_next(idx);
 +              seq++;
 +      }
  
 -              /* move first record forward until length fits into the buffer */
 -              seq = clear_seq;
 -              idx = clear_idx;
 -              while (len > size && seq < log_next_seq) {
 -                      struct printk_log *msg = log_from_idx(idx);
 +      /* move first record forward until length fits into the buffer */
 +      seq = clear_seq;
 +      idx = clear_idx;
 +      while (len > size && seq < log_next_seq) {
 +              struct printk_log *msg = log_from_idx(idx);
  
 -                      len -= msg_print_text(msg, true, NULL, 0);
 -                      idx = log_next(idx);
 -                      seq++;
 -              }
 +              len -= msg_print_text(msg, true, NULL, 0);
 +              idx = log_next(idx);
 +              seq++;
 +      }
  
 -              /* last message fitting into this dump */
 -              next_seq = log_next_seq;
 +      /* last message fitting into this dump */
 +      next_seq = log_next_seq;
  
 -              len = 0;
 -              while (len >= 0 && seq < next_seq) {
 -                      struct printk_log *msg = log_from_idx(idx);
 -                      int textlen;
 +      len = 0;
 +      while (len >= 0 && seq < next_seq) {
 +              struct printk_log *msg = log_from_idx(idx);
 +              int textlen;
  
 -                      textlen = msg_print_text(msg, true, text,
 -                                               LOG_LINE_MAX + PREFIX_MAX);
 -                      if (textlen < 0) {
 -                              len = textlen;
 -                              break;
 -                      }
 -                      idx = log_next(idx);
 -                      seq++;
 +              textlen = msg_print_text(msg, true, text,
 +                                       LOG_LINE_MAX + PREFIX_MAX);
 +              if (textlen < 0) {
 +                      len = textlen;
 +                      break;
 +              }
 +              idx = log_next(idx);
 +              seq++;
  
 -                      logbuf_unlock_irq();
 -                      if (copy_to_user(buf + len, text, textlen))
 -                              len = -EFAULT;
 -                      else
 -                              len += textlen;
 -                      logbuf_lock_irq();
 -
 -                      if (seq < log_first_seq) {
 -                              /* messages are gone, move to next one */
 -                              seq = log_first_seq;
 -                              idx = log_first_idx;
 -                      }
 +              logbuf_unlock_irq();
 +              if (copy_to_user(buf + len, text, textlen))
 +                      len = -EFAULT;
 +              else
 +                      len += textlen;
 +              logbuf_lock_irq();
 +
 +              if (seq < log_first_seq) {
 +                      /* messages are gone, move to next one */
 +                      seq = log_first_seq;
 +                      idx = log_first_idx;
                }
        }
  
        return len;
  }
  
 +static void syslog_clear(void)
 +{
 +      logbuf_lock_irq();
 +      clear_seq = log_next_seq;
 +      clear_idx = log_next_idx;
 +      logbuf_unlock_irq();
 +}
 +
  int do_syslog(int type, char __user *buf, int len, int source)
  {
        bool clear = false;
                break;
        /* Clear ring buffer */
        case SYSLOG_ACTION_CLEAR:
 -              syslog_print_all(NULL, 0, true);
 +              syslog_clear();
                break;
        /* Disable logging to console */
        case SYSLOG_ACTION_CONSOLE_OFF:
@@@ -1829,16 -1824,28 +1829,16 @@@ static size_t log_output(int facility, 
        return log_store(facility, level, lflags, 0, dict, dictlen, text, text_len);
  }
  
 -asmlinkage int vprintk_emit(int facility, int level,
 -                          const char *dict, size_t dictlen,
 -                          const char *fmt, va_list args)
 +/* Must be called under logbuf_lock. */
 +int vprintk_store(int facility, int level,
 +                const char *dict, size_t dictlen,
 +                const char *fmt, va_list args)
  {
        static char textbuf[LOG_LINE_MAX];
        char *text = textbuf;
        size_t text_len;
        enum log_flags lflags = 0;
 -      unsigned long flags;
 -      int printed_len;
 -      bool in_sched = false;
 -
 -      if (level == LOGLEVEL_SCHED) {
 -              level = LOGLEVEL_DEFAULT;
 -              in_sched = true;
 -      }
  
 -      boot_delay_msec(level);
 -      printk_delay();
 -
 -      /* This stops the holder of console_sem just where we want him */
 -      logbuf_lock_irqsave(flags);
        /*
         * The printf needs to come first; we need the syslog
         * prefix which might be passed-in as a parameter.
        if (dict)
                lflags |= LOG_PREFIX|LOG_NEWLINE;
  
 -      printed_len = log_output(facility, level, lflags, dict, dictlen, text, text_len);
 +      if (suppress_message_printing(level))
 +              lflags |= LOG_NOCONS;
  
 +      return log_output(facility, level, lflags,
 +                        dict, dictlen, text, text_len);
 +}
 +
 +asmlinkage int vprintk_emit(int facility, int level,
 +                          const char *dict, size_t dictlen,
 +                          const char *fmt, va_list args)
 +{
 +      int printed_len;
 +      bool in_sched = false;
 +      unsigned long flags;
 +
 +      if (level == LOGLEVEL_SCHED) {
 +              level = LOGLEVEL_DEFAULT;
 +              in_sched = true;
 +      }
 +
 +      boot_delay_msec(level);
 +      printk_delay();
 +
 +      /* This stops the holder of console_sem just where we want him */
 +      logbuf_lock_irqsave(flags);
 +      printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
        logbuf_unlock_irqrestore(flags);
  
        /* If called from the scheduler, we can not call up(). */
@@@ -2030,6 -2013,7 +2030,6 @@@ static void call_console_drivers(const 
                                 const char *text, size_t len) {}
  static size_t msg_print_text(const struct printk_log *msg,
                             bool syslog, char *buf, size_t size) { return 0; }
 -static bool suppress_message_printing(int level) { return false; }
  
  #endif /* CONFIG_PRINTK */
  
@@@ -2259,6 -2243,7 +2259,7 @@@ int is_console_locked(void
  {
        return console_locked;
  }
+ EXPORT_SYMBOL(is_console_locked);
  
  /*
   * Check if we have any console that is capable of printing while cpu is
@@@ -2365,10 -2350,11 +2366,10 @@@ skip
                        break;
  
                msg = log_from_idx(console_idx);
 -              if (suppress_message_printing(msg->level)) {
 +              if (msg->flags & LOG_NOCONS) {
                        /*
 -                       * Skip record we have buffered and already printed
 -                       * directly to the console when we received it, and
 -                       * record that has level above the console loglevel.
 +                       * Skip record if !ignore_loglevel, and
 +                       * record has level above the console loglevel.
                         */
                        console_idx = log_next(console_idx);
                        console_seq++;
@@@ -2893,20 -2879,16 +2894,20 @@@ void wake_up_klogd(void
        preempt_enable();
  }
  
 -int vprintk_deferred(const char *fmt, va_list args)
 +void defer_console_output(void)
  {
 -      int r;
 -
 -      r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
 -
        preempt_disable();
        __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
        irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
        preempt_enable();
 +}
 +
 +int vprintk_deferred(const char *fmt, va_list args)
 +{
 +      int r;
 +
 +      r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
 +      defer_console_output();
  
        return r;
  }
This page took 0.170582 seconds and 4 git commands to generate.