Git Repo - linux.git/commitdiff
BackMerge v4.18-rc7 into drm-next
author Dave Airlie <[email protected]>
Mon, 30 Jul 2018 00:39:22 +0000 (10:39 +1000)
committer Dave Airlie <[email protected]>
Mon, 30 Jul 2018 00:39:22 +0000 (10:39 +1000)
rmk requested this for armada and I think we've had a few
conflicts build up.

Signed-off-by: Dave Airlie <[email protected]>
26 files changed:
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/sun4i/Makefile
drivers/pci/pci.c
include/linux/pci.h

diff --combined MAINTAINERS
index 93f189f0d60d3148b519dc75a26216bbf7408298,32fbc6f732d44da6bad368f00ecb000c52c3263f..9b2bf134964e7a42b8ca332d2869a470dab0877d
@@@ -581,7 -581,7 +581,7 @@@ W: https://www.infradead.org/~dhowells/
  
  AGPGART DRIVER
  M:    David Airlie <[email protected]>
- T:    git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+ T:    git git://anongit.freedesktop.org/drm/drm
  S:    Maintained
  F:    drivers/char/agp/
  F:    include/linux/agp*
@@@ -728,14 -728,6 +728,14 @@@ S:       Supporte
  F:    drivers/crypto/ccp/
  F:    include/linux/ccp.h
  
 +AMD DISPLAY CORE
 +M:    Harry Wentland <[email protected]>
 +M:    Leo Li <[email protected]>
 +L:    [email protected]
 +T:    git git://people.freedesktop.org/~agd5f/linux
 +S:    Supported
 +F:    drivers/gpu/drm/amd/display/
 +
  AMD FAM15H PROCESSOR POWER MONITORING DRIVER
  M:    Huang Rui <[email protected]>
  L:    [email protected]
@@@ -785,14 -777,6 +785,14 @@@ F:       drivers/gpu/drm/amd/include/vi_struc
  F:    drivers/gpu/drm/amd/include/v9_structs.h
  F:    include/uapi/linux/kfd_ioctl.h
  
 +AMD POWERPLAY
 +M:    Rex Zhu <[email protected]>
 +M:    Evan Quan <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/gpu/drm/amd/powerplay/
 +T:    git git://people.freedesktop.org/~agd5f/linux
 +
  AMD SEATTLE DEVICE TREE SUPPORT
  M:    Brijesh Singh <[email protected]>
  M:    Suravee Suthikulpanit <[email protected]>
@@@ -2539,7 -2523,7 +2539,7 @@@ S:      Supporte
  F:    drivers/scsi/esas2r
  
  ATUSB IEEE 802.15.4 RADIO DRIVER
- M:    Stefan Schmidt <stefan@osg.samsung.com>
+ M:    Stefan Schmidt <stefan@datenfreihafen.org>
  L:    [email protected]
  S:    Maintained
  F:    drivers/net/ieee802154/atusb.c
@@@ -4476,6 -4460,7 +4476,7 @@@ F:      Documentation/blockdev/drbd
  
  DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
  M:    Greg Kroah-Hartman <[email protected]>
+ R:    "Rafael J. Wysocki" <[email protected]>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
  S:    Supported
  F:    Documentation/kobject.txt
@@@ -4646,7 -4631,7 +4647,7 @@@ F:      include/uapi/drm/vmwgfx_drm.
  DRM DRIVERS
  M:    David Airlie <[email protected]>
  L:    [email protected]
- T:    git git://people.freedesktop.org/~airlied/linux
+ T:    git git://anongit.freedesktop.org/drm/drm
  B:    https://bugs.freedesktop.org/
  C:    irc://chat.freenode.net/dri-devel
  S:    Maintained
@@@ -4899,8 -4884,7 +4900,8 @@@ F:      Documentation/gpu/xen-front.rs
  
  DRM TTM SUBSYSTEM
  M:    Christian Koenig <[email protected]>
 -M:    Roger He <[email protected]>
 +M:    Huang Rui <[email protected]>
 +M:    Junwei Zhang <[email protected]>
  T:    git git://people.freedesktop.org/~agd5f/linux
  S:    Maintained
  L:    [email protected]
@@@ -5806,7 -5790,6 +5807,6 @@@ F:      include/linux/fsl
  
  FREESCALE SOC FS_ENET DRIVER
  M:    Pantelis Antoniou <[email protected]>
- M:    Vitaly Bordug <[email protected]>
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
@@@ -6925,7 -6908,7 +6925,7 @@@ F:      drivers/clk/clk-versaclock5.
  
  IEEE 802.15.4 SUBSYSTEM
  M:    Alexander Aring <[email protected]>
- M:    Stefan Schmidt <stefan@osg.samsung.com>
+ M:    Stefan Schmidt <stefan@datenfreihafen.org>
  L:    [email protected]
  W:    http://wpan.cakelab.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@@ -7112,6 -7095,7 +7112,7 @@@ F:      include/uapi/linux/input.
  F:    include/uapi/linux/input-event-codes.h
  F:    include/linux/input/
  F:    Documentation/devicetree/bindings/input/
+ F:    Documentation/devicetree/bindings/serio/
  F:    Documentation/input/
  
  INPUT MULTITOUCH (MT) PROTOCOL
@@@ -8645,7 -8629,7 +8646,7 @@@ MARVELL MWIFIEX WIRELESS DRIVE
  M:    Amitkumar Karwar <[email protected]>
  M:    Nishant Sarmukadam <[email protected]>
  M:    Ganapathi Bhat <[email protected]>
- M:    Xinming Hu <huxm@marvell.com>
+ M:    Xinming Hu <huxinming820@gmail.com>
  L:    [email protected]
  S:    Maintained
  F:    drivers/net/wireless/marvell/mwifiex/
@@@ -9091,7 -9075,7 +9092,7 @@@ S:      Maintaine
  F:    drivers/usb/mtu3/
  
  MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
- M:    Peter Senna Tschudin <peter.senna@collabora.com>
+ M:    Peter Senna Tschudin <peter.senna@gmail.com>
  M:    Martin Donnelly <[email protected]>
  M:    Martyn Welch <[email protected]>
  S:    Maintained
@@@ -10230,11 -10214,13 +10231,13 @@@ F:        sound/soc/codecs/sgtl5000
  
  NXP TDA998X DRM DRIVER
  M:    Russell King <[email protected]>
- S:    Supported
+ S:    Maintained
  T:    git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
  T:    git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
  F:    drivers/gpu/drm/i2c/tda998x_drv.c
  F:    include/drm/i2c/tda998x.h
+ F:    include/dt-bindings/display/tda998x.h
+ K:    "nxp,tda998x"
  
  NXP TFA9879 DRIVER
  M:    Peter Rosin <[email protected]>
@@@ -11852,7 -11838,7 +11855,7 @@@ S:   Supporte
  F:    arch/hexagon/
  
  QUALCOMM HIDMA DRIVER
- M:    Sinan Kaya <okaya@codeaurora.org>
+ M:    Sinan Kaya <okaya@kernel.org>
  L:    [email protected]
  L:    [email protected]
  L:    [email protected]
index b33f1680c9a322c6eb90b3f42159d35d85ca6953,ca8bf1c9a98e18a45ff1f07148bec1b1dd6aafc2..a028661d9e2013dd2a6e5611448438c7590fec82
@@@ -32,7 -32,7 +32,7 @@@ struct amdgpu_atpx_functions 
        bool switch_start;
        bool switch_end;
        bool disp_connectors_mapping;
 -      bool disp_detetion_ports;
 +      bool disp_detection_ports;
  };
  
  struct amdgpu_atpx {
@@@ -162,7 -162,7 +162,7 @@@ static void amdgpu_atpx_parse_functions
        f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
        f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
        f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
 -      f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
 +      f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
  }
  
  /**
@@@ -575,6 -575,7 +575,7 @@@ static const struct amdgpu_px_quirk amd
        { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+       { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
        { 0, 0, 0, 0, 0 },
  };
  
index 13acef526c5bab92ba362605d4845700e11fd1bc,2c5f093e79e369db4900f6b64ddcc107956d3964..e839470880d7f20c038eda1d2577435080911daa
@@@ -25,7 -25,6 +25,7 @@@
   *          Alex Deucher
   *          Jerome Glisse
   */
 +#include <linux/power_supply.h>
  #include <linux/kthread.h>
  #include <linux/console.h>
  #include <linux/slab.h>
@@@ -676,15 -675,17 +676,15 @@@ void amdgpu_device_vram_location(struc
  }
  
  /**
 - * amdgpu_device_gart_location - try to find GTT location
 + * amdgpu_device_gart_location - try to find GART location
   *
   * @adev: amdgpu device structure holding all necessary informations
   * @mc: memory controller structure holding memory informations
   *
 - * Function will place try to place GTT before or after VRAM.
 + * Function will place try to place GART before or after VRAM.
   *
 - * If GTT size is bigger than space left then we ajust GTT size.
 + * If GART size is bigger than space left then we ajust GART size.
   * Thus function will never fails.
 - *
 - * FIXME: when reducing GTT size align new size on power of 2.
   */
  void amdgpu_device_gart_location(struct amdgpu_device *adev,
                                 struct amdgpu_gmc *mc)
        size_bf = mc->vram_start;
        if (size_bf > size_af) {
                if (mc->gart_size > size_bf) {
 -                      dev_warn(adev->dev, "limiting GTT\n");
 +                      dev_warn(adev->dev, "limiting GART\n");
                        mc->gart_size = size_bf;
                }
                mc->gart_start = 0;
        } else {
                if (mc->gart_size > size_af) {
 -                      dev_warn(adev->dev, "limiting GTT\n");
 +                      dev_warn(adev->dev, "limiting GART\n");
                        mc->gart_size = size_af;
                }
                /* VCE doesn't like it when BOs cross a 4GB segment, so align
                mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
        }
        mc->gart_end = mc->gart_start + mc->gart_size - 1;
 -      dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 +      dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
                        mc->gart_size >> 20, mc->gart_start, mc->gart_end);
  }
  
@@@ -1076,7 -1077,7 +1076,7 @@@ static const struct vga_switcheroo_clie
  /**
   * amdgpu_device_ip_set_clockgating_state - set the CG state
   *
 - * @adev: amdgpu_device pointer
 + * @dev: amdgpu_device pointer
   * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
   * @state: clockgating state (gate or ungate)
   *
@@@ -1110,7 -1111,7 +1110,7 @@@ int amdgpu_device_ip_set_clockgating_st
  /**
   * amdgpu_device_ip_set_powergating_state - set the PG state
   *
 - * @adev: amdgpu_device pointer
 + * @dev: amdgpu_device pointer
   * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
   * @state: powergating state (gate or ungate)
   *
@@@ -1221,7 -1222,7 +1221,7 @@@ bool amdgpu_device_ip_is_idle(struct am
   * amdgpu_device_ip_get_ip_block - get a hw IP pointer
   *
   * @adev: amdgpu_device pointer
 - * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 + * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
   *
   * Returns a pointer to the hardware IP block structure
   * if it exists for the asic, otherwise NULL.
@@@ -1707,6 -1708,10 +1707,6 @@@ static int amdgpu_device_ip_late_set_cg
        if (amdgpu_emu_mode == 1)
                return 0;
  
 -      r = amdgpu_ib_ring_tests(adev);
 -      if (r)
 -              DRM_ERROR("ib ring test failed (%d).\n", r);
 -
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                }
        }
  
 -      if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
 -              /* enable gfx powergating */
 -              amdgpu_device_ip_set_powergating_state(adev,
 -                                                     AMD_IP_BLOCK_TYPE_GFX,
 -                                                     AMD_PG_STATE_GATE);
 -              /* enable gfxoff */
 -              amdgpu_device_ip_set_powergating_state(adev,
 -                                                     AMD_IP_BLOCK_TYPE_SMC,
 -                                                     AMD_PG_STATE_GATE);
 -      }
 +      return 0;
 +}
  
 +static int amdgpu_device_ip_late_set_pg_state(struct amdgpu_device *adev)
 +{
 +      int i = 0, r;
 +
 +      if (amdgpu_emu_mode == 1)
 +              return 0;
 +
 +      for (i = 0; i < adev->num_ip_blocks; i++) {
 +              if (!adev->ip_blocks[i].status.valid)
 +                      continue;
 +              /* skip CG for VCE/UVD, it's handled specially */
 +              if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 +                  adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
 +                  adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 +                  adev->ip_blocks[i].version->funcs->set_powergating_state) {
 +                      /* enable powergating to save power */
 +                      r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
 +                                                                                   AMD_PG_STATE_GATE);
 +                      if (r) {
 +                              DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
 +                                        adev->ip_blocks[i].version->funcs->name, r);
 +                              return r;
 +                      }
 +              }
 +      }
        return 0;
  }
  
@@@ -1787,9 -1775,6 +1787,9 @@@ static int amdgpu_device_ip_late_init(s
                }
        }
  
 +      amdgpu_device_ip_late_set_cg_state(adev);
 +      amdgpu_device_ip_late_set_pg_state(adev);
 +
        queue_delayed_work(system_wq, &adev->late_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
  
@@@ -1828,8 -1813,6 +1828,8 @@@ static int amdgpu_device_ip_fini(struc
                                          adev->ip_blocks[i].version->funcs->name, r);
                                return r;
                        }
 +                      if (adev->powerplay.pp_funcs->set_powergating_by_smu)
 +                              amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
                        r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
                        /* XXX handle errors */
                        if (r) {
@@@ -1918,15 -1901,11 +1918,15 @@@ static void amdgpu_device_ip_late_init_
  {
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, late_init_work.work);
 -      amdgpu_device_ip_late_set_cg_state(adev);
 +      int r;
 +
 +      r = amdgpu_ib_ring_tests(adev);
 +      if (r)
 +              DRM_ERROR("ib ring test failed (%d).\n", r);
  }
  
  /**
 - * amdgpu_device_ip_suspend - run suspend for hardware IPs
 + * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
   *
   * @adev: amdgpu_device pointer
   *
   * in each IP into a state suitable for suspend.
   * Returns 0 on success, negative error code on failure.
   */
 -int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 +static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
  {
        int i, r;
  
        if (amdgpu_sriov_vf(adev))
                amdgpu_virt_request_full_gpu(adev, false);
  
 -      /* ungate SMC block powergating */
 -      if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
 -              amdgpu_device_ip_set_powergating_state(adev,
 -                                                     AMD_IP_BLOCK_TYPE_SMC,
 -                                                     AMD_CG_STATE_UNGATE);
 +      for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 +              if (!adev->ip_blocks[i].status.valid)
 +                      continue;
 +              /* displays are handled separately */
 +              if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
 +                      /* ungate blocks so that suspend can properly shut them down */
 +                      if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 +                              r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
 +                                                                                           AMD_CG_STATE_UNGATE);
 +                              if (r) {
 +                                      DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
 +                                                adev->ip_blocks[i].version->funcs->name, r);
 +                              }
 +                      }
 +                      /* XXX handle errors */
 +                      r = adev->ip_blocks[i].version->funcs->suspend(adev);
 +                      /* XXX handle errors */
 +                      if (r) {
 +                              DRM_ERROR("suspend of IP block <%s> failed %d\n",
 +                                        adev->ip_blocks[i].version->funcs->name, r);
 +                      }
 +              }
 +      }
 +
 +      if (amdgpu_sriov_vf(adev))
 +              amdgpu_virt_release_full_gpu(adev, false);
 +
 +      return 0;
 +}
 +
 +/**
 + * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Main suspend function for hardware IPs.  The list of all the hardware
 + * IPs that make up the asic is walked, clockgating is disabled and the
 + * suspend callbacks are run.  suspend puts the hardware and software state
 + * in each IP into a state suitable for suspend.
 + * Returns 0 on success, negative error code on failure.
 + */
 +static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 +{
 +      int i, r;
 +
 +      if (amdgpu_sriov_vf(adev))
 +              amdgpu_virt_request_full_gpu(adev, false);
  
        /* ungate SMC block first */
        r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
                DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
        }
  
 +      /* call smu to disable gfx off feature first when suspend */
 +      if (adev->powerplay.pp_funcs->set_powergating_by_smu)
 +              amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false);
 +
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
 +              /* displays are handled in phase1 */
 +              if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
 +                      continue;
                /* ungate blocks so that suspend can properly shut them down */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
                        adev->ip_blocks[i].version->funcs->set_clockgating_state) {
        return 0;
  }
  
 +/**
 + * amdgpu_device_ip_suspend - run suspend for hardware IPs
 + *
 + * @adev: amdgpu_device pointer
 + *
 + * Main suspend function for hardware IPs.  The list of all the hardware
 + * IPs that make up the asic is walked, clockgating is disabled and the
 + * suspend callbacks are run.  suspend puts the hardware and software state
 + * in each IP into a state suitable for suspend.
 + * Returns 0 on success, negative error code on failure.
 + */
 +int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 +{
 +      int r;
 +
 +      r = amdgpu_device_ip_suspend_phase1(adev);
 +      if (r)
 +              return r;
 +      r = amdgpu_device_ip_suspend_phase2(adev);
 +
 +      return r;
 +}
 +
  static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
  {
        int i, r;
                                continue;
  
                        r = block->version->funcs->hw_init(adev);
 -                      DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
 +                      DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
                        if (r)
                                return r;
                }
@@@ -2113,7 -2020,7 +2113,7 @@@ static int amdgpu_device_ip_reinit_late
                                continue;
  
                        r = block->version->funcs->hw_init(adev);
 -                      DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
 +                      DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
                        if (r)
                                return r;
                }
@@@ -2274,7 -2181,7 +2274,7 @@@ bool amdgpu_device_asic_has_dc_support(
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 +#ifdef CONFIG_X86
        case CHIP_RAVEN:
  #endif
                return amdgpu_dc != 0;
@@@ -2303,7 -2210,7 +2303,7 @@@ bool amdgpu_device_has_dc_support(struc
   * amdgpu_device_init - initialize the driver
   *
   * @adev: amdgpu_device pointer
 - * @pdev: drm dev pointer
 + * @ddev: drm dev pointer
   * @pdev: pci dev pointer
   * @flags: driver flags
   *
@@@ -2394,8 -2301,6 +2394,8 @@@ int amdgpu_device_init(struct amdgpu_de
        INIT_DELAYED_WORK(&adev->late_init_work,
                          amdgpu_device_ip_late_init_func_handler);
  
 +      adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
 +
        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        if (adev->asic_type >= CHIP_BONAIRE) {
@@@ -2676,9 -2581,8 +2676,9 @@@ void amdgpu_device_fini(struct amdgpu_d
  /**
   * amdgpu_device_suspend - initiate device suspend
   *
 - * @pdev: drm dev pointer
 - * @state: suspend state
 + * @dev: drm dev pointer
 + * @suspend: suspend state
 + * @fbcon : notify the fbdev of suspend
   *
   * Puts the hw in the suspend state (all asics).
   * Returns 0 for success or an error on failure.
@@@ -2702,9 -2606,6 +2702,9 @@@ int amdgpu_device_suspend(struct drm_de
  
        drm_kms_helper_poll_disable(dev);
  
 +      if (fbcon)
 +              amdgpu_fbdev_set_suspend(adev, 1);
 +
        if (!amdgpu_device_has_dc_support(adev)) {
                /* turn off display hw */
                drm_modeset_lock_all(dev);
                        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
                }
                drm_modeset_unlock_all(dev);
 -      }
 -
 -      amdgpu_amdkfd_suspend(adev);
 -
 -      /* unpin the front buffers and cursors */
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 -              struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 -              struct drm_framebuffer *fb = crtc->primary->fb;
 -              struct amdgpu_bo *robj;
 -
 -              if (amdgpu_crtc->cursor_bo) {
 -                      struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 -                      r = amdgpu_bo_reserve(aobj, true);
 -                      if (r == 0) {
 -                              amdgpu_bo_unpin(aobj);
 -                              amdgpu_bo_unreserve(aobj);
 +                      /* unpin the front buffers and cursors */
 +              list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +                      struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 +                      struct drm_framebuffer *fb = crtc->primary->fb;
 +                      struct amdgpu_bo *robj;
 +
 +                      if (amdgpu_crtc->cursor_bo) {
 +                              struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 +                              r = amdgpu_bo_reserve(aobj, true);
 +                              if (r == 0) {
 +                                      amdgpu_bo_unpin(aobj);
 +                                      amdgpu_bo_unreserve(aobj);
 +                              }
                        }
 -              }
  
 -              if (fb == NULL || fb->obj[0] == NULL) {
 -                      continue;
 -              }
 -              robj = gem_to_amdgpu_bo(fb->obj[0]);
 -              /* don't unpin kernel fb objects */
 -              if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 -                      r = amdgpu_bo_reserve(robj, true);
 -                      if (r == 0) {
 -                              amdgpu_bo_unpin(robj);
 -                              amdgpu_bo_unreserve(robj);
 +                      if (fb == NULL || fb->obj[0] == NULL) {
 +                              continue;
 +                      }
 +                      robj = gem_to_amdgpu_bo(fb->obj[0]);
 +                      /* don't unpin kernel fb objects */
 +                      if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 +                              r = amdgpu_bo_reserve(robj, true);
 +                              if (r == 0) {
 +                                      amdgpu_bo_unpin(robj);
 +                                      amdgpu_bo_unreserve(robj);
 +                              }
                        }
                }
        }
 +
 +      amdgpu_amdkfd_suspend(adev);
 +
 +      r = amdgpu_device_ip_suspend_phase1(adev);
 +
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
  
        amdgpu_fence_driver_suspend(adev);
  
 -      r = amdgpu_device_ip_suspend(adev);
 +      r = amdgpu_device_ip_suspend_phase2(adev);
  
        /* evict remaining vram memory
         * This second call to evict vram is to evict the gart page table
                        DRM_ERROR("amdgpu asic reset failed\n");
        }
  
 -      if (fbcon) {
 -              console_lock();
 -              amdgpu_fbdev_set_suspend(adev, 1);
 -              console_unlock();
 -      }
        return 0;
  }
  
  /**
   * amdgpu_device_resume - initiate device resume
   *
 - * @pdev: drm dev pointer
 + * @dev: drm dev pointer
 + * @resume: resume state
 + * @fbcon : notify the fbdev of resume
   *
   * Bring the hw back to operating state (all asics).
   * Returns 0 for success or an error on failure.
@@@ -2794,12 -2696,15 +2794,12 @@@ int amdgpu_device_resume(struct drm_dev
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
  
 -      if (fbcon)
 -              console_lock();
 -
        if (resume) {
                pci_set_power_state(dev->pdev, PCI_D0);
                pci_restore_state(dev->pdev);
                r = pci_enable_device(dev->pdev);
                if (r)
 -                      goto unlock;
 +                      return r;
        }
  
        /* post card */
        r = amdgpu_device_ip_resume(adev);
        if (r) {
                DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
 -              goto unlock;
 +              return r;
        }
        amdgpu_fence_driver_resume(adev);
  
  
        r = amdgpu_device_ip_late_init(adev);
        if (r)
 -              goto unlock;
 -
 -      /* pin cursors */
 -      list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 -              struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 -
 -              if (amdgpu_crtc->cursor_bo) {
 -                      struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 -                      r = amdgpu_bo_reserve(aobj, true);
 -                      if (r == 0) {
 -                              r = amdgpu_bo_pin(aobj,
 -                                                AMDGPU_GEM_DOMAIN_VRAM,
 -                                                &amdgpu_crtc->cursor_addr);
 -                              if (r != 0)
 -                                      DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
 -                              amdgpu_bo_unreserve(aobj);
 +              return r;
 +
 +      if (!amdgpu_device_has_dc_support(adev)) {
 +              /* pin cursors */
 +              list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 +                      struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 +
 +                      if (amdgpu_crtc->cursor_bo) {
 +                              struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 +                              r = amdgpu_bo_reserve(aobj, true);
 +                              if (r == 0) {
 +                                      r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 +                                      if (r != 0)
 +                                              DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
 +                                      amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 +                                      amdgpu_bo_unreserve(aobj);
 +                              }
                        }
                }
        }
        if (r)
                return r;
  
+       /* Make sure IB tests flushed */
+       flush_delayed_work(&adev->late_init_work);
        /* blat the mode back in */
        if (fbcon) {
                if (!amdgpu_device_has_dc_support(adev)) {
                        }
                        drm_modeset_unlock_all(dev);
                }
 +              amdgpu_fbdev_set_suspend(adev, 0);
        }
  
        drm_kms_helper_poll_enable(dev);
  #ifdef CONFIG_PM
        dev->dev->power.disable_depth--;
  #endif
 -
 -      if (fbcon)
 -              amdgpu_fbdev_set_suspend(adev, 0);
 -
 -unlock:
 -      if (fbcon)
 -              console_unlock();
 -
 -      return r;
 +      return 0;
  }
  
  /**
@@@ -3157,7 -3071,7 +3160,7 @@@ static int amdgpu_device_handle_vram_lo
   * @adev: amdgpu device pointer
   *
   * attempt to do soft-reset or full-reset and reinitialize Asic
 - * return 0 means successed otherwise failed
 + * return 0 means succeeded otherwise failed
   */
  static int amdgpu_device_reset(struct amdgpu_device *adev)
  {
   * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
   *
   * @adev: amdgpu device pointer
 + * @from_hypervisor: request from hypervisor
   *
   * do VF FLR and reinitialize Asic
 - * return 0 means successed otherwise failed
 + * return 0 means succeeded otherwise failed
   */
  static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
                                     bool from_hypervisor)
@@@ -3280,7 -3193,7 +3283,7 @@@ error
   *
   * @adev: amdgpu device pointer
   * @job: which job trigger hang
 - * @force forces reset regardless of amdgpu_gpu_recovery
 + * @force: forces reset regardless of amdgpu_gpu_recovery
   *
   * Attempt to reset the GPU if it has hung (all asics).
   * Returns 0 for success or an error on failure.
@@@ -3307,9 -3220,6 +3310,9 @@@ int amdgpu_device_gpu_recover(struct am
        atomic_inc(&adev->gpu_reset_counter);
        adev->in_gpu_reset = 1;
  
 +      /* Block kfd */
 +      amdgpu_amdkfd_pre_reset(adev);
 +
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
  
  
                kthread_park(ring->sched.thread);
  
 -              if (job && job->ring->idx != i)
 +              if (job && job->base.sched == &ring->sched)
                        continue;
  
 -              drm_sched_hw_job_reset(&ring->sched, &job->base);
 +              drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
  
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
                 * or all rings (in the case @job is NULL)
                 * after above amdgpu_reset accomplished
                 */
 -              if ((!job || job->ring->idx == i) && !r)
 +              if ((!job || job->base.sched == &ring->sched) && !r)
                        drm_sched_job_recovery(&ring->sched);
  
                kthread_unpark(ring->sched.thread);
                dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
        } else {
 -              dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
 +              dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
        }
  
 +      /*unlock kfd */
 +      amdgpu_amdkfd_post_reset(adev);
        amdgpu_vf_error_trans_all(adev);
        adev->in_gpu_reset = 0;
        mutex_unlock(&adev->lock_reset);
   */
  static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
  {
 -      u32 mask;
 -      int ret;
 +      struct pci_dev *pdev;
 +      enum pci_bus_speed speed_cap;
 +      enum pcie_link_width link_width;
  
        if (amdgpu_pcie_gen_cap)
                adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
        }
  
        if (adev->pm.pcie_gen_mask == 0) {
 -              ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
 -              if (!ret) {
 -                      adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +              /* asic caps */
 +              pdev = adev->pdev;
 +              speed_cap = pcie_get_speed_cap(pdev);
 +              if (speed_cap == PCI_SPEED_UNKNOWN) {
 +                      adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
                                                  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
                                                  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
 -
 -                      if (mask & DRM_PCIE_SPEED_25)
 -                              adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
 -                      if (mask & DRM_PCIE_SPEED_50)
 -                              adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
 -                      if (mask & DRM_PCIE_SPEED_80)
 -                              adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
                } else {
 -                      adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
 +                      if (speed_cap == PCIE_SPEED_16_0GT)
 +                              adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                        CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 +                                                        CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
 +                                                        CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
 +                      else if (speed_cap == PCIE_SPEED_8_0GT)
 +                              adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                        CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 +                                                        CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
 +                      else if (speed_cap == PCIE_SPEED_5_0GT)
 +                              adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                        CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
 +                      else
 +                              adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
 +              }
 +              /* platform caps */
 +              pdev = adev->ddev->pdev->bus->self;
 +              speed_cap = pcie_get_speed_cap(pdev);
 +              if (speed_cap == PCI_SPEED_UNKNOWN) {
 +                      adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 +              } else {
 +                      if (speed_cap == PCIE_SPEED_16_0GT)
 +                              adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 +                                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
 +                                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
 +                      else if (speed_cap == PCIE_SPEED_8_0GT)
 +                              adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 +                                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
 +                      else if (speed_cap == PCIE_SPEED_5_0GT)
 +                              adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 +                                                         CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 +                      else
 +                              adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
 +
                }
        }
        if (adev->pm.pcie_mlw_mask == 0) {
 -              ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
 -              if (!ret) {
 -                      switch (mask) {
 -                      case 32:
 +              pdev = adev->ddev->pdev->bus->self;
 +              link_width = pcie_get_width_cap(pdev);
 +              if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
 +                      adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
 +              } else {
 +                      switch (link_width) {
 +                      case PCIE_LNK_X32:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
 -                      case 16:
 +                      case PCIE_LNK_X16:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
 -                      case 12:
 +                      case PCIE_LNK_X12:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
 -                      case 8:
 +                      case PCIE_LNK_X8:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
 -                      case 4:
 +                      case PCIE_LNK_X4:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
 -                      case 2:
 +                      case PCIE_LNK_X2:
                                adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
                                                          CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
                                break;
 -                      case 1:
 +                      case PCIE_LNK_X1:
                                adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
                                break;
                        default:
                                break;
                        }
 -              } else {
 -                      adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
                }
        }
  }
index 9f0a217603ad001cd37c520b9937631f6adc5584,5a2e952c5bead295df49350289416d4674c4dcef..516795342dd2815629e0876031fa47070c9ec12a
  #include "amdgpu_dm_irq.h"
  #include "amdgpu_pm.h"
  
 -unsigned long long dm_get_timestamp(struct dc_context *ctx)
 -{
 -      struct timespec64 time;
  
 -      getrawmonotonic64(&time);
 -      return timespec64_to_ns(&time);
 -}
  
  unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
                unsigned long long current_time_stamp,
@@@ -74,4 -80,327 +74,3 @@@ bool dm_read_persistent_data(struct dc_
  
  /**** power component interfaces ****/
  
 -bool dm_pp_apply_display_requirements(
 -              const struct dc_context *ctx,
 -              const struct dm_pp_display_configuration *pp_display_cfg)
 -{
 -      struct amdgpu_device *adev = ctx->driver_context;
 -
 -      if (adev->pm.dpm_enabled) {
 -
 -              memset(&adev->pm.pm_display_cfg, 0,
 -                              sizeof(adev->pm.pm_display_cfg));
 -
 -              adev->pm.pm_display_cfg.cpu_cc6_disable =
 -                      pp_display_cfg->cpu_cc6_disable;
 -
 -              adev->pm.pm_display_cfg.cpu_pstate_disable =
 -                      pp_display_cfg->cpu_pstate_disable;
 -
 -              adev->pm.pm_display_cfg.cpu_pstate_separation_time =
 -                      pp_display_cfg->cpu_pstate_separation_time;
 -
 -              adev->pm.pm_display_cfg.nb_pstate_switch_disable =
 -                      pp_display_cfg->nb_pstate_switch_disable;
 -
 -              adev->pm.pm_display_cfg.num_display =
 -                              pp_display_cfg->display_count;
 -              adev->pm.pm_display_cfg.num_path_including_non_display =
 -                              pp_display_cfg->display_count;
 -
 -              adev->pm.pm_display_cfg.min_core_set_clock =
 -                              pp_display_cfg->min_engine_clock_khz/10;
 -              adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
 -                              pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
 -              adev->pm.pm_display_cfg.min_mem_set_clock =
 -                              pp_display_cfg->min_memory_clock_khz/10;
 -
 -              adev->pm.pm_display_cfg.multi_monitor_in_sync =
 -                              pp_display_cfg->all_displays_in_sync;
 -              adev->pm.pm_display_cfg.min_vblank_time =
 -                              pp_display_cfg->avail_mclk_switch_time_us;
 -
 -              adev->pm.pm_display_cfg.display_clk =
 -                              pp_display_cfg->disp_clk_khz/10;
 -
 -              adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
 -                              pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
 -
 -              adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
 -              adev->pm.pm_display_cfg.line_time_in_us =
 -                              pp_display_cfg->line_time_in_us;
 -
 -              adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
 -              adev->pm.pm_display_cfg.crossfire_display_index = -1;
 -              adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
 -
 -              /* TODO: complete implementation of
 -               * pp_display_configuration_change().
 -               * Follow example of:
 -               * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
 -               * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
 -              if (adev->powerplay.pp_funcs->display_configuration_change)
 -                      adev->powerplay.pp_funcs->display_configuration_change(
 -                              adev->powerplay.pp_handle,
 -                              &adev->pm.pm_display_cfg);
 -
 -              /* TODO: replace by a separate call to 'apply display cfg'? */
 -              amdgpu_pm_compute_clocks(adev);
 -      }
 -
 -      return true;
 -}
 -
 -static void get_default_clock_levels(
 -              enum dm_pp_clock_type clk_type,
 -              struct dm_pp_clock_levels *clks)
 -{
 -      uint32_t disp_clks_in_khz[6] = {
 -                      300000, 400000, 496560, 626090, 685720, 757900 };
 -      uint32_t sclks_in_khz[6] = {
 -                      300000, 360000, 423530, 514290, 626090, 720000 };
 -      uint32_t mclks_in_khz[2] = { 333000, 800000 };
 -
 -      switch (clk_type) {
 -      case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 -              clks->num_levels = 6;
 -              memmove(clks->clocks_in_khz, disp_clks_in_khz,
 -                              sizeof(disp_clks_in_khz));
 -              break;
 -      case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 -              clks->num_levels = 6;
 -              memmove(clks->clocks_in_khz, sclks_in_khz,
 -                              sizeof(sclks_in_khz));
 -              break;
 -      case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 -              clks->num_levels = 2;
 -              memmove(clks->clocks_in_khz, mclks_in_khz,
 -                              sizeof(mclks_in_khz));
 -              break;
 -      default:
 -              clks->num_levels = 0;
 -              break;
 -      }
 -}
 -
 -static enum amd_pp_clock_type dc_to_pp_clock_type(
 -              enum dm_pp_clock_type dm_pp_clk_type)
 -{
 -      enum amd_pp_clock_type amd_pp_clk_type = 0;
 -
 -      switch (dm_pp_clk_type) {
 -      case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
 -              amd_pp_clk_type = amd_pp_disp_clock;
 -              break;
 -      case DM_PP_CLOCK_TYPE_ENGINE_CLK:
 -              amd_pp_clk_type = amd_pp_sys_clock;
 -              break;
 -      case DM_PP_CLOCK_TYPE_MEMORY_CLK:
 -              amd_pp_clk_type = amd_pp_mem_clock;
 -              break;
 -      default:
 -              DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
 -                              dm_pp_clk_type);
 -              break;
 -      }
 -
 -      return amd_pp_clk_type;
 -}
 -
 -static void pp_to_dc_clock_levels(
 -              const struct amd_pp_clocks *pp_clks,
 -              struct dm_pp_clock_levels *dc_clks,
 -              enum dm_pp_clock_type dc_clk_type)
 -{
 -      uint32_t i;
 -
 -      if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
 -              DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 -                              DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 -                              pp_clks->count,
 -                              DM_PP_MAX_CLOCK_LEVELS);
 -
 -              dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 -      } else
 -              dc_clks->num_levels = pp_clks->count;
 -
 -      DRM_INFO("DM_PPLIB: values for %s clock\n",
 -                      DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 -
 -      for (i = 0; i < dc_clks->num_levels; i++) {
 -              DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
 -              /* translate 10kHz to kHz */
 -              dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
 -      }
 -}
 -
 -static void pp_to_dc_clock_levels_with_latency(
 -              const struct pp_clock_levels_with_latency *pp_clks,
 -              struct dm_pp_clock_levels_with_latency *clk_level_info,
 -              enum dm_pp_clock_type dc_clk_type)
 -{
 -      uint32_t i;
 -
 -      if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
 -              DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
 -                              DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
 -                              pp_clks->num_levels,
 -                              DM_PP_MAX_CLOCK_LEVELS);
 -
 -              clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
 -      } else
 -              clk_level_info->num_levels = pp_clks->num_levels;
 -
 -      DRM_DEBUG("DM_PPLIB: values for %s clock\n",
 -                      DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 -
 -      for (i = 0; i < clk_level_info->num_levels; i++) {
 -              DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
 -              /* translate 10kHz to kHz */
 -              clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
 -              clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
 -      }
 -}
 -
 -bool dm_pp_get_clock_levels_by_type(
 -              const struct dc_context *ctx,
 -              enum dm_pp_clock_type clk_type,
 -              struct dm_pp_clock_levels *dc_clks)
 -{
 -      struct amdgpu_device *adev = ctx->driver_context;
 -      void *pp_handle = adev->powerplay.pp_handle;
 -      struct amd_pp_clocks pp_clks = { 0 };
 -      struct amd_pp_simple_clock_info validation_clks = { 0 };
 -      uint32_t i;
 -
 -      if (adev->powerplay.pp_funcs->get_clock_by_type) {
 -              if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
 -                      dc_to_pp_clock_type(clk_type), &pp_clks)) {
 -              /* Error in pplib. Provide default values. */
 -                      get_default_clock_levels(clk_type, dc_clks);
 -                      return true;
 -              }
 -      }
 -
 -      pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
 -
 -      if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
 -              if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
 -                                              pp_handle, &validation_clks)) {
 -                      /* Error in pplib. Provide default values. */
 -                      DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
 -                      validation_clks.engine_max_clock = 72000;
 -                      validation_clks.memory_max_clock = 80000;
 -                      validation_clks.level = 0;
 -              }
 -      }
 -
 -      DRM_INFO("DM_PPLIB: Validation clocks:\n");
 -      DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
 -                      validation_clks.engine_max_clock);
 -      DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
 -                      validation_clks.memory_max_clock);
 -      DRM_INFO("DM_PPLIB:    level           : %d\n",
 -                      validation_clks.level);
 -
 -      /* Translate 10 kHz to kHz. */
 -      validation_clks.engine_max_clock *= 10;
 -      validation_clks.memory_max_clock *= 10;
 -
 -      /* Determine the highest non-boosted level from the Validation Clocks */
 -      if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
 -              for (i = 0; i < dc_clks->num_levels; i++) {
 -                      if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
 -                              /* This clock is higher the validation clock.
 -                               * Than means the previous one is the highest
 -                               * non-boosted one. */
 -                              DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
 -                                              dc_clks->num_levels, i);
 -                              dc_clks->num_levels = i > 0 ? i : 1;
 -                              break;
 -                      }
 -              }
 -      } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
 -              for (i = 0; i < dc_clks->num_levels; i++) {
 -                      if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
 -                              DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
 -                                              dc_clks->num_levels, i);
 -                              dc_clks->num_levels = i > 0 ? i : 1;
 -                              break;
 -                      }
 -              }
 -      }
 -
 -      return true;
 -}
 -
 -bool dm_pp_get_clock_levels_by_type_with_latency(
 -      const struct dc_context *ctx,
 -      enum dm_pp_clock_type clk_type,
 -      struct dm_pp_clock_levels_with_latency *clk_level_info)
 -{
 -      struct amdgpu_device *adev = ctx->driver_context;
 -      void *pp_handle = adev->powerplay.pp_handle;
 -      struct pp_clock_levels_with_latency pp_clks = { 0 };
 -      const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 -
 -      if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
 -              return false;
 -
 -      if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
 -                                                   dc_to_pp_clock_type(clk_type),
 -                                                   &pp_clks))
 -              return false;
 -
 -      pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 -
 -      return true;
 -}
 -
 -bool dm_pp_get_clock_levels_by_type_with_voltage(
 -      const struct dc_context *ctx,
 -      enum dm_pp_clock_type clk_type,
 -      struct dm_pp_clock_levels_with_voltage *clk_level_info)
 -{
 -      /* TODO: to be implemented */
 -      return false;
 -}
 -
 -bool dm_pp_notify_wm_clock_changes(
 -      const struct dc_context *ctx,
 -      struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
 -{
 -      /* TODO: to be implemented */
 -      return false;
 -}
 -
 -bool dm_pp_apply_power_level_change_request(
 -      const struct dc_context *ctx,
 -      struct dm_pp_power_level_change_request *level_change_req)
 -{
 -      /* TODO: to be implemented */
 -      return false;
 -}
 -
 -bool dm_pp_apply_clock_for_voltage_request(
 -      const struct dc_context *ctx,
 -      struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
 -{
 -      /* TODO: to be implemented */
 -      return false;
 -}
 -
 -bool dm_pp_get_static_clocks(
 -      const struct dc_context *ctx,
 -      struct dm_pp_static_clock_info *static_clk_info)
 -{
 -      /* TODO: to be implemented */
 -      return false;
 -}
 -
 -void dm_pp_get_funcs_rv(
 -              struct dc_context *ctx,
 -              struct pp_smu_funcs_rv *funcs)
 -{}
--
 -/**** end of power component interfaces ****/
index 9d901ca705883efe421ec33931066200bcedd6ba,bdd121485cbcd661c4b317ad5f9816ae5c1d127b..af9386ee5a93311f2d0241e86af6170b18622206
@@@ -3,7 -3,6 +3,7 @@@
  #include "dc.h"
  #include "dc_link_dp.h"
  #include "dm_helpers.h"
 +#include "opp.h"
  
  #include "inc/core_types.h"
  #include "link_hwss.h"
@@@ -39,7 -38,7 +39,7 @@@ static bool decide_fallback_link_settin
                struct dc_link_settings initial_link_settings,
                struct dc_link_settings *current_link_setting,
                enum link_training_result training_result);
 -static struct dc_link_settings get_common_supported_link_settings (
 +static struct dc_link_settings get_common_supported_link_settings(
                struct dc_link_settings link_setting_a,
                struct dc_link_settings link_setting_b);
  
@@@ -94,8 -93,8 +94,8 @@@ static void dpcd_set_link_settings
        uint8_t rate = (uint8_t)
        (lt_settings->link_settings.link_rate);
  
 -      union down_spread_ctrl downspread = {{0}};
 -      union lane_count_set lane_count_set = {{0}};
 +      union down_spread_ctrl downspread = { {0} };
 +      union lane_count_set lane_count_set = { {0} };
        uint8_t link_set_buffer[2];
  
        downspread.raw = (uint8_t)
@@@ -165,11 -164,11 +165,11 @@@ static void dpcd_set_lt_pattern_and_lan
        const struct link_training_settings *lt_settings,
        enum hw_dp_training_pattern pattern)
  {
 -      union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
 +      union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
        const uint32_t dpcd_base_lt_offset =
        DP_TRAINING_PATTERN_SET;
        uint8_t dpcd_lt_buffer[5] = {0};
 -      union dpcd_training_pattern dpcd_pattern = {{0}};
 +      union dpcd_training_pattern dpcd_pattern = { {0} };
        uint32_t lane;
        uint32_t size_in_bytes;
        bool edp_workaround = false; /* TODO link_prop.INTERNAL */
                        link,
                        DP_TRAINING_PATTERN_SET,
                        &dpcd_pattern.raw,
 -                      sizeof(dpcd_pattern.raw) );
 +                      sizeof(dpcd_pattern.raw));
  
                core_link_write_dpcd(
                        link,
                                link,
                                dpcd_base_lt_offset,
                                dpcd_lt_buffer,
 -                              size_in_bytes + sizeof(dpcd_pattern.raw) );
 +                              size_in_bytes + sizeof(dpcd_pattern.raw));
  
        link->cur_lane_setting = lt_settings->lane_settings[0];
  }
@@@ -429,8 -428,8 +429,8 @@@ static void get_lane_status_and_drive_s
        struct link_training_settings *req_settings)
  {
        uint8_t dpcd_buf[6] = {0};
 -      union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
 -      struct link_training_settings request_settings = {{0}};
 +      union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
 +      struct link_training_settings request_settings = { {0} };
        uint32_t lane;
  
        memset(req_settings, '\0', sizeof(struct link_training_settings));
@@@ -652,7 -651,7 +652,7 @@@ static bool perform_post_lt_adj_req_seq
  
                        if (req_drv_setting_changed) {
                                update_drive_settings(
 -                                      lt_settings,req_settings);
 +                                      lt_settings, req_settings);
  
                                dc_link_dp_set_drive_settings(link,
                                                lt_settings);
@@@ -725,8 -724,8 +725,8 @@@ static enum link_training_result perfor
        enum hw_dp_training_pattern hw_tr_pattern;
        uint32_t retries_ch_eq;
        enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
 -      union lane_align_status_updated dpcd_lane_status_updated = {{0}};
 -      union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
 +      union lane_align_status_updated dpcd_lane_status_updated = { {0} };
 +      union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
  
        hw_tr_pattern = get_supported_tp(link);
  
@@@ -1028,9 -1027,6 +1028,9 @@@ enum link_training_result dc_link_dp_pe
                        lt_settings.lane_settings[0].VOLTAGE_SWING,
                        lt_settings.lane_settings[0].PRE_EMPHASIS);
  
 +      if (status != LINK_TRAINING_SUCCESS)
 +              link->ctx->dc->debug_data.ltFailCount++;
 +
        return status;
  }
  
@@@ -1086,7 -1082,7 +1086,7 @@@ static struct dc_link_settings get_max_
        return max_link_cap;
  }
  
 -bool dp_hbr_verify_link_cap(
 +bool dp_verify_link_cap(
        struct dc_link *link,
        struct dc_link_settings *known_limit_link_setting)
  {
        enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
        enum link_training_result status;
  
 +      if (link->dc->debug.skip_detection_link_training) {
 +              link->verified_link_cap = *known_limit_link_setting;
 +              return true;
 +      }
 +
        success = false;
        skip_link_training = false;
  
        return success;
  }
  
 -static struct dc_link_settings get_common_supported_link_settings (
 +static struct dc_link_settings get_common_supported_link_settings(
                struct dc_link_settings link_setting_a,
                struct dc_link_settings link_setting_b)
  {
@@@ -1437,7 -1428,6 +1437,7 @@@ static uint32_t bandwidth_in_kbps_from_
  
        uint32_t lane_count  = link_setting->lane_count;
        uint32_t kbps = link_rate_in_kbps;
 +
        kbps *= lane_count;
        kbps *= 8;   /* 8 bits per byte*/
  
@@@ -1455,9 -1445,9 +1455,9 @@@ bool dp_validate_mode_timing
        const struct dc_link_settings *link_setting;
  
        /*always DP fail safe mode*/
 -      if (timing->pix_clk_khz == (uint32_t)25175 &&
 -              timing->h_addressable == (uint32_t)640 &&
 -              timing->v_addressable == (uint32_t)480)
 +      if (timing->pix_clk_khz == (uint32_t) 25175 &&
 +              timing->h_addressable == (uint32_t) 640 &&
 +              timing->v_addressable == (uint32_t) 480)
                return true;
  
        /* We always use verified link settings */
@@@ -1657,26 -1647,22 +1657,26 @@@ static enum dc_status read_hpd_rx_irq_d
                        irq_data->raw,
                        sizeof(union hpd_irq_data));
        else {
 -              /* Read 2 bytes at this location,... */
 +              /* Read 14 bytes in a single read and then copy only the required fields.
 +               * This is more efficient than doing it in two separate AUX reads. */
 +
 +              uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
 +
                retval = core_link_read_dpcd(
                        link,
                        DP_SINK_COUNT_ESI,
 -                      irq_data->raw,
 -                      2);
 +                      tmp,
 +                      sizeof(tmp));
  
                if (retval != DC_OK)
                        return retval;
  
 -              /* ... then read remaining 4 at the other location */
 -              retval = core_link_read_dpcd(
 -                      link,
 -                      DP_LANE0_1_STATUS_ESI,
 -                      &irq_data->raw[2],
 -                      4);
 +              irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
 +              irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
 +              irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
 +              irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
 +              irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
 +              irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
        }
  
        return retval;
@@@ -1781,12 -1767,10 +1781,10 @@@ static void dp_test_send_link_training(
        dp_retrain_link_dp_test(link, &link_settings, false);
  }
  
- /* TODO hbr2 compliance eye output is unstable
+ /* TODO Raven hbr2 compliance eye output is unstable
   * (toggling on and off) with debugger break
   * This causes intermittent PHY automation failure
   * Need to look into the root cause */
- static uint8_t force_tps4_for_cp2520 = 1;
  static void dp_test_send_phy_test_pattern(struct dc_link *link)
  {
        union phy_test_pattern dpcd_test_pattern;
                break;
        case PHY_TEST_PATTERN_CP2520_1:
                /* CP2520 pattern is unstable, temporarily use TPS4 instead */
-               test_pattern = (force_tps4_for_cp2520 == 1) ?
+               test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
                                DP_TEST_PATTERN_TRAINING_PATTERN4 :
                                DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
                break;
        case PHY_TEST_PATTERN_CP2520_2:
                /* CP2520 pattern is unstable, temporarily use TPS4 instead */
-               test_pattern = (force_tps4_for_cp2520 == 1) ?
+               test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
                                DP_TEST_PATTERN_TRAINING_PATTERN4 :
                                DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
                break;
@@@ -2005,16 -1989,12 +2003,16 @@@ static void handle_automated_test(struc
                        sizeof(test_response));
  }
  
 -bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
 +bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
  {
 -      union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
 +      union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
        union device_service_irq device_service_clear = { { 0 } };
 -      enum dc_status result = DDC_RESULT_UNKNOWN;
 +      enum dc_status result;
 +
        bool status = false;
 +
 +      if (out_link_loss)
 +              *out_link_loss = false;
        /* For use cases related to down stream connection status change,
         * PSR and device auto test, refer to function handle_sst_hpd_irq
         * in DAL2.1*/
                        true, LINK_TRAINING_ATTEMPTS);
  
                status = false;
 +              if (out_link_loss)
 +                      *out_link_loss = true;
        }
  
        if (link->type == dc_connection_active_dongle &&
@@@ -2277,11 -2255,6 +2275,11 @@@ static void get_active_converter_info
  
                link->dpcd_caps.branch_hw_revision =
                        dp_hw_fw_revision.ieee_hw_rev;
 +
 +              memmove(
 +                      link->dpcd_caps.branch_fw_revision,
 +                      dp_hw_fw_revision.ieee_fw_rev,
 +                      sizeof(dp_hw_fw_revision.ieee_fw_rev));
        }
  }
  
@@@ -2330,14 -2303,12 +2328,14 @@@ static bool retrieve_link_cap(struct dc
  {
        uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
  
 +      struct dp_device_vendor_id sink_id;
        union down_stream_port_count down_strm_port_count;
        union edp_configuration_cap edp_config_cap;
        union dp_downstream_port_present ds_port = { 0 };
        enum dc_status status = DC_ERROR_UNEXPECTED;
        uint32_t read_dpcd_retry_cnt = 3;
        int i;
 +      struct dp_sink_hw_fw_revision dp_hw_fw_revision;
  
        memset(dpcd_data, '\0', sizeof(dpcd_data));
        memset(&down_strm_port_count,
                        &link->dpcd_caps.sink_count.raw,
                        sizeof(link->dpcd_caps.sink_count.raw));
  
 +      /* read sink ieee oui */
 +      core_link_read_dpcd(link,
 +                      DP_SINK_OUI,
 +                      (uint8_t *)(&sink_id),
 +                      sizeof(sink_id));
 +
 +      link->dpcd_caps.sink_dev_id =
 +                      (sink_id.ieee_oui[0] << 16) +
 +                      (sink_id.ieee_oui[1] << 8) +
 +                      (sink_id.ieee_oui[2]);
 +
 +      memmove(
 +              link->dpcd_caps.sink_dev_id_str,
 +              sink_id.ieee_device_id,
 +              sizeof(sink_id.ieee_device_id));
 +
 +      core_link_read_dpcd(
 +              link,
 +              DP_SINK_HW_REVISION_START,
 +              (uint8_t *)&dp_hw_fw_revision,
 +              sizeof(dp_hw_fw_revision));
 +
 +      link->dpcd_caps.sink_hw_revision =
 +              dp_hw_fw_revision.ieee_hw_rev;
 +
 +      memmove(
 +              link->dpcd_caps.sink_fw_revision,
 +              dp_hw_fw_revision.ieee_fw_rev,
 +              sizeof(dp_hw_fw_revision.ieee_fw_rev));
 +
        /* Connectivity log: detection */
        CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
  
@@@ -2552,8 -2493,8 +2550,8 @@@ static void set_crtc_test_pattern(struc
                pipe_ctx->stream->bit_depth_params = params;
                pipe_ctx->stream_res.opp->funcs->
                        opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
 -
 -              pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
 +              if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
 +                      pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
                                controller_test_pattern, color_depth);
        }
        break;
                pipe_ctx->stream->bit_depth_params = params;
                pipe_ctx->stream_res.opp->funcs->
                        opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
 -
 -              pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
 +              if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
 +                      pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
                                CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                color_depth);
        }
diff --combined drivers/gpu/drm/amd/display/dc/dc.h
index 7515c0dcbdd2456ecfb0542fc3eebe8b3a232df6,53c71296f3dd2c3d6eb70bd068ac760e423f4322..b91f14989aef3a9a28eb901211db6a9075ed2de9
@@@ -38,7 -38,7 +38,7 @@@
  #include "inc/compressor.h"
  #include "dml/display_mode_lib.h"
  
 -#define DC_VER "3.1.44"
 +#define DC_VER "3.1.58"
  
  #define MAX_SURFACES 3
  #define MAX_STREAMS 6
@@@ -68,7 -68,6 +68,7 @@@ struct dc_caps 
        uint32_t max_planes;
        uint32_t max_downscale_ratio;
        uint32_t i2c_speed_in_khz;
 +      uint32_t dmdata_alloc_size;
        unsigned int max_cursor_size;
        unsigned int max_video_width;
        int linear_pitch_alignment;
@@@ -77,6 -76,7 +77,7 @@@
        bool is_apu;
        bool dual_link_dvi;
        bool post_blend_color_processing;
+       bool force_dp_tps4_for_cp2520;
  };
  
  struct dc_dcc_surface_param {
@@@ -169,12 -169,6 +170,12 @@@ struct dc_config 
        bool disable_disp_pll_sharing;
  };
  
 +enum visual_confirm {
 +      VISUAL_CONFIRM_DISABLE = 0,
 +      VISUAL_CONFIRM_SURFACE = 1,
 +      VISUAL_CONFIRM_HDR = 2,
 +};
 +
  enum dcc_option {
        DCC_ENABLE = 0,
        DCC_DISABLE = 1,
@@@ -192,10 -186,6 +193,10 @@@ enum wm_report_mode 
        WM_REPORT_OVERRIDE = 1,
  };
  
 +/*
 + * For any clocks that may differ per pipe
 + * only the max is stored in this structure
 + */
  struct dc_clocks {
        int dispclk_khz;
        int max_supported_dppclk_khz;
        int socclk_khz;
        int dcfclk_deep_sleep_khz;
        int fclk_khz;
 +      int phyclk_khz;
  };
  
 -struct dc_debug {
 -      bool surface_visual_confirm;
 +struct dc_debug_options {
 +      enum visual_confirm visual_confirm;
        bool sanity_checks;
        bool max_disp_clk;
        bool surface_trace;
        int urgent_latency_ns;
        int percent_of_ideal_drambw;
        int dram_clock_change_latency_ns;
 +      bool optimized_watermark;
        int always_scale;
        bool disable_pplib_clock_request;
        bool disable_clock_gate;
        bool always_use_regamma;
        bool p010_mpo_support;
        bool recovery_enabled;
 +      bool avoid_vbios_exec_table;
 +      bool scl_reset_length10;
 +      bool hdmi20_disable;
 +      bool skip_detection_link_training;
 +};
  
 +struct dc_debug_data {
 +      uint32_t ltFailCount;
 +      uint32_t i2cErrorCount;
 +      uint32_t auxErrorCount;
  };
 +
 +
  struct dc_state;
  struct resource_pool;
  struct dce_hwseq;
@@@ -276,7 -253,8 +277,7 @@@ struct dc 
        struct dc_caps caps;
        struct dc_cap_funcs cap_funcs;
        struct dc_config config;
 -      struct dc_debug debug;
 -
 +      struct dc_debug_options debug;
        struct dc_context *ctx;
  
        uint8_t link_count;
        /* Inputs into BW and WM calculations. */
        struct bw_calcs_dceip *bw_dceip;
        struct bw_calcs_vbios *bw_vbios;
 -#ifdef CONFIG_DRM_AMD_DC_DCN1_0
 +#ifdef CONFIG_X86
        struct dcn_soc_bounding_box *dcn_soc;
        struct dcn_ip_params *dcn_ip;
        struct display_mode_lib dml;
        bool apply_edp_fast_boot_optimization;
  
        /* FBC compressor */
 -#if defined(CONFIG_DRM_AMD_DC_FBC)
        struct compressor *fbc_compressor;
 -#endif
 +
 +      struct dc_debug_data debug_data;
  };
  
  enum frame_buffer_mode {
@@@ -381,7 -359,6 +382,7 @@@ enum dc_transfer_func_type 
        TF_TYPE_PREDEFINED,
        TF_TYPE_DISTRIBUTED_POINTS,
        TF_TYPE_BYPASS,
 +      TF_TYPE_HWPWL
  };
  
  struct dc_transfer_func_distributed_points {
@@@ -401,22 -378,16 +402,22 @@@ enum dc_transfer_func_predefined 
        TRANSFER_FUNCTION_PQ,
        TRANSFER_FUNCTION_LINEAR,
        TRANSFER_FUNCTION_UNITY,
 +      TRANSFER_FUNCTION_HLG,
 +      TRANSFER_FUNCTION_HLG12,
 +      TRANSFER_FUNCTION_GAMMA22
  };
  
  struct dc_transfer_func {
        struct kref refcount;
 -      struct dc_transfer_func_distributed_points tf_pts;
        enum dc_transfer_func_type type;
        enum dc_transfer_func_predefined tf;
        /* FP16 1.0 reference level in nits, default is 80 nits, only for PQ*/
        uint32_t sdr_ref_white_level;
        struct dc_context *ctx;
 +      union {
 +              struct pwl_params pwl;
 +              struct dc_transfer_func_distributed_points tf_pts;
 +      };
  };
  
  /*
@@@ -646,14 -617,9 +647,14 @@@ struct dpcd_caps 
        struct dc_dongle_caps dongle_caps;
  
        uint32_t sink_dev_id;
 +      int8_t sink_dev_id_str[6];
 +      int8_t sink_hw_revision;
 +      int8_t sink_fw_revision[2];
 +
        uint32_t branch_dev_id;
        int8_t branch_dev_name[6];
        int8_t branch_hw_revision;
 +      int8_t branch_fw_revision[2];
  
        bool allow_invalid_MSA_timing_param;
        bool panel_mode_edp;
@@@ -696,13 -662,9 +697,13 @@@ struct dc_sink 
        struct dc_link *link;
        struct dc_context *ctx;
  
 +      uint32_t sink_id;
 +
        /* private to dc_sink.c */
 +      // refcount must be the last member in dc_sink, since we want the
 +      // sink structure to be logically cloneable up to (but not including)
 +      // refcount
        struct kref refcount;
 -
  };
  
  void dc_sink_retain(struct dc_sink *sink);
diff --combined drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index cd8c22839227a2e4958003378f2718ce2b92e8a1,34dac84066a0bc32e16aecaa5567d99d1d27ffd8..c39934f8677ffd591458005152de5f6ff0a1036e
  #include "reg_helper.h"
  #include "dce/dce_abm.h"
  #include "dce/dce_dmcu.h"
 +#include "dce/dce_aux.h"
 +
 +const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
 +      .rob_buffer_size_kbytes = 64,
 +      .det_buffer_size_kbytes = 164,
 +      .dpte_buffer_size_in_pte_reqs = 42,
 +      .dpp_output_buffer_pixels = 2560,
 +      .opp_output_buffer_lines = 1,
 +      .pixel_chunk_size_kbytes = 8,
 +      .pte_enable = 1,
 +      .pte_chunk_size_kbytes = 2,
 +      .meta_chunk_size_kbytes = 2,
 +      .writeback_chunk_size_kbytes = 2,
 +      .line_buffer_size_bits = 589824,
 +      .max_line_buffer_lines = 12,
 +      .IsLineBufferBppFixed = 0,
 +      .LineBufferFixedBpp = -1,
 +      .writeback_luma_buffer_size_kbytes = 12,
 +      .writeback_chroma_buffer_size_kbytes = 8,
 +      .max_num_dpp = 4,
 +      .max_num_wb = 2,
 +      .max_dchub_pscl_bw_pix_per_clk = 4,
 +      .max_pscl_lb_bw_pix_per_clk = 2,
 +      .max_lb_vscl_bw_pix_per_clk = 4,
 +      .max_vscl_hscl_bw_pix_per_clk = 4,
 +      .max_hscl_ratio = 4,
 +      .max_vscl_ratio = 4,
 +      .hscl_mults = 4,
 +      .vscl_mults = 4,
 +      .max_hscl_taps = 8,
 +      .max_vscl_taps = 8,
 +      .dispclk_ramp_margin_percent = 1,
 +      .underscan_factor = 1.10,
 +      .min_vblank_lines = 14,
 +      .dppclk_delay_subtotal = 90,
 +      .dispclk_delay_subtotal = 42,
 +      .dcfclk_cstate_latency = 10,
 +      .max_inter_dcn_tile_repeaters = 8,
 +      .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0,
 +      .bug_forcing_LC_req_same_size_fixed = 0,
 +};
 +
 +const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = {
 +      .sr_exit_time_us = 9.0,
 +      .sr_enter_plus_exit_time_us = 11.0,
 +      .urgent_latency_us = 4.0,
 +      .writeback_latency_us = 12.0,
 +      .ideal_dram_bw_after_urgent_percent = 80.0,
 +      .max_request_size_bytes = 256,
 +      .downspread_percent = 0.5,
 +      .dram_page_open_time_ns = 50.0,
 +      .dram_rw_turnaround_time_ns = 17.5,
 +      .dram_return_buffer_per_channel_bytes = 8192,
 +      .round_trip_ping_latency_dcfclk_cycles = 128,
 +      .urgent_out_of_order_return_per_channel_bytes = 256,
 +      .channel_interleave_bytes = 256,
 +      .num_banks = 8,
 +      .num_chans = 2,
 +      .vmm_page_size_bytes = 4096,
 +      .dram_clock_change_latency_us = 17.0,
 +      .writeback_dram_clock_change_latency_us = 23.0,
 +      .return_bus_width_bytes = 64,
 +};
  
  #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
        #define mmDP0_DP_DPHY_INTERNAL_CTRL             0x210f
@@@ -357,21 -294,6 +357,21 @@@ static const struct dcn10_opp_mask opp_
                OPP_MASK_SH_LIST_DCN10(_MASK),
  };
  
 +#define aux_engine_regs(id)\
 +[id] = {\
 +      AUX_COMMON_REG_LIST(id), \
 +      .AUX_RESET_MASK = 0 \
 +}
 +
 +static const struct dce110_aux_registers aux_engine_regs[] = {
 +              aux_engine_regs(0),
 +              aux_engine_regs(1),
 +              aux_engine_regs(2),
 +              aux_engine_regs(3),
 +              aux_engine_regs(4),
 +              aux_engine_regs(5)
 +};
 +
  #define tf_regs(id)\
  [id] = {\
        TF_REG_LIST_DCN10(id),\
@@@ -495,14 -417,13 +495,14 @@@ static const struct dce110_clk_src_mas
  
  static const struct resource_caps res_cap = {
                .num_timing_generator = 4,
 +              .num_opp = 4,
                .num_video_plane = 4,
                .num_audio = 4,
                .num_stream_encoder = 4,
                .num_pll = 4,
  };
  
 -static const struct dc_debug debug_defaults_drv = {
 +static const struct dc_debug_options debug_defaults_drv = {
                .sanity_checks = true,
                .disable_dmcu = true,
                .force_abm_enable = false,
                 */
                .min_disp_clk_khz = 100000,
  
 -              .disable_pplib_clock_request = true,
 +              .disable_pplib_clock_request = false,
                .disable_pplib_wm_range = false,
                .pplib_wm_report_mode = WM_REPORT_DEFAULT,
                .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
                .max_downscale_src_width = 3840,
  };
  
 -static const struct dc_debug debug_defaults_diags = {
 +static const struct dc_debug_options debug_defaults_diags = {
                .disable_dmcu = true,
                .force_abm_enable = false,
                .timing_trace = true,
@@@ -594,23 -515,6 +594,23 @@@ static struct output_pixel_processor *d
        return &opp->base;
  }
  
 +struct engine *dcn10_aux_engine_create(
 +      struct dc_context *ctx,
 +      uint32_t inst)
 +{
 +      struct aux_engine_dce110 *aux_engine =
 +              kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
 +
 +      if (!aux_engine)
 +              return NULL;
 +
 +      dce110_aux_engine_construct(aux_engine, ctx, inst,
 +                                  SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
 +                                  &aux_engine_regs[inst]);
 +
 +      return &aux_engine->base.base;
 +}
 +
  static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
  {
        struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@@ -776,7 -680,6 +776,7 @@@ static struct dce_hwseq *dcn10_hwseq_cr
                hws->masks = &hwseq_mask;
                hws->wa.DEGVIDCN10_253 = true;
                hws->wa.false_optc_underflow = true;
 +              hws->wa.DEGVIDCN10_254 = true;
        }
        return hws;
  }
@@@ -859,9 -762,6 +859,9 @@@ static void destruct(struct dcn10_resou
                        kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
                        pool->base.timing_generators[i] = NULL;
                }
 +
 +              if (pool->base.engines[i] != NULL)
 +                      pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
        }
  
        for (i = 0; i < pool->base.stream_enc_count; i++)
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
  
 -      if (pool->base.display_clock != NULL)
 -              dce_disp_clk_destroy(&pool->base.display_clock);
 +      if (pool->base.dccg != NULL)
 +              dce_dccg_destroy(&pool->base.dccg);
  
        kfree(pool->base.pp_smu);
  }
@@@ -1071,11 -971,11 +1071,11 @@@ static enum dc_status dcn10_validate_pl
        return DC_OK;
  }
  
 -static struct dc_cap_funcs cap_funcs = {
 +static const struct dc_cap_funcs cap_funcs = {
        .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
  };
  
 -static struct resource_funcs dcn10_res_pool_funcs = {
 +static const struct resource_funcs dcn10_res_pool_funcs = {
        .destroy = dcn10_destroy_resource_pool,
        .link_enc_create = dcn10_link_encoder_create,
        .validate_bandwidth = dcn_validate_bandwidth,
@@@ -1127,6 -1027,8 +1127,8 @@@ static bool construct
        dc->caps.max_slave_planes = 1;
        dc->caps.is_apu = true;
        dc->caps.post_blend_color_processing = false;
+       /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+       dc->caps.force_dp_tps4_for_cp2520 = true;
  
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
                }
        }
  
 -      pool->base.display_clock = dce120_disp_clk_create(ctx);
 -      if (pool->base.display_clock == NULL) {
 +      pool->base.dccg = dcn1_dccg_create(ctx);
 +      if (pool->base.dccg == NULL) {
                dm_error("DC: failed to create display clock!\n");
                BREAK_TO_DEBUGGER();
                goto fail;
                        goto fail;
                }
  
 +              pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
 +              if (pool->base.engines[i] == NULL) {
 +                      BREAK_TO_DEBUGGER();
 +                      dm_error(
 +                              "DC:failed to create aux engine!!\n");
 +                      goto fail;
 +              }
 +
                /* check next valid pipe */
                j++;
        }
diff --combined drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index f4f366b26fd151a24046ee2fbb81884e7e939db9,29914700ee82f5d8d09de71ff8fadb7b28bfaca3..cb3a5b1737c888fc040825dff14021f916447493
@@@ -224,8 -224,14 +224,10 @@@ static int append_vbios_pptable(struct 
        ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
        ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
  
 -      /* 0xFFFF will disable the ACG feature */
 -      if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
 -              ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
 -              ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
 -      }
 +      ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
  
+       ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
        return 0;
  }
  
diff --combined drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 207532c05eb86578d2f5ad94c0454e832d1a7c34,540b59fb41038fcbed3f16528edf9f85e3976071..9b2720b41571f245a1ba5ad677bb0566d95ca207
@@@ -49,12 -49,12 +49,12 @@@ static int etnaviv_open(struct drm_devi
  
        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];
 +              struct drm_sched_rq *rq;
  
                if (gpu) {
 -                      drm_sched_entity_init(&gpu->sched,
 -                              &ctx->sched_entity[i],
 -                              &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
 -                              NULL);
 +                      rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 +                      drm_sched_entity_init(&ctx->sched_entity[i],
 +                                            &rq, 1, NULL);
                        }
        }
  
@@@ -78,7 -78,8 +78,7 @@@ static void etnaviv_postclose(struct dr
                                gpu->lastctx = NULL;
                        mutex_unlock(&gpu->lock);
  
 -                      drm_sched_entity_fini(&gpu->sched,
 -                                            &ctx->sched_entity[i]);
 +                      drm_sched_entity_destroy(&ctx->sched_entity[i]);
                }
        }
  
@@@ -630,8 -631,11 +630,11 @@@ static struct platform_driver etnaviv_p
        },
  };
  
+ static struct platform_device *etnaviv_drm;
  static int __init etnaviv_init(void)
  {
+       struct platform_device *pdev;
        int ret;
        struct device_node *np;
  
  
        ret = platform_driver_register(&etnaviv_platform_driver);
        if (ret != 0)
-               platform_driver_unregister(&etnaviv_gpu_driver);
+               goto unregister_gpu_driver;
  
        /*
         * If the DT contains at least one available GPU device, instantiate
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-               platform_device_register_simple("etnaviv", -1, NULL, 0);
+               pdev = platform_device_register_simple("etnaviv", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev)) {
+                       ret = PTR_ERR(pdev);
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               etnaviv_drm = pdev;
                of_node_put(np);
                break;
        }
  
+       return 0;
+ unregister_platform_driver:
+       platform_driver_unregister(&etnaviv_platform_driver);
+ unregister_gpu_driver:
+       platform_driver_unregister(&etnaviv_gpu_driver);
        return ret;
  }
  module_init(etnaviv_init);
  
  static void __exit etnaviv_exit(void)
  {
-       platform_driver_unregister(&etnaviv_gpu_driver);
+       platform_device_unregister(etnaviv_drm);
        platform_driver_unregister(&etnaviv_platform_driver);
+       platform_driver_unregister(&etnaviv_gpu_driver);
  }
  module_exit(etnaviv_exit);
  
diff --combined drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 590e44b0d96396090755f8bf5602fd38e2aeac3e,50d6b88cb7aab3f99025829e1f544aae957d6d7f..3949f18afb3527e8b18fdcffc717af46ba45276b
@@@ -10,6 -10,7 +10,7 @@@
  #include "etnaviv_gem.h"
  #include "etnaviv_gpu.h"
  #include "etnaviv_sched.h"
+ #include "state.xml.h"
  
  static int etnaviv_job_hang_limit = 0;
  module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@@ -85,6 -86,29 +86,29 @@@ static void etnaviv_sched_timedout_job(
  {
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
+       u32 dma_addr;
+       int change;
+       /*
+        * If the GPU managed to complete this job's fence, the timeout is
+        * spurious. Bail out.
+        */
+       if (fence_completed(gpu, submit->out_fence->seqno))
+               return;
+       /*
+        * If the GPU is still making forward progress on the front-end (which
+        * should never loop) we shift out the timeout to give it a chance to
+        * finish the job.
+        */
+       dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+       change = dma_addr - gpu->hangcheck_dma_addr;
+       if (change < 0 || change > 16) {
+               gpu->hangcheck_dma_addr = dma_addr;
+               schedule_delayed_work(&sched_job->work_tdr,
+                                     sched_job->sched->timeout);
+               return;
+       }
  
        /* block scheduler */
        kthread_park(gpu->sched.thread);
@@@ -118,8 -142,8 +142,8 @@@ int etnaviv_sched_push_job(struct drm_s
  {
        int ret;
  
 -      ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
 -                               sched_entity, submit->cmdbuf.ctx);
 +      ret = drm_sched_job_init(&submit->sched_job, sched_entity,
 +                               submit->cmdbuf.ctx);
        if (ret)
                return ret;
  
diff --combined drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0651e63b25fb7bc8a3c748810f2837a690504c1b,7f562410f9cf8aab7c47462fcbe7fd07fea68c73..45e89b1e048183164e218c5aa868dca458ab93a4
@@@ -172,7 -172,6 +172,7 @@@ struct decode_info 
  #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
  #define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
  #define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
 +#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)
  
  #define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
  #define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
@@@ -863,6 -862,7 +863,7 @@@ static int cmd_reg_handler(struct parse
  {
        struct intel_vgpu *vgpu = s->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
+       u32 ctx_sr_ctl;
  
        if (offset + 4 > gvt->device_info.mmio_size) {
                gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
                patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
        }
  
+       /* TODO
+        * Right now we only scan LRI commands on KBL and in inhibit context.
+        * That is good enough to support initializing mmio by LRI command in
+        * vgpu inhibit context on KBL.
+        */
+       if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+                       intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+                       !strncmp(cmd, "lri", 3)) {
+               intel_gvt_hypervisor_read_gpa(s->vgpu,
+                       s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+               /* check inhibit context */
+               if (ctx_sr_ctl & 1) {
+                       u32 data = cmd_val(s, index + 1);
+                       if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+                               intel_vgpu_mask_mmio_write(vgpu,
+                                                       offset, &data, 4);
+                       else
+                               vgpu_vreg(vgpu, offset) = data;
+               }
+       }
        /* TODO: Update the global mask if this MMIO is a masked-MMIO */
        intel_gvt_mmio_set_cmd_accessed(gvt, offset);
        return 0;
@@@ -1257,9 -1279,7 +1280,9 @@@ static int gen8_check_mi_display_flip(s
        if (!info->async_flip)
                return 0;
  
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 +      if (IS_SKYLAKE(dev_priv)
 +              || IS_KABYLAKE(dev_priv)
 +              || IS_BROXTON(dev_priv)) {
                stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
                tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
                                GENMASK(12, 10)) >> 10;
@@@ -1287,9 -1307,7 +1310,9 @@@ static int gen8_update_plane_mmio_from_
  
        set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
                      info->surf_val << 12);
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
 +      if (IS_SKYLAKE(dev_priv)
 +              || IS_KABYLAKE(dev_priv)
 +              || IS_BROXTON(dev_priv)) {
                set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
                              info->stride_val);
                set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@@ -1313,9 -1331,7 +1336,9 @@@ static int decode_mi_display_flip(struc
  
        if (IS_BROADWELL(dev_priv))
                return gen8_decode_mi_display_flip(s, info);
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 +      if (IS_SKYLAKE(dev_priv)
 +              || IS_KABYLAKE(dev_priv)
 +              || IS_BROXTON(dev_priv))
                return skl_decode_mi_display_flip(s, info);
  
        return -ENODEV;
  static int check_mi_display_flip(struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
  {
 -      struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 -
 -      if (IS_BROADWELL(dev_priv)
 -              || IS_SKYLAKE(dev_priv)
 -              || IS_KABYLAKE(dev_priv))
 -              return gen8_check_mi_display_flip(s, info);
 -      return -ENODEV;
 +      return gen8_check_mi_display_flip(s, info);
  }
  
  static int update_plane_mmio_from_mi_display_flip(
                struct parser_exec_state *s,
                struct mi_display_flip_command_info *info)
  {
 -      struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 -
 -      if (IS_BROADWELL(dev_priv)
 -              || IS_SKYLAKE(dev_priv)
 -              || IS_KABYLAKE(dev_priv))
 -              return gen8_update_plane_mmio_from_mi_display_flip(s, info);
 -      return -ENODEV;
 +      return gen8_update_plane_mmio_from_mi_display_flip(s, info);
  }
  
  static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
@@@ -1610,10 -1638,15 +1633,10 @@@ static int copy_gma_to_hva(struct intel
   */
  static int batch_buffer_needs_scan(struct parser_exec_state *s)
  {
 -      struct intel_gvt *gvt = s->vgpu->gvt;
 -
 -      if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
 -              || IS_KABYLAKE(gvt->dev_priv)) {
 -              /* BDW decides privilege based on address space */
 -              if (cmd_val(s, 0) & (1 << 8) &&
 +      /* Decide privilege based on address space */
 +      if (cmd_val(s, 0) & (1 << 8) &&
                        !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
 -                      return 0;
 -      }
 +              return 0;
        return 1;
  }
  
@@@ -2339,9 -2372,6 +2362,9 @@@ static struct cmd_info cmd_info[] = 
        {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
                0, 16, NULL},
  
 +      {"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
 +              0, 16, NULL},
 +
        {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
  
        {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
diff --combined drivers/gpu/drm/i915/gvt/display.c
index 6ee50cb328f8efb4f8d5c94ffc731ca7195884dc,4b072ade8c389372bcf1e161f6257305dec043bd..3019dbc39aef22573fa04b7120fc9ad021d83c15
@@@ -171,29 -171,6 +171,29 @@@ static void emulate_monitor_status_chan
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        int pipe;
  
 +      if (IS_BROXTON(dev_priv)) {
 +              vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
 +                      BXT_DE_PORT_HP_DDIB |
 +                      BXT_DE_PORT_HP_DDIC);
 +
 +              if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
 +                      vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 +                              BXT_DE_PORT_HP_DDIA;
 +              }
 +
 +              if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
 +                      vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 +                              BXT_DE_PORT_HP_DDIB;
 +              }
 +
 +              if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
 +                      vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 +                              BXT_DE_PORT_HP_DDIC;
 +              }
 +
 +              return;
 +      }
 +
        vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
                        SDE_PORTC_HOTPLUG_CPT |
                        SDE_PORTD_HOTPLUG_CPT);
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_B << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_C << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_D << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
        for_each_pipe(dev_priv, pipe) {
                vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
                vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
 -              vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
 -              vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
 +              vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
 +              vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
        }
  
        vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
@@@ -360,28 -337,26 +360,28 @@@ void intel_gvt_check_vblank_emulation(s
        struct intel_gvt_irq *irq = &gvt->irq;
        struct intel_vgpu *vgpu;
        int pipe, id;
 +      int found = false;
  
 -      if (WARN_ON(!mutex_is_locked(&gvt->lock)))
 -              return;
 -
 +      mutex_lock(&gvt->lock);
        for_each_active_vgpu(gvt, vgpu, id) {
                for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
 -                      if (pipe_is_enabled(vgpu, pipe))
 -                              goto out;
 +                      if (pipe_is_enabled(vgpu, pipe)) {
 +                              found = true;
 +                              break;
 +                      }
                }
 +              if (found)
 +                      break;
        }
  
        /* all the pipes are disabled */
 -      hrtimer_cancel(&irq->vblank_timer.timer);
 -      return;
 -
 -out:
 -      hrtimer_start(&irq->vblank_timer.timer,
 -              ktime_add_ns(ktime_get(), irq->vblank_timer.period),
 -              HRTIMER_MODE_ABS);
 -
 +      if (!found)
 +              hrtimer_cancel(&irq->vblank_timer.timer);
 +      else
 +              hrtimer_start(&irq->vblank_timer.timer,
 +                      ktime_add_ns(ktime_get(), irq->vblank_timer.period),
 +                      HRTIMER_MODE_ABS);
 +      mutex_unlock(&gvt->lock);
  }
  
  static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
@@@ -418,10 -393,8 +418,10 @@@ static void emulate_vblank(struct intel
  {
        int pipe;
  
 +      mutex_lock(&vgpu->vgpu_lock);
        for_each_pipe(vgpu->gvt->dev_priv, pipe)
                emulate_vblank_on_pipe(vgpu, pipe);
 +      mutex_unlock(&vgpu->vgpu_lock);
  }
  
  /**
@@@ -436,10 -409,11 +436,10 @@@ void intel_gvt_emulate_vblank(struct in
        struct intel_vgpu *vgpu;
        int id;
  
 -      if (WARN_ON(!mutex_is_locked(&gvt->lock)))
 -              return;
 -
 +      mutex_lock(&gvt->lock);
        for_each_active_vgpu(gvt, vgpu, id)
                emulate_vblank(vgpu);
 +      mutex_unlock(&gvt->lock);
  }
  
  /**
diff --combined drivers/gpu/drm/i915/gvt/gtt.c
index 39980dfbbebde6ff96942eb2ee3bd6875bc880ef,4efec8fa6c1d30aa9c7853131ad299fb8bbef169..00aad8164dec2037f8fc8709298aa3dba2c8c0fa
@@@ -216,22 -216,16 +216,22 @@@ static struct gtt_type_table_entry gtt_
                        GTT_TYPE_PPGTT_PDE_PT,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_2M_ENTRY),
 +      /* We take IPS bit as 'PSE' for PTE level. */
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_INVALID,
 -                      GTT_TYPE_INVALID),
 +                      GTT_TYPE_PPGTT_PTE_64K_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
                        GTT_TYPE_PPGTT_PTE_PT,
                        GTT_TYPE_INVALID,
 -                      GTT_TYPE_INVALID),
 +                      GTT_TYPE_PPGTT_PTE_64K_ENTRY),
 +      GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
 +                      GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 +                      GTT_TYPE_PPGTT_PTE_PT,
 +                      GTT_TYPE_INVALID,
 +                      GTT_TYPE_PPGTT_PTE_64K_ENTRY),
        GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
                        GTT_TYPE_PPGTT_PDE_ENTRY,
                        GTT_TYPE_PPGTT_PDE_PT,
@@@ -345,14 -339,8 +345,14 @@@ static inline int gtt_set_entry64(void 
  
  #define ADDR_1G_MASK  GENMASK_ULL(GTT_HAW - 1, 30)
  #define ADDR_2M_MASK  GENMASK_ULL(GTT_HAW - 1, 21)
 +#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
  #define ADDR_4K_MASK  GENMASK_ULL(GTT_HAW - 1, 12)
  
 +#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
 +#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */
 +
 +#define GTT_64K_PTE_STRIDE 16
 +
  static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
  {
        unsigned long pfn;
                pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
        else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
                pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
 +      else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
 +              pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
        else
                pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
        return pfn;
@@@ -376,9 -362,6 +376,9 @@@ static void gen8_gtt_set_pfn(struct int
        } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
                e->val64 &= ~ADDR_2M_MASK;
                pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
 +      } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
 +              e->val64 &= ~ADDR_64K_MASK;
 +              pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
        } else {
                e->val64 &= ~ADDR_4K_MASK;
                pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
  
  static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
  {
 -      /* Entry doesn't have PSE bit. */
 -      if (get_pse_type(e->type) == GTT_TYPE_INVALID)
 -              return false;
 +      return !!(e->val64 & _PAGE_PSE);
 +}
  
 -      e->type = get_entry_type(e->type);
 -      if (!(e->val64 & _PAGE_PSE))
 +static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
 +{
 +      if (gen8_gtt_test_pse(e)) {
 +              switch (e->type) {
 +              case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 +                      e->val64 &= ~_PAGE_PSE;
 +                      e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
 +                      break;
 +              case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
 +                      e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
 +                      e->val64 &= ~_PAGE_PSE;
 +                      break;
 +              default:
 +                      WARN_ON(1);
 +              }
 +      }
 +}
 +
 +static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
 +{
 +      if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
                return false;
  
 -      e->type = get_pse_type(e->type);
 -      return true;
 +      return !!(e->val64 & GEN8_PDE_IPS_64K);
 +}
 +
 +static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
 +{
 +      if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
 +              return;
 +
 +      e->val64 &= ~GEN8_PDE_IPS_64K;
  }
  
  static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
@@@ -450,21 -408,6 +450,21 @@@ static void gtt_entry_set_present(struc
        e->val64 |= _PAGE_PRESENT;
  }
  
 +static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
 +{
 +      return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
 +}
 +
 +static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
 +{
 +      e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
 +}
 +
 +static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
 +{
 +      e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
 +}
 +
  /*
   * Per-platform GMA routines.
   */
@@@ -497,12 -440,6 +497,12 @@@ static struct intel_gvt_gtt_pte_ops gen
        .set_present = gtt_entry_set_present,
        .test_present = gen8_gtt_test_present,
        .test_pse = gen8_gtt_test_pse,
 +      .clear_pse = gen8_gtt_clear_pse,
 +      .clear_ips = gen8_gtt_clear_ips,
 +      .test_ips = gen8_gtt_test_ips,
 +      .clear_64k_splited = gen8_gtt_clear_64k_splited,
 +      .set_64k_splited = gen8_gtt_set_64k_splited,
 +      .test_64k_splited = gen8_gtt_test_64k_splited,
        .get_pfn = gen8_gtt_get_pfn,
        .set_pfn = gen8_gtt_set_pfn,
  };
@@@ -516,27 -453,6 +516,27 @@@ static struct intel_gvt_gtt_gma_ops gen
        .gma_to_pml4_index = gen8_gma_to_pml4_index,
  };
  
 +/* Update entry type per pse and ips bit. */
 +static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
 +      struct intel_gvt_gtt_entry *entry, bool ips)
 +{
 +      switch (entry->type) {
 +      case GTT_TYPE_PPGTT_PDE_ENTRY:
 +      case GTT_TYPE_PPGTT_PDP_ENTRY:
 +              if (pte_ops->test_pse(entry))
 +                      entry->type = get_pse_type(entry->type);
 +              break;
 +      case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
 +              if (ips)
 +                      entry->type = get_pse_type(entry->type);
 +              break;
 +      default:
 +              GEM_BUG_ON(!gtt_type_is_entry(entry->type));
 +      }
 +
 +      GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
 +}
 +
  /*
   * MM helpers.
   */
@@@ -552,7 -468,8 +552,7 @@@ static void _ppgtt_get_root_entry(struc
        pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
                           mm->ppgtt_mm.shadow_pdps,
                           entry, index, false, 0, mm->vgpu);
 -
 -      pte_ops->test_pse(entry);
 +      update_entry_type_for_real(pte_ops, entry, false);
  }
  
  static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
@@@ -657,8 -574,7 +657,8 @@@ static inline int ppgtt_spt_get_entry
        if (ret)
                return ret;
  
 -      ops->test_pse(e);
 +      update_entry_type_for_real(ops, e, guest ?
 +                                 spt->guest_page.pde_ips : false);
  
        gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
                    type, e->type, index, e->val64);
@@@ -737,12 -653,10 +737,12 @@@ static void ppgtt_free_spt(struct intel
  
        radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
  
 -      if (spt->guest_page.oos_page)
 -              detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
 +      if (spt->guest_page.gfn) {
 +              if (spt->guest_page.oos_page)
 +                      detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
  
 -      intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
 +              intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
 +      }
  
        list_del_init(&spt->post_shadow_list);
        free_spt(spt);
@@@ -803,9 -717,8 +803,9 @@@ static inline struct intel_vgpu_ppgtt_s
  
  static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
  
 +/* Allocate shadow page table without guest page. */
  static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
 -              struct intel_vgpu *vgpu, int type, unsigned long gfn)
 +              struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
  {
        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
@@@ -840,12 -753,26 +840,12 @@@ retry
        spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
        spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
  
 -      /*
 -       * Init guest_page.
 -       */
 -      spt->guest_page.type = type;
 -      spt->guest_page.gfn = gfn;
 -
 -      ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
 -                                      ppgtt_write_protection_handler, spt);
 -      if (ret)
 -              goto err_unmap_dma;
 -
        ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
        if (ret)
 -              goto err_unreg_page_track;
 +              goto err_unmap_dma;
  
 -      trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
        return spt;
  
 -err_unreg_page_track:
 -      intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
  err_unmap_dma:
        dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
  err_free_spt:
        return ERR_PTR(ret);
  }
  
 +/* Allocate shadow page table associated with specific gfn. */
 +static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
 +              struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
 +              unsigned long gfn, bool guest_pde_ips)
 +{
 +      struct intel_vgpu_ppgtt_spt *spt;
 +      int ret;
 +
 +      spt = ppgtt_alloc_spt(vgpu, type);
 +      if (IS_ERR(spt))
 +              return spt;
 +
 +      /*
 +       * Init guest_page.
 +       */
 +      ret = intel_vgpu_register_page_track(vgpu, gfn,
 +                      ppgtt_write_protection_handler, spt);
 +      if (ret) {
 +              ppgtt_free_spt(spt);
 +              return ERR_PTR(ret);
 +      }
 +
 +      spt->guest_page.type = type;
 +      spt->guest_page.gfn = gfn;
 +      spt->guest_page.pde_ips = guest_pde_ips;
 +
 +      trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
 +
 +      return spt;
 +}
 +
  #define pt_entry_size_shift(spt) \
        ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
  
        (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
  
  #define for_each_present_guest_entry(spt, e, i) \
 -      for (i = 0; i < pt_entries(spt); i++) \
 +      for (i = 0; i < pt_entries(spt); \
 +           i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
                if (!ppgtt_get_guest_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
  
  #define for_each_present_shadow_entry(spt, e, i) \
 -      for (i = 0; i < pt_entries(spt); i++) \
 +      for (i = 0; i < pt_entries(spt); \
 +           i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
                if (!ppgtt_get_shadow_entry(spt, e, i) && \
                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
  
 -static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
 +#define for_each_shadow_entry(spt, e, i) \
 +      for (i = 0; i < pt_entries(spt); \
 +           i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
 +              if (!ppgtt_get_shadow_entry(spt, e, i))
 +
 +static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
  {
        int v = atomic_read(&spt->refcount);
  
        trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
 -
        atomic_inc(&spt->refcount);
  }
  
 +static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
 +{
 +      int v = atomic_read(&spt->refcount);
 +
 +      trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
 +      return atomic_dec_return(&spt->refcount);
 +}
 +
  static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
  
  static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
@@@ -961,8 -843,7 +961,8 @@@ static inline void ppgtt_invalidate_pte
        pfn = ops->get_pfn(entry);
        type = spt->shadow_page.type;
  
 -      if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
 +      /* Uninitialized spte or unshadowed spte. */
 +      if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
                return;
  
        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
@@@ -974,11 -855,14 +974,11 @@@ static int ppgtt_invalidate_spt(struct 
        struct intel_gvt_gtt_entry e;
        unsigned long index;
        int ret;
 -      int v = atomic_read(&spt->refcount);
  
        trace_spt_change(spt->vgpu->id, "die", spt,
                        spt->guest_page.gfn, spt->shadow_page.type);
  
 -      trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
 -
 -      if (atomic_dec_return(&spt->refcount) > 0)
 +      if (ppgtt_put_spt(spt) > 0)
                return 0;
  
        for_each_present_shadow_entry(spt, &e, index) {
                        gvt_vdbg_mm("invalidate 4K entry\n");
                        ppgtt_invalidate_pte(spt, &e);
                        break;
 +              case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 +                      /* We don't set up 64K shadow entries so far. */
 +                      WARN(1, "suspicious 64K gtt entry\n");
 +                      continue;
                case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 +                      gvt_vdbg_mm("invalidate 2M entry\n");
 +                      continue;
                case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
 -                      WARN(1, "GVT doesn't support 2M/1GB page\n");
 +                      WARN(1, "GVT doesn't support 1GB page\n");
                        continue;
                case GTT_TYPE_PPGTT_PML4_ENTRY:
                case GTT_TYPE_PPGTT_PDP_ENTRY:
@@@ -1021,22 -899,6 +1021,22 @@@ fail
        return ret;
  }
  
 +static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
 +{
 +      struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 +
 +      if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
 +              u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
 +                      GAMW_ECO_ENABLE_64K_IPS_FIELD;
 +
 +              return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
 +      } else if (INTEL_GEN(dev_priv) >= 11) {
 +              /* 64K paging only controlled by IPS bit in PTE now. */
 +              return true;
 +      } else
 +              return false;
 +}
 +
  static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
  
  static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
  {
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
 +      bool ips = false;
        int ret;
  
        GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
  
 +      if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
 +              ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
 +
        spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
 -      if (spt)
 +      if (spt) {
                ppgtt_get_spt(spt);
 -      else {
 +
 +              if (ips != spt->guest_page.pde_ips) {
 +                      spt->guest_page.pde_ips = ips;
 +
 +                      gvt_dbg_mm("reshadow PDE since ips changed\n");
 +                      clear_page(spt->shadow_page.vaddr);
 +                      ret = ppgtt_populate_spt(spt);
 +                      if (ret) {
 +                              ppgtt_put_spt(spt);
 +                              goto err;
 +                      }
 +              }
 +      } else {
                int type = get_next_pt_type(we->type);
  
 -              spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
 +              spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
                if (IS_ERR(spt)) {
                        ret = PTR_ERR(spt);
 -                      goto fail;
 +                      goto err;
                }
  
                ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
                if (ret)
 -                      goto fail;
 +                      goto err_free_spt;
  
                ret = ppgtt_populate_spt(spt);
                if (ret)
 -                      goto fail;
 +                      goto err_free_spt;
  
                trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
                                 spt->shadow_page.type);
        }
        return spt;
 -fail:
 +
 +err_free_spt:
 +      ppgtt_free_spt(spt);
 +err:
        gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                     spt, we->val64, we->type);
        return ERR_PTR(ret);
@@@ -1105,118 -948,16 +1105,118 @@@ static inline void ppgtt_generate_shado
        se->type = ge->type;
        se->val64 = ge->val64;
  
 +      /* Because we always split 64KB pages, clear IPS in the shadow PDE. */
 +      if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
 +              ops->clear_ips(se);
 +
        ops->set_pfn(se, s->shadow_page.mfn);
  }
  
 +/**
 + * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
 + * are not met, negative if an error is found.
 + */
 +static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
 +      struct intel_gvt_gtt_entry *entry)
 +{
 +      struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 +      unsigned long pfn;
 +
 +      if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
 +              return 0;
 +
 +      pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
 +      if (pfn == INTEL_GVT_INVALID_ADDR)
 +              return -EINVAL;
 +
 +      return PageTransHuge(pfn_to_page(pfn));
 +}
 +
 +static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
 +      struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 +      struct intel_gvt_gtt_entry *se)
 +{
 +      struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 +      struct intel_vgpu_ppgtt_spt *sub_spt;
 +      struct intel_gvt_gtt_entry sub_se;
 +      unsigned long start_gfn;
 +      dma_addr_t dma_addr;
 +      unsigned long sub_index;
 +      int ret;
 +
 +      gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
 +
 +      start_gfn = ops->get_pfn(se);
 +
 +      sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
 +      if (IS_ERR(sub_spt))
 +              return PTR_ERR(sub_spt);
 +
 +      for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
 +              ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
 +                              start_gfn + sub_index, PAGE_SIZE, &dma_addr);
 +              if (ret) {
 +                      ppgtt_invalidate_spt(spt);
 +                      return ret;
 +              }
 +              sub_se.val64 = se->val64;
 +
 +              /* Copy the PAT field from PDE. */
 +              sub_se.val64 &= ~_PAGE_PAT;
 +              sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
 +
 +              ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
 +              ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
 +      }
 +
 +      /* Clear dirty field. */
 +      se->val64 &= ~_PAGE_DIRTY;
 +
 +      ops->clear_pse(se);
 +      ops->clear_ips(se);
 +      ops->set_pfn(se, sub_spt->shadow_page.mfn);
 +      ppgtt_set_shadow_entry(spt, se, index);
 +      return 0;
 +}
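
Illustrative sketch, not part of the patch: split_2MB_gtt_entry() above copies the PAT selector from its huge-page position (bit 12, _PAGE_PAT_LARGE) into its 4K PTE position (bit 7, _PAGE_PAT), which is why the shift is by 5. The macro values below are assumptions chosen to match the usual x86 page-table layout.

#include <assert.h>
#include <stdint.h>

/* Assumed x86 layout: PAT is bit 7 in a 4K PTE, bit 12 in a 2M/1G PDE. */
#define _PAGE_PAT	(1ULL << 7)
#define _PAGE_PAT_LARGE	(1ULL << 12)

int main(void)
{
	uint64_t pde = _PAGE_PAT_LARGE;		/* guest 2M entry with PAT set */
	uint64_t pte = pde;			/* sub-entry starts as a copy */

	pte &= ~_PAGE_PAT;			/* clear the 4K PAT position */
	pte |= (pde & _PAGE_PAT_LARGE) >> 5;	/* bit 12 -> bit 7 */

	assert(pte & _PAGE_PAT);		/* PAT preserved in the 4K PTE */
	return 0;
}
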
 +
 +static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
 +      struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
 +      struct intel_gvt_gtt_entry *se)
 +{
 +      struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
 +      struct intel_gvt_gtt_entry entry = *se;
 +      unsigned long start_gfn;
 +      dma_addr_t dma_addr;
 +      int i, ret;
 +
 +      gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
 +
 +      GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
 +
 +      start_gfn = ops->get_pfn(se);
 +
 +      entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
 +      ops->set_64k_splited(&entry);
 +
 +      for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
 +              ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
 +                                      start_gfn + i, PAGE_SIZE, &dma_addr);
 +              if (ret)
 +                      return ret;
 +
 +              ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
 +              ppgtt_set_shadow_entry(spt, &entry, index + i);
 +      }
 +      return 0;
 +}
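
Illustrative sketch, not part of the patch: a 64K guest page is backed by 16 consecutive 4K entries, so split_64KB_gtt_entry() above writes GTT_64K_PTE_STRIDE shadow PTEs starting at an index that is a multiple of the stride, and only those slots (PTE#0, #16, ..., #496 in a 512-entry table) ever carry a 64K entry. The stride value of 16 is an assumption mirroring the gvt definition.

#include <assert.h>

#define GTT_64K_PTE_STRIDE	16	/* assumed: 64K / 4K sub-entries */

/* Only indices that are a multiple of the stride hold a 64K entry;
 * the 15 slots following each one are never used by the guest. */
static int is_64k_slot(unsigned long index)
{
	return (index % GTT_64K_PTE_STRIDE) == 0;
}

int main(void)
{
	assert(is_64k_slot(0) && is_64k_slot(16) && is_64k_slot(496));
	assert(!is_64k_slot(1) && !is_64k_slot(17));
	return 0;
}
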
 +
  static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
        struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
        struct intel_gvt_gtt_entry *ge)
  {
        struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry se = *ge;
 -      unsigned long gfn;
 +      unsigned long gfn, page_size = PAGE_SIZE;
        dma_addr_t dma_addr;
        int ret;
  
        case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
                gvt_vdbg_mm("shadow 4K gtt entry\n");
                break;
 +      case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
 +              gvt_vdbg_mm("shadow 64K gtt entry\n");
 +              /*
 +               * The layout of a 64K page is special: the page size is
 +               * controlled by the upper PDE. To keep it simple, we always
 +               * split a 64K page into smaller 4K pages in the shadow PT.
 +               */
 +              return split_64KB_gtt_entry(vgpu, spt, index, &se);
        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
 +              gvt_vdbg_mm("shadow 2M gtt entry\n");
 +              ret = is_2MB_gtt_possible(vgpu, ge);
 +              if (ret == 0)
 +                      return split_2MB_gtt_entry(vgpu, spt, index, &se);
 +              else if (ret < 0)
 +                      return ret;
 +              page_size = I915_GTT_PAGE_SIZE_2M;
 +              break;
        case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
 -              gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
 +              gvt_vgpu_err("GVT doesn't support 1GB entry\n");
                return -EINVAL;
        default:
                GEM_BUG_ON(1);
        };
  
        /* direct shadow */
 -      ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
 +      ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
 +                                                    &dma_addr);
        if (ret)
                return -ENXIO;
  
@@@ -1338,12 -1062,8 +1338,12 @@@ static int ppgtt_handle_guest_entry_rem
                ret = ppgtt_invalidate_spt(s);
                if (ret)
                        goto fail;
 -      } else
 +      } else {
 +              /* We don't set up 64K shadow entries so far. */
 +              WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
 +                   "suspicious 64K entry\n");
                ppgtt_invalidate_pte(spt, se);
 +      }
  
        return 0;
  fail:
@@@ -1566,7 -1286,7 +1566,7 @@@ static int ppgtt_handle_guest_write_pag
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry old_se;
        int new_present;
 -      int ret;
 +      int i, ret;
  
        new_present = ops->test_present(we);
  
                goto fail;
  
        if (!new_present) {
 -              ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
 -              ppgtt_set_shadow_entry(spt, &old_se, index);
 +              /* For split 64KB entries, we need to clear them all. */
 +              if (ops->test_64k_splited(&old_se) &&
 +                  !(index % GTT_64K_PTE_STRIDE)) {
 +                      gvt_vdbg_mm("remove split 64K shadow entries\n");
 +                      for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
 +                              ops->clear_64k_splited(&old_se);
 +                              ops->set_pfn(&old_se,
 +                                      vgpu->gtt.scratch_pt[type].page_mfn);
 +                              ppgtt_set_shadow_entry(spt, &old_se, index + i);
 +                      }
 +              } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
 +                         old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
 +                      ops->clear_pse(&old_se);
 +                      ops->set_pfn(&old_se,
 +                                   vgpu->gtt.scratch_pt[type].page_mfn);
 +                      ppgtt_set_shadow_entry(spt, &old_se, index);
 +              } else {
 +                      ops->set_pfn(&old_se,
 +                                   vgpu->gtt.scratch_pt[type].page_mfn);
 +                      ppgtt_set_shadow_entry(spt, &old_se, index);
 +              }
        }
  
        return 0;
@@@ -1690,17 -1391,7 +1690,17 @@@ static int ppgtt_handle_guest_write_pag
  
        ppgtt_get_guest_entry(spt, &we, index);
  
 -      ops->test_pse(&we);
 +      /*
 +       * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
 +       * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should
 +       * be ignored.
 +       */
 +      if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
 +          (index % GTT_64K_PTE_STRIDE)) {
 +              gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
 +                          index);
 +              return 0;
 +      }
  
        if (bytes == info->gtt_entry_size) {
                ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
@@@ -1901,6 -1592,7 +1901,7 @@@ static struct intel_vgpu_mm *intel_vgpu
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }
+       mm->ggtt_mm.last_partial_off = -1UL;
  
        return mm;
  }
@@@ -1925,6 -1617,7 +1926,7 @@@ void _intel_vgpu_mm_release(struct kre
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
+               mm->ggtt_mm.last_partial_off = -1UL;
        }
  
        vgpu_free_mm(mm);
@@@ -2177,6 -1870,62 +2179,62 @@@ static int emulate_ggtt_mmio_write(stru
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
  
+       /* If the ggtt entry size is 8 bytes and it is split into two 4-byte
+        * writes, we assume the two 4-byte writes are consecutive.
+        * Otherwise, we abort and report an error.
+        */
+       if (bytes < info->gtt_entry_size) {
+               if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+                       /* the first partial part */
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+                       return 0;
+               } else if ((g_gtt_index ==
+                               (ggtt_mm->ggtt_mm.last_partial_off >>
+                               info->gtt_entry_size_shift)) &&
+                       (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+                       /* the second partial part */
+                       int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+                               (info->gtt_entry_size - 1);
+                       memcpy((void *)&e.val64 + last_off,
+                               (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+                               last_off, bytes);
+                       ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+               } else {
+                       int last_offset;
+                       gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+                                       ggtt_mm->ggtt_mm.last_partial_off, off,
+                                       bytes, info->gtt_entry_size);
+                       /* set the host ggtt entry to the scratch page and
+                        * clear the virtual ggtt entry as not present for the
+                        * last partially written offset
+                        */
+                       last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+                                       (~(info->gtt_entry_size - 1));
+                       ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate_pte(vgpu, &m);
+                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+                       ops->clear_present(&m);
+                       ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate(gvt->dev_priv);
+                       ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+                       ops->clear_present(&e);
+                       ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+                       return 0;
+               }
+       }
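
Illustrative sketch, not part of the patch: the block above caches the first 4-byte half of an 8-byte GGTT entry and merges it with the second half once the consecutive write arrives; anything out of sequence resets the entry to the scratch page. The merge itself is just a byte-offset copy into the 64-bit value, as sketched below for a little-endian host.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Merge one 4-byte partial write into an 8-byte GGTT entry value;
 * 'off' is the byte offset of the partial write within the entry. */
static uint64_t merge_partial(uint64_t entry, unsigned int off, uint32_t data)
{
	memcpy((uint8_t *)&entry + off, &data, sizeof(data));
	return entry;
}

int main(void)
{
	uint64_t e = 0;

	e = merge_partial(e, 0, 0x12345678);	/* first partial write */
	e = merge_partial(e, 4, 0x9abcdef0);	/* second, consecutive write */

	assert(e == 0x9abcdef012345678ULL);	/* little-endian host assumed */
	return 0;
}
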
        if (ops->test_present(&e)) {
                gfn = ops->get_pfn(&e);
                m = e;
                }
  
                ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
 -                                                            &dma_addr);
 +                                                      PAGE_SIZE, &dma_addr);
                if (ret) {
                        gvt_vgpu_err("fail to populate guest ggtt entry\n");
                        /* guest driver may read/write the entry when partial
@@@ -2282,7 -2031,7 +2340,7 @@@ static int alloc_scratch_pages(struct i
         * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
         * is GTT_TYPE_PPGTT_PTE_PT, fully filled with the scratch page mfn.
         */
 -      if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
 +      if (type > GTT_TYPE_PPGTT_PTE_PT) {
                struct intel_gvt_gtt_entry se;
  
                memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
@@@ -2566,8 -2315,13 +2624,8 @@@ int intel_gvt_init_gtt(struct intel_gv
  
        gvt_dbg_core("init gtt\n");
  
 -      if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
 -              || IS_KABYLAKE(gvt->dev_priv)) {
 -              gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
 -              gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
 -      } else {
 -              return -ENODEV;
 -      }
 +      gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
 +      gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
  
        page = (void *)get_zeroed_page(GFP_KERNEL);
        if (!page) {
index b7bf68cc8418007fea2200bd6c09781a8493b1da,97e62647418a0a48fa1d1da6ace5a800f7d0de23..7a9b36176efb7fca7198527512f8873ad21248cb
@@@ -63,12 -63,6 +63,12 @@@ struct intel_gvt_gtt_pte_ops 
        void (*clear_present)(struct intel_gvt_gtt_entry *e);
        void (*set_present)(struct intel_gvt_gtt_entry *e);
        bool (*test_pse)(struct intel_gvt_gtt_entry *e);
 +      void (*clear_pse)(struct intel_gvt_gtt_entry *e);
 +      bool (*test_ips)(struct intel_gvt_gtt_entry *e);
 +      void (*clear_ips)(struct intel_gvt_gtt_entry *e);
 +      bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
 +      void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
 +      void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
        void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
        unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
  };
@@@ -101,7 -95,6 +101,7 @@@ typedef enum 
        GTT_TYPE_GGTT_PTE,
  
        GTT_TYPE_PPGTT_PTE_4K_ENTRY,
 +      GTT_TYPE_PPGTT_PTE_64K_ENTRY,
        GTT_TYPE_PPGTT_PTE_2M_ENTRY,
        GTT_TYPE_PPGTT_PTE_1G_ENTRY,
  
@@@ -157,6 -150,8 +157,8 @@@ struct intel_vgpu_mm 
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
+                       unsigned long last_partial_off;
+                       u64 last_partial_data;
                } ggtt_mm;
        };
  };
@@@ -227,7 -222,6 +229,7 @@@ struct intel_vgpu_ppgtt_spt 
  
        struct {
                intel_gvt_gtt_type_t type;
 +              bool pde_ips; /* for 64KB PTEs */
                void *vaddr;
                struct page *page;
                unsigned long mfn;
  
        struct {
                intel_gvt_gtt_type_t type;
 +              bool pde_ips; /* for 64KB PTEs */
                unsigned long gfn;
                unsigned long write_cnt;
                struct intel_vgpu_oos_page *oos_page;
index de2a3a2580be682edeea35756af75183dfdbf066,858967daf04b21792be2f578c9b27a6e75ba8277..9a967152277494ccbf1dbd5ac55b4b7f5b842eba
@@@ -170,18 -170,12 +170,18 @@@ struct intel_vgpu_submission 
  
  struct intel_vgpu {
        struct intel_gvt *gvt;
 +      struct mutex vgpu_lock;
        int id;
        unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
        bool active;
        bool pv_notified;
        bool failsafe;
        unsigned int resetting_eng;
 +
 +      /* Both sched_data and sched_ctl can be seen as part of the global gvt
 +       * scheduler structure, so the two vgpu fields below are protected
 +       * by sched_lock, not vgpu_lock.
 +       */
        void *sched_data;
        struct vgpu_sched_ctl sched_ctl;
  
@@@ -274,6 -268,8 +274,8 @@@ struct intel_gvt_mmio 
  #define F_CMD_ACCESSED        (1 << 5)
  /* This reg could be accessed by unaligned address */
  #define F_UNALIGN     (1 << 6)
+ /* This reg is saved/restored in context */
+ #define F_IN_CTX      (1 << 7)
  
        struct gvt_mmio_block *mmio_block;
        unsigned int num_mmio_block;
@@@ -300,13 -296,7 +302,13 @@@ struct intel_vgpu_type 
  };
  
  struct intel_gvt {
 +      /* GVT scope lock, protects GVT itself and all resources not yet
 +       * protected by the dedicated locks (vgpu and scheduler locks).
 +       */
        struct mutex lock;
 +      /* scheduler scope lock, protects gvt and vgpu scheduling-related data */
 +      struct mutex sched_lock;
 +
        struct drm_i915_private *dev_priv;
        struct idr vgpu_idr;    /* vGPU IDR pool */
  
  
        struct task_struct *service_thread;
        wait_queue_head_t service_thread_wq;
 +
 +      /* service_request is always used with bit operations; always access it
 +       * with atomic bit ops so there is no need to take the big gvt lock.
 +       */
        unsigned long service_request;
  
        struct {
@@@ -377,9 -363,9 +379,9 @@@ int intel_gvt_load_firmware(struct inte
  #define gvt_aperture_sz(gvt)    (gvt->dev_priv->ggtt.mappable_end)
  #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
  
 -#define gvt_ggtt_gm_sz(gvt)     (gvt->dev_priv->ggtt.base.total)
 +#define gvt_ggtt_gm_sz(gvt)     (gvt->dev_priv->ggtt.vm.total)
  #define gvt_ggtt_sz(gvt) \
 -      ((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
 +      ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
  #define gvt_hidden_sz(gvt)      (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
  
  #define gvt_aperture_gmadr_base(gvt) (0)
@@@ -655,6 -641,33 +657,33 @@@ static inline bool intel_gvt_mmio_has_m
        return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
  }
  
+ /**
+  * intel_gvt_mmio_is_in_ctx - check if a MMIO has an in-ctx mask
+  * @gvt: a GVT device
+  * @offset: register offset
+  *
+  * Returns:
+  * True if the MMIO has an in-context mask, false if it doesn't.
+  *
+  */
+ static inline bool intel_gvt_mmio_is_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+ {
+       return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+ }
+ /**
+  * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+  * @gvt: a GVT device
+  * @offset: register offset
+  *
+  */
+ static inline void intel_gvt_mmio_set_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+ {
+       gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+ }
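
Illustrative sketch, not part of the patch: mmio_attribute keeps one byte of flags per 4-byte register, so both helpers above index it with offset >> 2. The array size and the F_IN_CTX value below are assumptions chosen to mirror the definitions in this file.

#include <assert.h>
#include <stdint.h>

#define F_IN_CTX	(1 << 7)			/* mirrors the flag added above */

static uint8_t mmio_attribute[2 * 1024 * 1024 >> 2];	/* one byte per register */

static void set_in_ctx(unsigned int offset)
{
	mmio_attribute[offset >> 2] |= F_IN_CTX;
}

static int is_in_ctx(unsigned int offset)
{
	return mmio_attribute[offset >> 2] & F_IN_CTX;
}

int main(void)
{
	set_in_ctx(0x2244);			/* some 4-byte aligned register */
	assert(is_in_ctx(0x2244));
	assert(!is_in_ctx(0x2248));		/* its neighbour stays clear */
	return 0;
}
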
  int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
  void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
  int intel_gvt_debugfs_init(struct intel_gvt *gvt);
index 6b50f850dc283df67355cef90d980630cdf4356a,8f1caacdc78a4037efb56aeb1fa1cc488a5103f3..7a58ca5551977a086ce8b25dbe18aa26497f45bc
@@@ -55,8 -55,6 +55,8 @@@ unsigned long intel_gvt_get_device_type
                return D_SKL;
        else if (IS_KABYLAKE(gvt->dev_priv))
                return D_KBL;
 +      else if (IS_BROXTON(gvt->dev_priv))
 +              return D_BXT;
  
        return 0;
  }
@@@ -210,31 -208,6 +210,31 @@@ static int sanitize_fence_mmio_access(s
        return 0;
  }
  
 +static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
 +
 +      if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
 +              if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
 +                      gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
 +              else if (!ips)
 +                      gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
 +              else {
 +                      /* IPS must be enabled on all engines together for a vGPU,
 +                       * since we don't know which engine the ppgtt will
 +                       * bind to when shadowing.
 +                       */
 +                      gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
 +                                   ips);
 +                      return -EINVAL;
 +              }
 +      }
 +
 +      write_vreg(vgpu, offset, p_data, bytes);
 +      return 0;
 +}
 +
  static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
                void *p_data, unsigned int bytes)
  {
@@@ -282,8 -255,7 +282,8 @@@ static int mul_force_wake_write(struct 
        new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
  
        if (IS_SKYLAKE(vgpu->gvt->dev_priv)
 -              || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
 +              || IS_KABYLAKE(vgpu->gvt->dev_priv)
 +              || IS_BROXTON(vgpu->gvt->dev_priv)) {
                switch (offset) {
                case FORCEWAKE_RENDER_GEN9_REG:
                        ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@@ -344,7 -316,6 +344,7 @@@ static int gdrst_mmio_write(struct inte
                }
        }
  
 +      /* vgpu_lock already held by emulate mmio r/w */
        intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
  
        /* sw will wait for the device to ack the reset request */
@@@ -449,10 -420,7 +449,10 @@@ static int pipeconf_mmio_write(struct i
                vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
        else
                vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
 +      /* vgpu_lock already held by emulate mmio r/w */
 +      mutex_unlock(&vgpu->vgpu_lock);
        intel_gvt_check_vblank_emulation(vgpu->gvt);
 +      mutex_lock(&vgpu->vgpu_lock);
        return 0;
  }
  
@@@ -889,8 -857,7 +889,8 @@@ static int dp_aux_ch_ctl_mmio_write(str
        data = vgpu_vreg(vgpu, offset);
  
        if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
 -              || IS_KABYLAKE(vgpu->gvt->dev_priv))
 +              || IS_KABYLAKE(vgpu->gvt->dev_priv)
 +              || IS_BROXTON(vgpu->gvt->dev_priv))
                && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
                /* SKL DPB/C/D aux ctl register changed */
                return 0;
@@@ -1242,8 -1209,8 +1242,8 @@@ static int pvinfo_mmio_write(struct int
                ret = handle_g2v_notification(vgpu, data);
                break;
        /* add xhot and yhot to handled list to avoid error log */
 -      case 0x78830:
 -      case 0x78834:
 +      case _vgtif_reg(cursor_x_hot):
 +      case _vgtif_reg(cursor_y_hot):
        case _vgtif_reg(pdp[0].lo):
        case _vgtif_reg(pdp[0].hi):
        case _vgtif_reg(pdp[1].lo):
@@@ -1402,16 -1369,6 +1402,16 @@@ static int mailbox_write(struct intel_v
                                *data0 = 0x1e1a1100;
                        else
                                *data0 = 0x61514b3d;
 +              } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
 +                      /**
 +                       * "Read memory latency" command on gen9.
 +                       * The memory latency values below were read
 +                       * from Broxton MRB.
 +                       */
 +                      if (!*data0)
 +                              *data0 = 0x16080707;
 +                      else
 +                              *data0 = 0x16161616;
                }
                break;
        case SKL_PCODE_CDCLK_CONTROL:
@@@ -1469,11 -1426,8 +1469,11 @@@ static int skl_power_well_ctl_write(str
  {
        u32 v = *(u32 *)p_data;
  
 -      v &= (1 << 31) | (1 << 29) | (1 << 9) |
 -           (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
 +      if (IS_BROXTON(vgpu->gvt->dev_priv))
 +              v &= (1 << 31) | (1 << 29);
 +      else
 +              v &= (1 << 31) | (1 << 29) | (1 << 9) |
 +                      (1 << 7) | (1 << 5) | (1 << 3) | (1 << 1);
        v |= (v >> 1);
  
        return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
@@@ -1493,109 -1447,6 +1493,109 @@@ static int skl_lcpll_write(struct intel
        return 0;
  }
  
 +static int bxt_de_pll_enable_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 v = *(u32 *)p_data;
 +
 +      if (v & BXT_DE_PLL_PLL_ENABLE)
 +              v |= BXT_DE_PLL_LOCK;
 +
 +      vgpu_vreg(vgpu, offset) = v;
 +
 +      return 0;
 +}
 +
 +static int bxt_port_pll_enable_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 v = *(u32 *)p_data;
 +
 +      if (v & PORT_PLL_ENABLE)
 +              v |= PORT_PLL_LOCK;
 +
 +      vgpu_vreg(vgpu, offset) = v;
 +
 +      return 0;
 +}
 +
 +static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 v = *(u32 *)p_data;
 +      u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 +
 +      vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
 +      vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
 +      vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
 +
 +      vgpu_vreg(vgpu, offset) = v;
 +
 +      return 0;
 +}
 +
 +static int bxt_port_tx_dw3_read(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 v = vgpu_vreg(vgpu, offset);
 +
 +      v &= ~UNIQUE_TRANGE_EN_METHOD;
 +
 +      vgpu_vreg(vgpu, offset) = v;
 +
 +      return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 +}
 +
 +static int bxt_pcs_dw12_grp_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 v = *(u32 *)p_data;
 +
 +      if (offset == _PORT_PCS_DW12_GRP_A || offset == _PORT_PCS_DW12_GRP_B) {
 +              vgpu_vreg(vgpu, offset - 0x600) = v;
 +              vgpu_vreg(vgpu, offset - 0x800) = v;
 +      } else {
 +              vgpu_vreg(vgpu, offset - 0x400) = v;
 +              vgpu_vreg(vgpu, offset - 0x600) = v;
 +      }
 +
 +      vgpu_vreg(vgpu, offset) = v;
 +
 +      return 0;
 +}
 +
 +static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      u32 v = *(u32 *)p_data;
 +
 +      if (v & BIT(0)) {
 +              vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
 +                      ~PHY_RESERVED;
 +              vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
 +                      PHY_POWER_GOOD;
 +      }
 +
 +      if (v & BIT(1)) {
 +              vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
 +                      ~PHY_RESERVED;
 +              vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
 +                      PHY_POWER_GOOD;
 +      }
 +
 +      vgpu_vreg(vgpu, offset) = v;
 +
 +      return 0;
 +}
 +
 +static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
 +              unsigned int offset, void *p_data, unsigned int bytes)
 +{
 +      vgpu_vreg(vgpu, offset) = 0;
 +      return 0;
 +}
 +
  static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
  {
@@@ -1806,9 -1657,7 +1806,9 @@@ static int init_generic_mmio_info(struc
  
        MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
  
 -      MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
 +      MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
 +              gamw_echo_dev_rw_ia_write);
 +
        MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
        MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
        MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
@@@ -2821,17 -2670,17 +2821,17 @@@ static int init_skl_mmio_info(struct in
        MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
        MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
        MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
 -      MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
 -      MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
 -      MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
 -      MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
 +      MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
 +      MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
 +      MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
 +      MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
  
        MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
        MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
        MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
  
 -      MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
  
 -      MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
  
 -      MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
        MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
  
 -      MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 -      MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
 +      MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
 +              NULL, NULL);
 +      MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
 +              NULL, NULL);
  
        MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
        MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
        MMIO_D(RC6_LOCATION, D_SKL_PLUS);
        MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
 -      MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 +      MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 +              NULL, NULL);
  
        /* TRTT */
 -      MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
 -      MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
 -      MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
 -      MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
 -      MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
 -      MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
 -      MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
 +      MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 +      MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 +      MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 +      MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 +      MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 +      MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
 +              NULL, gen9_trtte_write);
 +      MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
  
 -      MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
  
 -      MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
  
 -      MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
  
 -      MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
        MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
        MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
  
        MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
 -      MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
 -      MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
 +      MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x6e554), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x2b20), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x65f00), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x65f08), D_SKL_PLUS);
 +      MMIO_D(_MMIO(0x320f0), D_SKL_PLUS);
  
        MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
        MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
  
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 -      MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
 +      MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                NULL, NULL);
  
        MMIO_D(_MMIO(0x4ab8), D_KBL);
 -      MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
 +      MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
 +
 +      return 0;
 +}
 +
 +static int init_bxt_mmio_info(struct intel_gvt *gvt)
 +{
 +      struct drm_i915_private *dev_priv = gvt->dev_priv;
 +      int ret;
 +
 +      MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
 +
 +      MMIO_D(GEN7_SAMPLER_INSTDONE, D_BXT);
 +      MMIO_D(GEN7_ROW_INSTDONE, D_BXT);
 +      MMIO_D(GEN8_FAULT_TLB_DATA0, D_BXT);
 +      MMIO_D(GEN8_FAULT_TLB_DATA1, D_BXT);
 +      MMIO_D(ERROR_GEN6, D_BXT);
 +      MMIO_D(DONE_REG, D_BXT);
 +      MMIO_D(EIR, D_BXT);
 +      MMIO_D(PGTBL_ER, D_BXT);
 +      MMIO_D(_MMIO(0x4194), D_BXT);
 +      MMIO_D(_MMIO(0x4294), D_BXT);
 +      MMIO_D(_MMIO(0x4494), D_BXT);
 +
 +      MMIO_RING_D(RING_PSMI_CTL, D_BXT);
 +      MMIO_RING_D(RING_DMA_FADD, D_BXT);
 +      MMIO_RING_D(RING_DMA_FADD_UDW, D_BXT);
 +      MMIO_RING_D(RING_IPEHR, D_BXT);
 +      MMIO_RING_D(RING_INSTPS, D_BXT);
 +      MMIO_RING_D(RING_BBADDR_UDW, D_BXT);
 +      MMIO_RING_D(RING_BBSTATE, D_BXT);
 +      MMIO_RING_D(RING_IPEIR, D_BXT);
 +
 +      MMIO_F(SOFT_SCRATCH(0), 16 * 4, 0, 0, 0, D_BXT, NULL, NULL);
 +
 +      MMIO_DH(BXT_P_CR_GT_DISP_PWRON, D_BXT, NULL, bxt_gt_disp_pwron_write);
 +      MMIO_D(BXT_RP_STATE_CAP, D_BXT);
 +      MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY0), D_BXT,
 +              NULL, bxt_phy_ctl_family_write);
 +      MMIO_DH(BXT_PHY_CTL_FAMILY(DPIO_PHY1), D_BXT,
 +              NULL, bxt_phy_ctl_family_write);
 +      MMIO_D(BXT_PHY_CTL(PORT_A), D_BXT);
 +      MMIO_D(BXT_PHY_CTL(PORT_B), D_BXT);
 +      MMIO_D(BXT_PHY_CTL(PORT_C), D_BXT);
 +      MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_A), D_BXT,
 +              NULL, bxt_port_pll_enable_write);
 +      MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_B), D_BXT,
 +              NULL, bxt_port_pll_enable_write);
 +      MMIO_DH(BXT_PORT_PLL_ENABLE(PORT_C), D_BXT, NULL,
 +              bxt_port_pll_enable_write);
 +
 +      MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY0), D_BXT);
 +      MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY0), D_BXT);
 +
 +      MMIO_D(BXT_PORT_CL1CM_DW0(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW9(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW10(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW28(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_CL1CM_DW30(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_CL2CM_DW6(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_REF_DW3(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_REF_DW6(DPIO_PHY1), D_BXT);
 +      MMIO_D(BXT_PORT_REF_DW8(DPIO_PHY1), D_BXT);
 +
 +      MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH0), D_BXT,
 +              NULL, bxt_pcs_dw12_grp_write);
 +      MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH0), D_BXT,
 +              bxt_port_tx_dw3_read, NULL);
 +      MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 0), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 1), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 2), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 3), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 6), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 8), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 9), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH0, 10), D_BXT);
 +
 +      MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY0, DPIO_CH1), D_BXT,
 +              NULL, bxt_pcs_dw12_grp_write);
 +      MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY0, DPIO_CH1), D_BXT,
 +              bxt_port_tx_dw3_read, NULL);
 +      MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY0, DPIO_CH1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 0), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 1), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 2), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 3), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 6), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 8), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 9), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY0, DPIO_CH1, 10), D_BXT);
 +
 +      MMIO_D(BXT_PORT_PLL_EBB_0(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PLL_EBB_4(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW10_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW10_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW12_LN01(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_PCS_DW12_LN23(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_DH(BXT_PORT_PCS_DW12_GRP(DPIO_PHY1, DPIO_CH0), D_BXT,
 +              NULL, bxt_pcs_dw12_grp_write);
 +      MMIO_D(BXT_PORT_TX_DW2_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW2_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_DH(BXT_PORT_TX_DW3_LN0(DPIO_PHY1, DPIO_CH0), D_BXT,
 +              bxt_port_tx_dw3_read, NULL);
 +      MMIO_D(BXT_PORT_TX_DW3_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW4_LN0(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW4_GRP(DPIO_PHY1, DPIO_CH0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
 +      MMIO_D(BXT_PORT_TX_DW14_LN(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 0), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 1), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 2), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 3), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 6), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 8), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 9), D_BXT);
 +      MMIO_D(BXT_PORT_PLL(DPIO_PHY1, DPIO_CH0, 10), D_BXT);
 +
 +      MMIO_D(BXT_DE_PLL_CTL, D_BXT);
 +      MMIO_DH(BXT_DE_PLL_ENABLE, D_BXT, NULL, bxt_de_pll_enable_write);
 +      MMIO_D(BXT_DSI_PLL_CTL, D_BXT);
 +      MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
 +
 +      MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
 +
 +      MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
 +      MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
 +      MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
 +
 +      MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
 +      MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
 +
 +      MMIO_D(RC6_CTX_BASE, D_BXT);
 +
 +      MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
 +      MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
 +      MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
 +      MMIO_D(GEN6_GFXPAUSE, D_BXT);
 +      MMIO_D(GEN8_L3SQCREG1, D_BXT);
 +
 +      MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
  
        return 0;
  }
@@@ -3297,16 -2965,6 +3297,16 @@@ int intel_gvt_setup_mmio_info(struct in
                ret = init_skl_mmio_info(gvt);
                if (ret)
                        goto err;
 +      } else if (IS_BROXTON(dev_priv)) {
 +              ret = init_broadwell_mmio_info(gvt);
 +              if (ret)
 +                      goto err;
 +              ret = init_skl_mmio_info(gvt);
 +              if (ret)
 +                      goto err;
 +              ret = init_bxt_mmio_info(gvt);
 +              if (ret)
 +                      goto err;
        }
  
        gvt->mmio.mmio_block = mmio_blocks;
@@@ -3387,6 -3045,30 +3387,30 @@@ int intel_vgpu_default_mmio_write(struc
        return 0;
  }
  
+ /**
+  * intel_vgpu_mask_mmio_write - write mask register
+  * @vgpu: a vGPU
+  * @offset: access offset
+  * @p_data: write data buffer
+  * @bytes: access data length
+  *
+  * Returns:
+  * Zero on success, negative error code if failed.
+  */
+ int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+ {
+       u32 mask, old_vreg;
+       old_vreg = vgpu_vreg(vgpu, offset);
+       write_vreg(vgpu, offset, p_data, bytes);
+       mask = vgpu_vreg(vgpu, offset) >> 16;
+       vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+                               (vgpu_vreg(vgpu, offset) & mask);
+       return 0;
+ }
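
Illustrative sketch, not part of the patch: intel_vgpu_mask_mmio_write() follows the usual GEN masked-register convention, where bits 31:16 of the written value select which of bits 15:0 may change and everything else keeps its old vreg value. The helper below shows that update rule in isolation.

#include <assert.h>
#include <stdint.h>

/* Masked-register update: bits 31:16 of 'val' say which of bits 15:0
 * may change; everything else keeps its old value. */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	/* Enable bit 0 only: mask = 0x0001, data = 0x0001. */
	assert(masked_write(0x0000, 0x00010001) == 0x0001);

	/* Clear bit 0 only: mask = 0x0001, data = 0x0000; bit 3 survives. */
	assert(masked_write(0x0009, 0x00010000) == 0x0008);
	return 0;
}
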
  /**
   * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
   * force-nopriv register
index e474188b46d25830543d331a810b86257a89bafe,dac8c6401e26a010f5ed36bd441b9f429526c24e..1ffc69eba30e385a21469847c6681f420254d67d
@@@ -42,16 -42,15 +42,16 @@@ struct intel_vgpu
  #define D_BDW   (1 << 0)
  #define D_SKL (1 << 1)
  #define D_KBL (1 << 2)
 +#define D_BXT (1 << 3)
  
 -#define D_GEN9PLUS    (D_SKL | D_KBL)
 -#define D_GEN8PLUS    (D_BDW | D_SKL | D_KBL)
 +#define D_GEN9PLUS    (D_SKL | D_KBL | D_BXT)
 +#define D_GEN8PLUS    (D_BDW | D_SKL | D_KBL | D_BXT)
  
 -#define D_SKL_PLUS    (D_SKL | D_KBL)
 -#define D_BDW_PLUS    (D_BDW | D_SKL | D_KBL)
 +#define D_SKL_PLUS    (D_SKL | D_KBL | D_BXT)
 +#define D_BDW_PLUS    (D_BDW | D_SKL | D_KBL | D_BXT)
  
  #define D_PRE_SKL     (D_BDW)
 -#define D_ALL         (D_BDW | D_SKL | D_KBL)
 +#define D_ALL         (D_BDW | D_SKL | D_KBL | D_BXT)
  
  typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
                             unsigned int);
@@@ -99,4 -98,6 +99,6 @@@ bool intel_gvt_in_force_nonpriv_whiteli
  int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
                           void *pdata, unsigned int bytes, bool is_read);
  
+ int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+                                 void *p_data, unsigned int bytes);
  #endif
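
Illustrative sketch, not part of the patch: because D_BXT is now OR'ed into D_SKL_PLUS, D_BDW_PLUS and friends, every MMIO entry registered with those masks also matches a Broxton device; matching is a plain bitwise AND of the entry's device mask against the running platform's flag, as below (flag values copied from the hunk above).

#include <assert.h>

#define D_BDW	(1 << 0)
#define D_SKL	(1 << 1)
#define D_KBL	(1 << 2)
#define D_BXT	(1 << 3)

#define D_SKL_PLUS	(D_SKL | D_KBL | D_BXT)

/* An MMIO entry applies to a device when the masks intersect. */
static int entry_matches(unsigned long entry_mask, unsigned long device)
{
	return (entry_mask & device) != 0;
}

int main(void)
{
	assert(entry_matches(D_SKL_PLUS, D_BXT));	/* now covers Broxton */
	assert(!entry_matches(D_BDW, D_BXT));		/* BDW-only stays BDW */
	return 0;
}
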
index 20be9a92600f0f99cd5a688929251c071307afe1,5ca9caf7552a6145b0ccb91a3f18a0c3d4764841..42e1e6bdcc2cfe64a3446eea8019b9a912141ba0
@@@ -364,8 -364,7 +364,8 @@@ static void handle_tlb_pending_event(st
         */
        fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
                                            FW_REG_READ | FW_REG_WRITE);
 -      if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
 +      if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
 +                      IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
                fw |= FORCEWAKE_RENDER;
  
        intel_uncore_forcewake_get(dev_priv, fw);
@@@ -402,7 -401,7 +402,7 @@@ static void switch_mocs(struct intel_vg
        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
                return;
  
 -      if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
 +      if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
                return;
  
        if (!pre && !gen9_render_mocs.initialized)
  
  #define CTX_CONTEXT_CONTROL_VAL       0x03
  
 -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
 +bool is_inhibit_context(struct intel_context *ce)
  {
 -      u32 *reg_state = ctx->__engine[ring_id].lrc_reg_state;
 +      const u32 *reg_state = ce->lrc_reg_state;
        u32 inhibit_mask =
                _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
  
@@@ -468,9 -467,7 +468,9 @@@ static void switch_mmio(struct intel_vg
        u32 old_v, new_v;
  
        dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
 -      if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 +      if (IS_SKYLAKE(dev_priv)
 +              || IS_KABYLAKE(dev_priv)
 +              || IS_BROXTON(dev_priv))
                switch_mocs(pre, next, ring_id);
  
        for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
                 * state image on kabylake, it's initialized by lri command and
                 * save or restore with context together.
                 */
 -              if (IS_KABYLAKE(dev_priv) && mmio->in_context)
 +              if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
 +                      && mmio->in_context)
                        continue;
  
                // save
                         * itself.
                         */
                        if (mmio->in_context &&
 -                          !is_inhibit_context(s->shadow_ctx, ring_id))
 +                          !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
                                continue;
  
                        if (mmio->mask)
@@@ -578,16 -574,16 +578,18 @@@ void intel_gvt_init_engine_mmio_context
  {
        struct engine_mmio *mmio;
  
 -      if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
 +      if (IS_SKYLAKE(gvt->dev_priv) ||
 +              IS_KABYLAKE(gvt->dev_priv) ||
 +              IS_BROXTON(gvt->dev_priv))
                gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
        else
                gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
  
        for (mmio = gvt->engine_mmio_list.mmio;
             i915_mmio_reg_valid(mmio->reg); mmio++) {
-               if (mmio->in_context)
+               if (mmio->in_context) {
                        gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+                       intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+               }
        }
  }
index 0190377b02a6085d37e113a3624f40506f9659d9,9bae4db84cfb8b27494388baaf3611d8815718d4..8412119bd94058b7197c531c49fc7ad88a9e6fab
@@@ -136,24 -136,12 +136,24 @@@ nv50_dmac_create(struct nvif_device *de
  {
        struct nouveau_cli *cli = (void *)device->object.client;
        struct nv50_disp_core_channel_dma_v0 *args = data;
 +      u8 type = NVIF_MEM_COHERENT;
        int ret;
  
        mutex_init(&dmac->lock);
  
 -      ret = nvif_mem_init_map(&cli->mmu, NVIF_MEM_COHERENT, 0x1000,
 -                              &dmac->push);
 +      /* Pascal added support for 47-bit physical addresses, but some
 +       * parts of EVO still only accept 40-bit PAs.
 +       *
 +       * To avoid issues on systems with large amounts of RAM, and on
 +       * systems where an IOMMU maps pages at a high address, we need
 +       * to allocate push buffers in VRAM instead.
 +       *
 +       * This appears to match NVIDIA's behaviour on Pascal.
 +       */
 +      if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
 +              type |= NVIF_MEM_VRAM;
 +
 +      ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
        if (ret)
                return ret;
  
@@@ -228,19 -216,6 +228,19 @@@ voi
  evo_kick(u32 *push, struct nv50_dmac *evoc)
  {
        struct nv50_dmac *dmac = evoc;
 +
 +      /* Push buffer fetches are not coherent with BAR1, so we need to ensure
 +       * writes have been flushed right through to VRAM before writing PUT.
 +       */
 +      if (dmac->push.type & NVIF_MEM_VRAM) {
 +              struct nvif_device *device = dmac->base.device;
 +              nvif_wr32(&device->object, 0x070000, 0x00000001);
 +              nvif_msec(device, 2000,
 +                      if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
 +                              break;
 +              );
 +      }
 +
        nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
        mutex_unlock(&dmac->lock);
  }
@@@ -449,7 -424,7 +449,7 @@@ nv50_dac_create(struct drm_connector *c
                         "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_dac_help);
  
 -      drm_mode_connector_attach_encoder(connector, encoder);
 +      drm_connector_attach_encoder(connector, encoder);
        return 0;
  }
  
@@@ -875,7 -850,7 +875,7 @@@ nv50_mstc_get_modes(struct drm_connecto
        int ret = 0;
  
        mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
 -      drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
 +      drm_connector_update_edid_property(&mstc->connector, mstc->edid);
        if (mstc->edid)
                ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
  
@@@ -952,11 -927,11 +952,11 @@@ nv50_mstc_new(struct nv50_mstm *mstm, s
        nouveau_conn_attach_properties(&mstc->connector);
  
        for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
 -              drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
 +              drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);
  
        drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
        drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
 -      drm_mode_connector_set_path_property(&mstc->connector, path);
 +      drm_connector_set_path_property(&mstc->connector, path);
        return 0;
  }
  
@@@ -1032,7 -1007,7 +1032,7 @@@ nv50_mstm_destroy_connector(struct drm_
        mstc->port = NULL;
        drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
  
 -      drm_connector_unreference(&mstc->connector);
 +      drm_connector_put(&mstc->connector);
  }
  
  static void
@@@ -1443,7 -1418,7 +1443,7 @@@ nv50_sor_create(struct drm_connector *c
                         "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_sor_help);
  
 -      drm_mode_connector_attach_encoder(connector, encoder);
 +      drm_connector_attach_encoder(connector, encoder);
  
        if (dcbe->type == DCB_OUTPUT_DP) {
                struct nv50_disp *disp = nv50_disp(encoder->dev);
@@@ -1601,7 -1576,7 +1601,7 @@@ nv50_pior_create(struct drm_connector *
                         "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
        drm_encoder_helper_add(encoder, &nv50_pior_help);
  
 -      drm_mode_connector_attach_encoder(connector, encoder);
 +      drm_connector_attach_encoder(connector, encoder);
        return 0;
  }
  
   *****************************************************************************/
  
  static void
- nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
  {
+       struct nouveau_drm *drm = nouveau_drm(state->dev);
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_core *core = disp->core;
        struct nv50_mstm *mstm;
        }
  }
  
+ static void
+ nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+ {
+       struct drm_plane_state *new_plane_state;
+       struct drm_plane *plane;
+       int i;
+       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+               struct nv50_wndw *wndw = nv50_wndw(plane);
+               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+                       if (wndw->func->update)
+                               wndw->func->update(wndw, interlock);
+               }
+       }
+ }
  static void
  nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
  {
                        help->disable(encoder);
                        interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
                        if (outp->flush_disable) {
-                               nv50_disp_atomic_commit_core(drm, interlock);
+                               nv50_disp_atomic_commit_wndw(state, interlock);
+                               nv50_disp_atomic_commit_core(state, interlock);
                                memset(interlock, 0x00, sizeof(interlock));
                        }
                }
        /* Flush disable. */
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                if (atom->flush_disable) {
-                       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-                               struct nv50_wndw *wndw = nv50_wndw(plane);
-                               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-                                       if (wndw->func->update)
-                                               wndw->func->update(wndw, interlock);
-                               }
-                       }
-                       nv50_disp_atomic_commit_core(drm, interlock);
+                       nv50_disp_atomic_commit_wndw(state, interlock);
+                       nv50_disp_atomic_commit_core(state, interlock);
                        memset(interlock, 0x00, sizeof(interlock));
                }
        }
        }
  
        /* Flush update. */
-       for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-               struct nv50_wndw *wndw = nv50_wndw(plane);
-               if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-                       if (wndw->func->update)
-                               wndw->func->update(wndw, interlock);
-               }
-       }
+       nv50_disp_atomic_commit_wndw(state, interlock);
  
        if (interlock[NV50_DISP_INTERLOCK_CORE]) {
                if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+                   interlock[NV50_DISP_INTERLOCK_OVLY] ||
+                   interlock[NV50_DISP_INTERLOCK_WNDW] ||
                    !atom->state.legacy_cursor_update)
-                       nv50_disp_atomic_commit_core(drm, interlock);
+                       nv50_disp_atomic_commit_core(state, interlock);
                else
                        disp->core->func->update(disp->core, interlock, false);
        }
@@@ -1896,7 -1878,7 +1903,7 @@@ nv50_disp_atomic_commit(struct drm_devi
                nv50_disp_atomic_commit_tail(state);
  
        drm_for_each_crtc(crtc, dev) {
-               if (crtc->state->enable) {
+               if (crtc->state->active) {
                        if (!drm->have_disp_power_ref) {
                                drm->have_disp_power_ref = true;
                                return 0;
@@@ -2144,10 -2126,6 +2151,6 @@@ nv50_display_destroy(struct drm_device 
        kfree(disp);
  }
  
- MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
- static int nouveau_atomic = 0;
- module_param_named(atomic, nouveau_atomic, int, 0400);
  int
  nv50_display_create(struct drm_device *dev)
  {
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
        dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
-       if (nouveau_atomic)
-               dev->driver->driver_features |= DRIVER_ATOMIC;
  
        /* small shared memory area we use for notifiers and semaphores */
        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                connector->funcs->destroy(connector);
        }
  
 +      /* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
 +      dev->vblank_disable_immediate = true;
 +
  out:
        if (ret)
                nv50_display_destroy(dev);
index 22a15478d23dcb1c524b074e96708ccccec7d8a5,af68eae4c626154938f523fb37d2dad133407e21..51932c72334ef6529abb18bc44762cc01ba2d176
@@@ -363,11 -363,19 +363,11 @@@ module_param_named(hdmimhz, nouveau_hdm
  struct nouveau_encoder *
  find_encoder(struct drm_connector *connector, int type)
  {
 -      struct drm_device *dev = connector->dev;
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *enc;
 -      int i, id;
 -
 -      for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 -              id = connector->encoder_ids[i];
 -              if (!id)
 -                      break;
 +      int i;
  
 -              enc = drm_encoder_find(dev, NULL, id);
 -              if (!enc)
 -                      continue;
 +      drm_connector_for_each_possible_encoder(connector, enc, i) {
                nv_encoder = nouveau_encoder(enc);
  
                if (type == DCB_OUTPUT_ANY ||
@@@ -412,7 -420,7 +412,7 @@@ nouveau_connector_ddc_detect(struct drm
        struct nouveau_connector *nv_connector = nouveau_connector(connector);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
 -      struct nouveau_encoder *nv_encoder;
 +      struct nouveau_encoder *nv_encoder = NULL;
        struct drm_encoder *encoder;
        int i, panel = -ENODEV;
  
                }
        }
  
 -      for (i = 0; nv_encoder = NULL, i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 -              int id = connector->encoder_ids[i];
 -              if (id == 0)
 -                      break;
 -
 -              encoder = drm_encoder_find(dev, NULL, id);
 -              if (!encoder)
 -                      continue;
 +      drm_connector_for_each_possible_encoder(connector, encoder, i) {
                nv_encoder = nouveau_encoder(encoder);
  
                if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
@@@ -550,7 -565,7 +550,7 @@@ nouveau_connector_detect(struct drm_con
  
        /* Cleanup the previous EDID block. */
        if (nv_connector->edid) {
 -              drm_mode_connector_update_edid_property(connector, NULL);
 +              drm_connector_update_edid_property(connector, NULL);
                kfree(nv_connector->edid);
                nv_connector->edid = NULL;
        }
                else
                        nv_connector->edid = drm_get_edid(connector, i2c);
  
 -              drm_mode_connector_update_edid_property(connector,
 +              drm_connector_update_edid_property(connector,
                                                        nv_connector->edid);
                if (!nv_connector->edid) {
                        NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
@@@ -657,7 -672,7 +657,7 @@@ nouveau_connector_detect_lvds(struct dr
  
        /* Cleanup the previous EDID block. */
        if (nv_connector->edid) {
 -              drm_mode_connector_update_edid_property(connector, NULL);
 +              drm_connector_update_edid_property(connector, NULL);
                kfree(nv_connector->edid);
                nv_connector->edid = NULL;
        }
@@@ -721,7 -736,7 +721,7 @@@ out
                status = connector_status_unknown;
  #endif
  
 -      drm_mode_connector_update_edid_property(connector, nv_connector->edid);
 +      drm_connector_update_edid_property(connector, nv_connector->edid);
        nouveau_connector_set_encoder(connector, nv_encoder);
        return status;
  }
@@@ -1193,14 -1208,19 +1193,19 @@@ nouveau_connector_create(struct drm_dev
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_connector *nv_connector = NULL;
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
        int type, ret = 0;
        bool dummy;
  
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                nv_connector = nouveau_connector(connector);
-               if (nv_connector->index == index)
+               if (nv_connector->index == index) {
+                       drm_connector_list_iter_end(&conn_iter);
                        return connector;
+               }
        }
+       drm_connector_list_iter_end(&conn_iter);
  
        nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
        if (!nv_connector)
index dfa23637072698dede382bf0d68f15d71b9e4ef1,ec7861457b84a4ee4a2be3b721c0cbcbe3f66a31..139368b31916b0f5a680916f0bc261ece493ec8f
@@@ -205,7 -205,7 +205,7 @@@ nouveau_user_framebuffer_destroy(struc
        struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
  
        if (fb->nvbo)
 -              drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
 +              drm_gem_object_put_unlocked(&fb->nvbo->gem);
  
        drm_framebuffer_cleanup(drm_fb);
        kfree(fb);
@@@ -287,7 -287,7 +287,7 @@@ nouveau_user_framebuffer_create(struct 
        if (ret == 0)
                return &fb->base;
  
 -      drm_gem_object_unreference_unlocked(gem);
 +      drm_gem_object_put_unlocked(gem);
        return ERR_PTR(ret);
  }
  
@@@ -404,6 -404,7 +404,7 @@@ nouveau_display_init(struct drm_device 
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
        int ret;
  
        ret = disp->init(dev);
                return ret;
  
        /* enable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                nvif_notify_get(&conn->hpd);
        }
+       drm_connector_list_iter_end(&conn_iter);
  
        /* enable flip completion events */
        nvif_notify_get(&drm->flip);
@@@ -427,6 -430,7 +430,7 @@@ nouveau_display_fini(struct drm_device 
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
  
        if (!suspend) {
                if (drm_drv_uses_atomic_modeset(dev))
        nvif_notify_put(&drm->flip);
  
        /* disable hotplug interrupts */
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                nvif_notify_put(&conn->hpd);
        }
+       drm_connector_list_iter_end(&conn_iter);
  
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
@@@ -939,7 -945,7 +945,7 @@@ nouveau_display_dumb_create(struct drm_
                return ret;
  
        ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
 -      drm_gem_object_unreference_unlocked(&bo->gem);
 +      drm_gem_object_put_unlocked(&bo->gem);
        return ret;
  }
  
@@@ -954,7 -960,7 +960,7 @@@ nouveau_display_dumb_map_offset(struct 
        if (gem) {
                struct nouveau_bo *bo = nouveau_gem_object(gem);
                *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
 -              drm_gem_object_unreference_unlocked(gem);
 +              drm_gem_object_put_unlocked(gem);
                return 0;
        }
  
index c779ee3c665b0f8ce37d163843c624ee54930a62,f5d3158f0378a4537742fee31df766ce23027e76..c7ec86d6c3c910fdb73a7774eb921a414c156363
@@@ -81,6 -81,10 +81,10 @@@ MODULE_PARM_DESC(modeset, "enable drive
  int nouveau_modeset = -1;
  module_param_named(modeset, nouveau_modeset, int, 0400);
  
+ MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+ static int nouveau_atomic = 0;
+ module_param_named(atomic, nouveau_atomic, int, 0400);
+
  MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
  static int nouveau_runtime_pm = -1;
  module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@@ -509,6 -513,9 +513,9 @@@ static int nouveau_drm_probe(struct pci
  
        pci_set_master(pdev);
  
+       if (nouveau_atomic)
+               driver_pci.driver_features |= DRIVER_ATOMIC;
+
        ret = drm_get_pci_dev(pdev, pent, &driver_pci);
        if (ret) {
                nvkm_device_del(&device);
@@@ -874,22 -881,11 +881,11 @@@ nouveau_pmops_runtime_resume(struct dev
  static int
  nouveau_pmops_runtime_idle(struct device *dev)
  {
-       struct pci_dev *pdev = to_pci_dev(dev);
-       struct drm_device *drm_dev = pci_get_drvdata(pdev);
-       struct nouveau_drm *drm = nouveau_drm(drm_dev);
-       struct drm_crtc *crtc;
-
        if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
  
-       list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
-               if (crtc->enabled) {
-                       DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
-                       return -EBUSY;
-               }
-       }
        pm_runtime_mark_last_busy(dev);
        pm_runtime_autosuspend(dev);
        /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
@@@ -912,10 -908,8 +908,10 @@@ nouveau_drm_open(struct drm_device *dev
        get_task_comm(tmpname, current);
        snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
  
 -      if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
 -              return ret;
 +      if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
 +              ret = -ENOMEM;
 +              goto done;
 +      }
  
        ret = nouveau_cli_init(drm, name, cli);
        if (ret)
index df73bec354e8d23de655fac4f9759d3aa0ac9419,e6ccafcb9c414d5f62a98005ed95a5d1f4113b77..b56524d343c3e84eb109badc3a763471fe95eff8
@@@ -274,7 -274,7 +274,7 @@@ nouveau_gem_ioctl_new(struct drm_devic
        }
  
        /* drop reference from allocate - handle holds it now */
 -      drm_gem_object_unreference_unlocked(&nvbo->gem);
 +      drm_gem_object_put_unlocked(&nvbo->gem);
        return ret;
  }
  
@@@ -354,7 -354,7 +354,7 @@@ validate_fini_no_ticket(struct validate
                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
 -              drm_gem_object_unreference_unlocked(&nvbo->gem);
 +              drm_gem_object_put_unlocked(&nvbo->gem);
        }
  }
  
@@@ -400,14 -400,14 +400,14 @@@ retry
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
 -                      drm_gem_object_unreference_unlocked(gem);
 +                      drm_gem_object_put_unlocked(gem);
                        continue;
                }
  
                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
 -                      drm_gem_object_unreference_unlocked(gem);
 +                      drm_gem_object_put_unlocked(gem);
                        ret = -EINVAL;
                        break;
                }
@@@ -616,7 -616,7 +616,7 @@@ nouveau_gem_pushbuf_reloc_apply(struct 
                struct nouveau_bo *nvbo;
                uint32_t data;
  
-               if (unlikely(r->bo_index > req->nr_buffers)) {
+               if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                if (b->presumed.valid)
                        continue;
  
-               if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+               if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
@@@ -894,7 -894,7 +894,7 @@@ nouveau_gem_ioctl_cpu_prep(struct drm_d
                ret = lret;
  
        nouveau_bo_sync_for_cpu(nvbo);
 -      drm_gem_object_unreference_unlocked(gem);
 +      drm_gem_object_put_unlocked(gem);
  
        return ret;
  }
@@@ -913,7 -913,7 +913,7 @@@ nouveau_gem_ioctl_cpu_fini(struct drm_d
        nvbo = nouveau_gem_object(gem);
  
        nouveau_bo_sync_for_device(nvbo);
 -      drm_gem_object_unreference_unlocked(gem);
 +      drm_gem_object_put_unlocked(gem);
        return 0;
  }
  
@@@ -930,7 -930,7 +930,7 @@@ nouveau_gem_ioctl_info(struct drm_devic
                return -ENOENT;
  
        ret = nouveau_gem_info(file_priv, gem, req);
 -      drm_gem_object_unreference_unlocked(gem);
 +      drm_gem_object_put_unlocked(gem);
        return ret;
  }
  
index b04ea0f3da75af6a1c119c52b5f4efcb7c861b19,9c81301d0eedabd120c2cb26e1e530ce15b71205..0eb38ac8e86e51112b5800a69b3e45956c54cf6f
@@@ -32,9 -32,11 +32,12 @@@ obj-$(CONFIG_DRM_SUN4I)             += sun4i-tcon.
  obj-$(CONFIG_DRM_SUN4I)               += sun4i_tv.o
  obj-$(CONFIG_DRM_SUN4I)               += sun6i_drc.o
  
- obj-$(CONFIG_DRM_SUN4I_BACKEND)       += sun4i-backend.o sun4i-frontend.o
+ obj-$(CONFIG_DRM_SUN4I_BACKEND)       += sun4i-backend.o
+ ifdef CONFIG_DRM_SUN4I_BACKEND
+ obj-$(CONFIG_DRM_SUN4I)               += sun4i-frontend.o
+ endif
  obj-$(CONFIG_DRM_SUN4I_HDMI)  += sun4i-drm-hdmi.o
  obj-$(CONFIG_DRM_SUN6I_DSI)   += sun6i-dsi.o
  obj-$(CONFIG_DRM_SUN8I_DW_HDMI)       += sun8i-drm-hdmi.o
  obj-$(CONFIG_DRM_SUN8I_MIXER) += sun8i-mixer.o
 +obj-$(CONFIG_DRM_SUN8I_TCON_TOP) += sun8i_tcon_top.o
diff --combined drivers/pci/pci.c
index 22adaf35b136bee7a43ed09377c718ef52ddc2ea,316496e99da9ba56b10d52ba2e0e0f4c9494890b..aa1684d99b709698dc47db379456cf641f3f5057
@@@ -3579,6 -3579,44 +3579,44 @@@ void pci_unmap_iospace(struct resource 
  }
  EXPORT_SYMBOL(pci_unmap_iospace);
  
+ static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+ {
+       struct resource **res = ptr;
+
+       pci_unmap_iospace(*res);
+ }
+
+ /**
+  * devm_pci_remap_iospace - Managed pci_remap_iospace()
+  * @dev: Generic device to remap IO address for
+  * @res: Resource describing the I/O space
+  * @phys_addr: physical address of range to be mapped
+  *
+  * Managed pci_remap_iospace().  Map is automatically unmapped on driver
+  * detach.
+  */
+ int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+                          phys_addr_t phys_addr)
+ {
+       const struct resource **ptr;
+       int error;
+
+       ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       error = pci_remap_iospace(res, phys_addr);
+       if (error) {
+               devres_free(ptr);
+       } else  {
+               *ptr = res;
+               devres_add(dev, ptr);
+       }
+
+       return error;
+ }
+ EXPORT_SYMBOL(devm_pci_remap_iospace);
+
  /**
   * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
   * @dev: Generic device to remap IO address for
@@@ -5222,7 -5260,6 +5260,7 @@@ enum pci_bus_speed pcie_get_speed_cap(s
  
        return PCI_SPEED_UNKNOWN;
  }
 +EXPORT_SYMBOL(pcie_get_speed_cap);
  
  /**
   * pcie_get_width_cap - query for the PCI device's link width capability
@@@ -5241,7 -5278,6 +5279,7 @@@ enum pcie_link_width pcie_get_width_cap
  
        return PCIE_LNK_WIDTH_UNKNOWN;
  }
 +EXPORT_SYMBOL(pcie_get_width_cap);
  
  /**
   * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
diff --combined include/linux/pci.h
index 6e0c0803b24133f2cabbc03e5a09da2c4556a3e7,abd5d5e17aeed8a1402176d7be6da548ac1d91c5..e04ab6265566dc3337e972cce5801a70a9ba58bf
@@@ -261,9 -261,6 +261,9 @@@ enum pci_bus_speed 
        PCI_SPEED_UNKNOWN               = 0xff,
  };
  
 +enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
 +enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
 +
  struct pci_cap_saved_data {
        u16             cap_nr;
        bool            cap_extended;
@@@ -1243,6 -1240,8 +1243,8 @@@ int pci_register_io_range(struct fwnode
  unsigned long pci_address_to_pio(phys_addr_t addr);
  phys_addr_t pci_pio_to_address(unsigned long pio);
  int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+ int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+                          phys_addr_t phys_addr);
  void pci_unmap_iospace(struct resource *res);
  void __iomem *devm_pci_remap_cfgspace(struct device *dev,
                                      resource_size_t offset,