Merge tag 'drm-misc-next-2019-06-14' of git://anongit.freedesktop.org/drm/drm-misc...
author    Daniel Vetter <[email protected]>
          Fri, 14 Jun 2019 09:31:13 +0000 (11:31 +0200)
committer Daniel Vetter <[email protected]>
          Fri, 14 Jun 2019 09:44:24 +0000 (11:44 +0200)
drm-misc-next for v5.3:

UAPI Changes:

Cross-subsystem Changes:
- Signal all dma-fences that still have pending signals when they are freed (see the sketch below).
- Annotate reservation object access in CONFIG_DEBUG_MUTEXES.
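
For context, a minimal sketch of the dma-fence change above; this is
illustrative, not the exact patch, and the error code is an assumption.
It uses the mainline dma-fence API (cb_list, dma_fence_set_error(),
dma_fence_signal(), dma_fence_free()):

    #include <linux/dma-fence.h>

    static void dma_fence_release(struct kref *kref)
    {
            struct dma_fence *fence =
                    container_of(kref, struct dma_fence, refcount);

            /* Callbacks still queued at free time would otherwise be
             * lost; signal the fence with an error so waiters can make
             * progress instead of blocking forever.
             */
            if (WARN_ON(!list_empty(&fence->cb_list))) {
                    dma_fence_set_error(fence, -EDEADLK); /* illustrative */
                    dma_fence_signal(fence);
            }

            if (fence->ops->release)
                    fence->ops->release(fence);
            else
                    dma_fence_free(fence);
    }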

Core Changes:
- Assorted documentation fixes.
- Use an irqsave/irqrestore spinlock when adding a CRC entry (see the locking sketch after this list).
- Move code to drm_client for internal modeset clients.
- Make drm_crtc.h and drm_debugfs.h self-contained.
- Remove drm_fb_helper_connector.
- Add bootsplash to the TODO list.
- Fix lock ordering in pan_display_legacy.
- Support pinning buffers to their current location in gem-vram (see the pinning sketch after this list).
- Remove the now unused locking functions from gem-vram.
- Remove the now unused kmap-object argument from vram helpers.
- Stop checking the return value of debugfs_create().
- Add atomic encoder enable/disable helpers.
- Pass drm_atomic_state to the atomic connector check.
- Add atomic support for bridge enable/disable.
- Add self refresh helpers to core.
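
A minimal sketch of the irqsave locking pattern referenced above; the
structs here are hypothetical stand-ins, not the actual drm_debugfs_crc
internals:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct crc_state {                /* hypothetical container */
            spinlock_t lock;
            struct list_head entries;
    };

    struct crc_entry {                /* hypothetical entry */
            struct list_head head;
    };

    static void crc_add_entry(struct crc_state *crc, struct crc_entry *entry)
    {
            unsigned long flags;

            /* irqsave/irqrestore keeps this safe even when entries are
             * added from interrupt context.
             */
            spin_lock_irqsave(&crc->lock, flags);
            list_add_tail(&entry->head, &crc->entries);
            spin_unlock_irqrestore(&crc->lock, flags);
    }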

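A minimal sketch of pinning a buffer at its current location, assuming
the gem-vram helper API as merged for v5.3 (drm_gem_vram_pin() with a
zero placement flag, drm_gem_vram_unpin()):

    #include <drm/drm_gem_vram_helper.h>

    static int pin_in_place(struct drm_gem_vram_object *gbo)
    {
            int ret;

            /* pl_flag == 0: pin the BO wherever it currently lives
             * instead of forcing a move to VRAM or system memory.
             */
            ret = drm_gem_vram_pin(gbo, 0);
            if (ret)
                    return ret;

            /* ... access or scan out the buffer ... */

            return drm_gem_vram_unpin(gbo);
    }
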
Driver Changes:
- Add extra delay to make MTP SDM845 work.
- Small fixes to virtio, vkms, sii902x, sii9234, ast, mcde, analogix, rockchip.
- Add zpos and ?BGR8888 support to meson.
- More removals of drm_os_linux and drmP headers for amd, radeon, sti, r128, savage, sis.
- Allow Synopsys to unwedge the i2c hdmi bus.
- Add orientation quirks for GPD panels.
- EDID cleanups and fixed handling for EDID versions < 1.2.
- Add runtime pm to stm.
- Handle suspend/resume in dw-hdmi.
- Add power on/off hooks to dsi for stm.
- Remove virtio dirty tracking code, done in drm core.
- Rework BO handling in ast and mgag200.

Tiny conflict in drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c; a
#include <linux/slab.h> was needed to make it compile (see below).
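
The resolution amounts to adding the missing header near the top of
clk_mgr.c; slab.h is where kzalloc()/kfree() live:

    #include <linux/slab.h>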

Signed-off-by: Daniel Vetter <[email protected]>
From: Maarten Lankhorst <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
86 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc_helper.c
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/rcar-du/rcar_lvds.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
include/drm/drm_bridge.h

index d8584b74f5e03140fc1c2e17daf131b783b78e5f,fbec83bfb4ad326663e95bab2f6da82d4479572f..cbcd253d18d59dcadc8ca80a12264516aae93370
@@@ -44,9 -44,9 +44,9 @@@
  #include <drm/ttm/ttm_module.h>
  #include <drm/ttm/ttm_execbuf_util.h>
  
- #include <drm/drmP.h>
- #include <drm/drm_gem.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_gem.h>
+ #include <drm/drm_ioctl.h>
  #include <drm/gpu_scheduler.h>
  
  #include <kgd_kfd_interface.h>
@@@ -118,6 -118,7 +118,6 @@@ extern int amdgpu_disp_priority
  extern int amdgpu_hw_i2c;
  extern int amdgpu_pcie_gen2;
  extern int amdgpu_msi;
 -extern int amdgpu_lockup_timeout;
  extern int amdgpu_dpm;
  extern int amdgpu_fw_load_type;
  extern int amdgpu_aspm;
@@@ -210,7 -211,6 +210,7 @@@ struct amdgpu_irq_src
  struct amdgpu_fpriv;
  struct amdgpu_bo_va_mapping;
  struct amdgpu_atif;
 +struct kfd_vm_fault_info;
  
  enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
@@@ -415,7 -415,6 +415,7 @@@ struct amdgpu_fpriv 
  };
  
  int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
 +int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
  
  int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                  unsigned size, struct amdgpu_ib *ib);
@@@ -559,8 -558,6 +559,8 @@@ struct amdgpu_asic_funcs 
                               uint64_t *count1);
        /* do we need to reset the asic at init time (e.g., kexec) */
        bool (*need_reset_on_init)(struct amdgpu_device *adev);
 +      /* PCIe replay counter */
 +      uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
  };
  
  /*
@@@ -642,11 -639,6 +642,11 @@@ struct nbio_hdp_flush_reg 
        u32 ref_and_mask_sdma1;
  };
  
 +struct amdgpu_mmio_remap {
 +      u32 reg_offset;
 +      resource_size_t bus_addr;
 +};
 +
  struct amdgpu_nbio_funcs {
        const struct nbio_hdp_flush_reg *hdp_flush_reg;
        u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
        void (*detect_hw_virt)(struct amdgpu_device *adev);
 +      void (*remap_hdp_registers)(struct amdgpu_device *adev);
  };
  
  struct amdgpu_df_funcs {
                                      u32 *flags);
        void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
                                            bool enable);
 +      int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
 +                                       int is_enable);
 +      int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
 +                                       int is_disable);
 +      void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
 +                                       uint64_t *count);
  };
  /* Define the HW IP blocks will be used in driver , add more if necessary */
  enum amd_hw_ip_block_type {
@@@ -779,7 -764,6 +779,7 @@@ struct amdgpu_device 
        void __iomem                    *rmmio;
        /* protects concurrent MM_INDEX/DATA based register access */
        spinlock_t mmio_idx_lock;
 +      struct amdgpu_mmio_remap        rmmio_remap;
        /* protects concurrent SMC based register access */
        spinlock_t smc_idx_lock;
        amdgpu_rreg_t                   smc_rreg;
        const struct amdgpu_df_funcs    *df_funcs;
  
        /* delayed work_func for deferring clockgating during resume */
 -      struct delayed_work     late_init_work;
 +      struct delayed_work     delayed_init_work;
  
        struct amdgpu_virt      virt;
        /* firmware VRAM reservation */
        struct work_struct              xgmi_reset_work;
  
        bool                            in_baco_reset;
 +
 +      long                            gfx_timeout;
 +      long                            sdma_timeout;
 +      long                            video_timeout;
 +      long                            compute_timeout;
 +
 +      uint64_t                        unique_id;
  };
  
  static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@@ -1088,7 -1065,6 +1088,7 @@@ int emu_soc_asic_init(struct amdgpu_dev
  #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
  #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
  #define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
 +#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
  
  /* Common functions */
  bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@@ -1105,9 -1081,6 +1105,9 @@@ void amdgpu_device_program_register_seq
                                             const u32 array_size);
  
  bool amdgpu_device_is_px(struct drm_device *dev);
 +bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 +                                    struct amdgpu_device *peer_adev);
 +
  /* atpx handler */
  #if defined(CONFIG_VGA_SWITCHEROO)
  void amdgpu_register_atpx_handler(void);
index 4af3989e4a755310a35d56ea28ebd13f91df10f8,822049a78e9f774fcaf534f95ebecfba5e777b5e..c8887a1c852abda54cb5072f203d573985c936b8
  
  #include "amdgpu_amdkfd.h"
  #include "amd_shared.h"
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
 +#include "amdgpu_dma_buf.h"
  #include <linux/module.h>
  #include <linux/dma-buf.h>
 +#include "amdgpu_xgmi.h"
  
  static const unsigned int compute_vmid_bitmap = 0xFF00;
  
@@@ -150,8 -148,7 +150,8 @@@ void amdgpu_amdkfd_device_init(struct a
                };
  
                /* this is going to have a few of the MSBs set that we need to
 -               * clear */
 +               * clear
 +               */
                bitmap_complement(gpu_resources.queue_bitmap,
                                  adev->gfx.mec.queue_bitmap,
                                  KGD_MAX_QUEUES);
                                  gpu_resources.queue_bitmap);
  
                /* According to linux/bitmap.h we shouldn't use bitmap_clear if
 -               * nbits is not compile time constant */
 +               * nbits is not compile time constant
 +               */
                last_valid_bit = 1 /* only first MEC can have compute queues */
                                * adev->gfx.mec.num_pipe_per_mec
                                * adev->gfx.mec.num_queue_per_pipe;
@@@ -339,40 -335,6 +339,40 @@@ void amdgpu_amdkfd_free_gtt_mem(struct 
        amdgpu_bo_unref(&(bo));
  }
  
 +int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
 +                              void **mem_obj)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 +      struct amdgpu_bo *bo = NULL;
 +      struct amdgpu_bo_param bp;
 +      int r;
 +
 +      memset(&bp, 0, sizeof(bp));
 +      bp.size = size;
 +      bp.byte_align = 1;
 +      bp.domain = AMDGPU_GEM_DOMAIN_GWS;
 +      bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 +      bp.type = ttm_bo_type_device;
 +      bp.resv = NULL;
 +
 +      r = amdgpu_bo_create(adev, &bp, &bo);
 +      if (r) {
 +              dev_err(adev->dev,
 +                      "failed to allocate gws BO for amdkfd (%d)\n", r);
 +              return r;
 +      }
 +
 +      *mem_obj = bo;
 +      return 0;
 +}
 +
 +void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
 +{
 +      struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
 +
 +      amdgpu_bo_unref(&bo);
 +}
 +
  uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
                                      enum kgd_engine_type type)
  {
@@@ -556,34 -518,6 +556,34 @@@ uint64_t amdgpu_amdkfd_get_hive_id(stru
  
        return adev->gmc.xgmi.hive_id;
  }
 +uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
 +{
 +      struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
 +      struct amdgpu_device *adev = (struct amdgpu_device *)dst;
 +      int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);
 +
 +      if (ret < 0) {
 +              DRM_ERROR("amdgpu: failed to get  xgmi hops count between node %d and %d. ret = %d\n",
 +                      adev->gmc.xgmi.physical_node_id,
 +                      peer_adev->gmc.xgmi.physical_node_id, ret);
 +              ret = 0;
 +      }
 +      return  (uint8_t)ret;
 +}
 +
 +uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 +
 +      return adev->rmmio_remap.bus_addr;
 +}
 +
 +uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
 +{
 +      struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 +
 +      return adev->gds.gws_size;
 +}
  
  int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
index c6abcf72e822cba875c3fcbc2f123bc610186619,c49d5ae4e29ecd8812f54ec2a5a381bf4baf2651..5f459bf5f6222b71030eff48282e31a8c7dce138
@@@ -23,7 -23,7 +23,7 @@@
  #include <linux/fdtable.h>
  #include <linux/uaccess.h>
  #include <linux/mmu_context.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_amdkfd.h"
  #include "cikd.h"
@@@ -310,7 -310,7 +310,7 @@@ static inline uint32_t get_sdma_base_ad
        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                        m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
  
 -      pr_debug("kfd: sdma base address: 0x%x\n", retval);
 +      pr_debug("sdma base address: 0x%x\n", retval);
  
        return retval;
  }
index 4e8b4e9499263a950bee0df94f12fa3d7cc56fab,68f4b131574079ba3fcb90a867b7f4e160048dda..6d2f6144960667c296f636fcb112517887b4e07c
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/fdtable.h>
  #include <linux/uaccess.h>
  #include <linux/mmu_context.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_amdkfd.h"
  #include "gfx_v8_0.h"
@@@ -266,7 -266,7 +266,7 @@@ static inline uint32_t get_sdma_base_ad
  
        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
 -      pr_debug("kfd: sdma base address: 0x%x\n", retval);
 +      pr_debug("sdma base address: 0x%x\n", retval);
  
        return retval;
  }
index d5af41143d120dcaf6bbc779b545c8ff89fa4857,1a3ec47238b02e503f897bba92ba598264d642de..85395f2d83a65fcfe27f81863afc8abfb21e86db
@@@ -26,7 -26,7 +26,7 @@@
  #include <linux/fdtable.h>
  #include <linux/uaccess.h>
  #include <linux/mmu_context.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_amdkfd.h"
  #include "soc15_hw_ip.h"
@@@ -225,8 -225,8 +225,8 @@@ static void kgd_program_sh_mem_settings
  
        lock_srbm(kgd, 0, 0, 0, vmid);
  
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
        /* APE1 no longer exists on GFX9 */
  
        unlock_srbm(kgd);
@@@ -369,7 -369,7 +369,7 @@@ static int kgd_hqd_load(struct kgd_dev 
                value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
                value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
                        ((mec << 5) | (pipe << 3) | queue_id | 0x80));
 -              WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
 +              WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
        }
  
        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
  
        for (reg = hqd_base;
             reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
 -              WREG32(reg, mqd_hqd[reg - hqd_base]);
 +              WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
  
  
        /* Activate doorbell logic before triggering WPTR poll. */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
  
        if (wptr) {
                /* Don't read wptr with get_user because the user
                guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
                guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
  
 -              WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
 +              WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
                       lower_32_bits(guessed_wptr));
 -              WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
 +              WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
                       upper_32_bits(guessed_wptr));
 -              WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
 +              WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
                       lower_32_bits((uintptr_t)wptr));
 -              WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
 +              WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
                       upper_32_bits((uintptr_t)wptr));
                WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
                       get_queue_mask(adev, pipe_id, queue_id));
        }
  
        /* Start the EOP fetcher */
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
               REG_SET_FIELD(m->cp_hqd_eop_rptr,
                             CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
  
        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
  
        release_queue(kgd);
  
@@@ -633,7 -633,7 +633,7 @@@ static int kgd_hqd_destroy(struct kgd_d
        acquire_queue(kgd, pipe_id, queue_id);
  
        if (m->cp_hqd_vmid == 0)
 -              WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
 +              WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
  
        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                break;
        }
  
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
  
        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
@@@ -726,8 -726,29 +726,8 @@@ static uint16_t get_atc_vmid_pasid_mapp
        return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
  }
  
 -static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 -{
 -      struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
 -
 -      /* Use legacy mode tlb invalidation.
 -       *
 -       * Currently on Raven the code below is broken for anything but
 -       * legacy mode due to a MMHUB power gating problem. A workaround
 -       * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
 -       * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
 -       * bit.
 -       *
 -       * TODO 1: agree on the right set of invalidation registers for
 -       * KFD use. Use the last one for now. Invalidate both GC and
 -       * MMHUB.
 -       *
 -       * TODO 2: support range-based invalidation, requires kfg2kgd
 -       * interface change
 -       */
 -      amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
 -}
 -
 -static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
 +static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
 +                      uint32_t flush_type)
  {
        signed long r;
        uint32_t seq;
                        PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                        PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
                        PACKET3_INVALIDATE_TLBS_PASID(pasid) |
 -                      PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(0)); /* legacy */
 +                      PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
        amdgpu_fence_emit_polling(ring, &seq);
        amdgpu_ring_commit(ring);
        spin_unlock(&adev->gfx.kiq.ring_lock);
@@@ -759,16 -780,12 +759,16 @@@ static int invalidate_tlbs(struct kgd_d
        struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
        int vmid;
        struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 +      uint32_t flush_type = 0;
  
        if (adev->in_gpu_reset)
                return -EIO;
 +      if (adev->gmc.xgmi.num_physical_nodes &&
 +              adev->asic_type == CHIP_VEGA20)
 +              flush_type = 2;
  
        if (ring->sched.ready)
 -              return invalidate_tlbs_with_kiq(adev, pasid);
 +              return invalidate_tlbs_with_kiq(adev, pasid, flush_type);
  
        for (vmid = 0; vmid < 16; vmid++) {
                if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
                if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) {
                        if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid)
                                == pasid) {
 -                              write_vmid_invalidate_request(kgd, vmid);
 +                              amdgpu_gmc_flush_gpu_tlb(adev, vmid,
 +                                                       flush_type);
                                break;
                        }
                }
@@@ -795,22 -811,7 +795,22 @@@ static int invalidate_tlbs_vmid(struct 
                return 0;
        }
  
 -      write_vmid_invalidate_request(kgd, vmid);
 +      /* Use legacy mode tlb invalidation.
 +       *
 +       * Currently on Raven the code below is broken for anything but
 +       * legacy mode due to a MMHUB power gating problem. A workaround
 +       * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
 +       * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
 +       * bit.
 +       *
 +       * TODO 1: agree on the right set of invalidation registers for
 +       * KFD use. Use the last one for now. Invalidate both GC and
 +       * MMHUB.
 +       *
 +       * TODO 2: support range-based invalidation, requires kfg2kgd
 +       * interface change
 +       */
 +      amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
        return 0;
  }
  
@@@ -837,7 -838,7 +837,7 @@@ static int kgd_wave_control_execute(str
  
        mutex_lock(&adev->grbm_idx_mutex);
  
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
 +      WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
  
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SE_BROADCAST_WRITES, 1);
  
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
 +      WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
        mutex_unlock(&adev->grbm_idx_mutex);
  
        return 0;
index 81e0e758cc548362313df15f0ed508e4b20b5cf5,0d27376a070766d6d26c5e20c823f1ca27d3bada..df26bf34b67536593c8ff1370a26426325420cb7
  
  #define pr_fmt(fmt) "kfd2kgd: " fmt
  
+ #include <linux/dma-buf.h>
  #include <linux/list.h>
  #include <linux/pagemap.h>
  #include <linux/sched/mm.h>
- #include <linux/dma-buf.h>
- #include <drm/drmP.h>
+ #include <linux/sched/task.h>
  #include "amdgpu_object.h"
  #include "amdgpu_vm.h"
  #include "amdgpu_amdkfd.h"
 +#include "amdgpu_dma_buf.h"
  
  /* Special VM and GART address alignment needed for VI pre-Fiji due to
   * a HW bug.
@@@ -457,17 -457,6 +458,17 @@@ static void add_kgd_mem_to_kfd_bo_list(
        mutex_unlock(&process_info->lock);
  }
  
 +static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
 +              struct amdkfd_process_info *process_info)
 +{
 +      struct ttm_validate_buffer *bo_list_entry;
 +
 +      bo_list_entry = &mem->validate_list;
 +      mutex_lock(&process_info->lock);
 +      list_del(&bo_list_entry->head);
 +      mutex_unlock(&process_info->lock);
 +}
 +
  /* Initializes user pages. It registers the MMU notifier and validates
   * the userptr BO in the GTT domain.
   *
@@@ -503,12 -492,28 +504,12 @@@ static int init_user_pages(struct kgd_m
                goto out;
        }
  
 -      /* If no restore worker is running concurrently, user_pages
 -       * should not be allocated
 -       */
 -      WARN(mem->user_pages, "Leaking user_pages array");
 -
 -      mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 -                                         sizeof(struct page *),
 -                                         GFP_KERNEL | __GFP_ZERO);
 -      if (!mem->user_pages) {
 -              pr_err("%s: Failed to allocate pages array\n", __func__);
 -              ret = -ENOMEM;
 -              goto unregister_out;
 -      }
 -
 -      ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages);
 +      ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
        if (ret) {
                pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
 -              goto free_out;
 +              goto unregister_out;
        }
  
 -      amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages);
 -
        ret = amdgpu_bo_reserve(bo, true);
        if (ret) {
                pr_err("%s: Failed to reserve BO\n", __func__);
        amdgpu_bo_unreserve(bo);
  
  release_out:
 -      if (ret)
 -              release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
 -free_out:
 -      kvfree(mem->user_pages);
 -      mem->user_pages = NULL;
 +      amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
  unregister_out:
        if (ret)
                amdgpu_mn_unregister(bo);
@@@ -580,12 -589,13 +581,12 @@@ static int reserve_bo_and_vm(struct kgd
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.num_shared = 1;
 -      ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);
  
        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
  
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
 -                                   false, &ctx->duplicates);
 +                                   false, &ctx->duplicates, true);
        if (!ret)
                ctx->reserved = true;
        else {
@@@ -643,6 -653,7 +644,6 @@@ static int reserve_bo_and_cond_vms(stru
        ctx->kfd_bo.priority = 0;
        ctx->kfd_bo.tv.bo = &bo->tbo;
        ctx->kfd_bo.tv.num_shared = 1;
 -      ctx->kfd_bo.user_pages = NULL;
        list_add(&ctx->kfd_bo.tv.head, &ctx->list);
  
        i = 0;
        }
  
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
 -                                   false, &ctx->duplicates);
 +                                   false, &ctx->duplicates, true);
        if (!ret)
                ctx->reserved = true;
        else
@@@ -886,9 -897,6 +887,9 @@@ static int init_kfd_vm(struct amdgpu_v
                                  AMDGPU_FENCE_OWNER_KFD, false);
        if (ret)
                goto wait_pd_fail;
 +      ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
 +      if (ret)
 +              goto reserve_shared_fail;
        amdgpu_bo_fence(vm->root.base.bo,
                        &vm->process_info->eviction_fence->base, true);
        amdgpu_bo_unreserve(vm->root.base.bo);
  
        return 0;
  
 +reserve_shared_fail:
  wait_pd_fail:
  validate_pd_fail:
        amdgpu_bo_unreserve(vm->root.base.bo);
@@@ -1103,8 -1110,7 +1104,8 @@@ int amdgpu_amdkfd_gpuvm_alloc_memory_of
                if (!offset || !*offset)
                        return -EINVAL;
                user_addr = *offset;
 -      } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
 +      } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
 +                      ALLOC_MEM_FLAGS_MMIO_REMAP)) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
                bo_type = ttm_bo_type_sg;
  
        if (user_addr) {
                ret = init_user_pages(*mem, current->mm, user_addr);
 -              if (ret) {
 -                      mutex_lock(&avm->process_info->lock);
 -                      list_del(&(*mem)->validate_list.head);
 -                      mutex_unlock(&avm->process_info->lock);
 +              if (ret)
                        goto allocate_init_user_pages_failed;
 -              }
        }
  
        if (offset)
        return 0;
  
  allocate_init_user_pages_failed:
 +      remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        amdgpu_bo_unref(&bo);
        /* Don't unreserve system mem limit twice */
        goto err_reserve_limit;
@@@ -1254,6 -1263,15 +1255,6 @@@ int amdgpu_amdkfd_gpuvm_free_memory_of_
        list_del(&bo_list_entry->head);
        mutex_unlock(&process_info->lock);
  
 -      /* Free user pages if necessary */
 -      if (mem->user_pages) {
 -              pr_debug("%s: Freeing user_pages array\n", __func__);
 -              if (mem->user_pages[0])
 -                      release_pages(mem->user_pages,
 -                                      mem->bo->tbo.ttm->num_pages);
 -              kvfree(mem->user_pages);
 -      }
 -
        ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
        if (unlikely(ret))
                return ret;
        /* Free the sync object */
        amdgpu_sync_free(&mem->sync);
  
 -      /* If the SG is not NULL, it's one we created for a doorbell
 -       * BO. We need to free it.
 +      /* If the SG is not NULL, it's one we created for a doorbell or mmio
 +       * remap BO. We need to free it.
         */
        if (mem->bo->tbo.sg) {
                sg_free_table(mem->bo->tbo.sg);
@@@ -1392,7 -1410,7 +1393,7 @@@ int amdgpu_amdkfd_gpuvm_map_memory_to_g
                        ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
                                              is_invalid_userptr);
                        if (ret) {
 -                              pr_err("Failed to map radeon bo to gpuvm\n");
 +                              pr_err("Failed to map bo to gpuvm\n");
                                goto map_bo_to_gpuvm_failed;
                        }
  
@@@ -1727,11 -1745,25 +1728,11 @@@ static int update_invalid_user_pages(st
  
                bo = mem->bo;
  
 -              if (!mem->user_pages) {
 -                      mem->user_pages =
 -                              kvmalloc_array(bo->tbo.ttm->num_pages,
 -                                               sizeof(struct page *),
 -                                               GFP_KERNEL | __GFP_ZERO);
 -                      if (!mem->user_pages) {
 -                              pr_err("%s: Failed to allocate pages array\n",
 -                                     __func__);
 -                              return -ENOMEM;
 -                      }
 -              } else if (mem->user_pages[0]) {
 -                      release_pages(mem->user_pages, bo->tbo.ttm->num_pages);
 -              }
 -
                /* Get updated user pages */
                ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
 -                                                 mem->user_pages);
 +                                                 bo->tbo.ttm->pages);
                if (ret) {
 -                      mem->user_pages[0] = NULL;
 +                      bo->tbo.ttm->pages[0] = NULL;
                        pr_info("%s: Failed to get user pages: %d\n",
                                __func__, ret);
                        /* Pretend it succeeded. It will fail later
                         * stalled user mode queues.
                         */
                }
 -
 -              /* Mark the BO as valid unless it was invalidated
 -               * again concurrently
 -               */
 -              if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid)
 -                      return -EAGAIN;
        }
  
        return 0;
  }
  
 +/* Remove invalid userptr BOs from hmm track list
 + *
 + * Stop HMM track the userptr update
 + */
 +static void untrack_invalid_user_pages(struct amdkfd_process_info *process_info)
 +{
 +      struct kgd_mem *mem, *tmp_mem;
 +      struct amdgpu_bo *bo;
 +
 +      list_for_each_entry_safe(mem, tmp_mem,
 +                               &process_info->userptr_inval_list,
 +                               validate_list.head) {
 +              bo = mem->bo;
 +              amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 +      }
 +}
 +
  /* Validate invalid userptr BOs
   *
   * Validates BOs on the userptr_inval_list, and moves them back to the
@@@ -1786,8 -1807,7 +1787,8 @@@ static int validate_invalid_user_pages(
                                     GFP_KERNEL);
        if (!pd_bo_list_entries) {
                pr_err("%s: Failed to allocate PD BO list entries\n", __func__);
 -              return -ENOMEM;
 +              ret = -ENOMEM;
 +              goto out_no_mem;
        }
  
        INIT_LIST_HEAD(&resv_list);
        }
  
        /* Reserve all BOs and page tables for validation */
 -      ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
 +      ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
 +                                   true);
        WARN(!list_empty(&duplicates), "Duplicates should be empty");
        if (ret)
 -              goto out;
 +              goto out_free;
  
        amdgpu_sync_create(&sync);
  
  
                bo = mem->bo;
  
 -              /* Copy pages array and validate the BO if we got user pages */
 -              if (mem->user_pages[0]) {
 -                      amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
 -                                                   mem->user_pages);
 +              /* Validate the BO if we got user pages */
 +              if (bo->tbo.ttm->pages[0]) {
                        amdgpu_bo_placement_from_domain(bo, mem->domain);
                        ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (ret) {
                        }
                }
  
 -              /* Validate succeeded, now the BO owns the pages, free
 -               * our copy of the pointer array. Put this BO back on
 -               * the userptr_valid_list. If we need to revalidate
 -               * it, we need to start from scratch.
 -               */
 -              kvfree(mem->user_pages);
 -              mem->user_pages = NULL;
                list_move_tail(&mem->validate_list.head,
                               &process_info->userptr_valid_list);
  
 +              /* Stop HMM track the userptr update. We dont check the return
 +               * value for concurrent CPU page table update because we will
 +               * reschedule the restore worker if process_info->evicted_bos
 +               * is updated.
 +               */
 +              amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 +
                /* Update mapping. If the BO was not validated
                 * (because we couldn't get user pages), this will
                 * clear the page table entries, which will result in
@@@ -1877,9 -1898,8 +1878,9 @@@ unreserve_out
        ttm_eu_backoff_reservation(&ticket, &resv_list);
        amdgpu_sync_wait(&sync, false);
        amdgpu_sync_free(&sync);
 -out:
 +out_free:
        kfree(pd_bo_list_entries);
 +out_no_mem:
  
        return ret;
  }
@@@ -1944,9 -1964,7 +1945,9 @@@ static void amdgpu_amdkfd_restore_userp
                 * hanging. No point trying again.
                 */
        }
 +
  unlock_out:
 +      untrack_invalid_user_pages(process_info);
        mutex_unlock(&process_info->lock);
        mmput(mm);
        put_task_struct(usertask);
@@@ -2015,7 -2033,7 +2016,7 @@@ int amdgpu_amdkfd_gpuvm_restore_process
        }
  
        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
 -                                   false, &duplicate_save);
 +                                   false, &duplicate_save, true);
        if (ret) {
                pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
                goto ttm_reserve_fail;
@@@ -2113,88 -2131,3 +2114,88 @@@ ttm_reserve_fail
        kfree(pd_bo_list);
        return ret;
  }
 +
 +int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
 +{
 +      struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
 +      struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
 +      int ret;
 +
 +      if (!info || !gws)
 +              return -EINVAL;
 +
 +      *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
 +      if (!*mem)
 +              return -ENOMEM;
 +
 +      mutex_init(&(*mem)->lock);
 +      (*mem)->bo = amdgpu_bo_ref(gws_bo);
 +      (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
 +      (*mem)->process_info = process_info;
 +      add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
 +      amdgpu_sync_create(&(*mem)->sync);
 +
 +
 +      /* Validate gws bo the first time it is added to process */
 +      mutex_lock(&(*mem)->process_info->lock);
 +      ret = amdgpu_bo_reserve(gws_bo, false);
 +      if (unlikely(ret)) {
 +              pr_err("Reserve gws bo failed %d\n", ret);
 +              goto bo_reservation_failure;
 +      }
 +
 +      ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
 +      if (ret) {
 +              pr_err("GWS BO validate failed %d\n", ret);
 +              goto bo_validation_failure;
 +      }
 +      /* GWS resource is shared b/t amdgpu and amdkfd
 +       * Add process eviction fence to bo so they can
 +       * evict each other.
 +       */
 +      amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
 +      amdgpu_bo_unreserve(gws_bo);
 +      mutex_unlock(&(*mem)->process_info->lock);
 +
 +      return ret;
 +
 +bo_validation_failure:
 +      amdgpu_bo_unreserve(gws_bo);
 +bo_reservation_failure:
 +      mutex_unlock(&(*mem)->process_info->lock);
 +      amdgpu_sync_free(&(*mem)->sync);
 +      remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
 +      amdgpu_bo_unref(&gws_bo);
 +      mutex_destroy(&(*mem)->lock);
 +      kfree(*mem);
 +      *mem = NULL;
 +      return ret;
 +}
 +
 +int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
 +{
 +      int ret;
 +      struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
 +      struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
 +      struct amdgpu_bo *gws_bo = kgd_mem->bo;
 +
 +      /* Remove BO from process's validate list so restore worker won't touch
 +       * it anymore
 +       */
 +      remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
 +
 +      ret = amdgpu_bo_reserve(gws_bo, false);
 +      if (unlikely(ret)) {
 +              pr_err("Reserve gws bo failed %d\n", ret);
 +              //TODO add BO back to validate_list?
 +              return ret;
 +      }
 +      amdgpu_amdkfd_remove_eviction_fence(gws_bo,
 +                      process_info->eviction_fence);
 +      amdgpu_bo_unreserve(gws_bo);
 +      amdgpu_sync_free(&kgd_mem->sync);
 +      amdgpu_bo_unref(&gws_bo);
 +      mutex_destroy(&kgd_mem->lock);
 +      kfree(mem);
 +      return 0;
 +}
index d497467b7fc679db1b8e13067482a26ab520b992,7d2f37434c1a2c595842484b809f82f69707ce2b..7bcf86c61999513c13c72bcdc1fccc3c24633ff9
@@@ -28,7 -28,8 +28,8 @@@
   *    Christian König <[email protected]>
   */
  
- #include <drm/drmP.h>
+ #include <linux/uaccess.h>
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  
@@@ -81,9 -82,9 +82,9 @@@ int amdgpu_bo_list_create(struct amdgpu
                return -ENOMEM;
  
        kref_init(&list->refcount);
 -      list->gds_obj = adev->gds.gds_gfx_bo;
 -      list->gws_obj = adev->gds.gws_gfx_bo;
 -      list->oa_obj = adev->gds.oa_gfx_bo;
 +      list->gds_obj = NULL;
 +      list->gws_obj = NULL;
 +      list->oa_obj = NULL;
  
        array = amdgpu_bo_list_array_entry(list, 0);
        memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
index fff558cf385b8ec4b016372bb44e5f8cb4183c6d,7cd24c1d7919950fc246fc319e1d910649940ed2..dc63707e426fa35214a70b8a4c775a345c57a99f
   * Authors:
   *    Jerome Glisse <[email protected]>
   */
+ #include <linux/file.h>
  #include <linux/pagemap.h>
  #include <linux/sync_file.h>
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_syncobj.h>
  #include "amdgpu.h"
@@@ -52,6 -54,7 +54,6 @@@ static int amdgpu_cs_user_fence_chunk(s
        p->uf_entry.tv.bo = &bo->tbo;
        /* One for TTM and one for the CS job */
        p->uf_entry.tv.num_shared = 2;
 -      p->uf_entry.user_pages = NULL;
  
        drm_gem_object_put_unlocked(gobj);
  
@@@ -541,14 -544,14 +543,14 @@@ static int amdgpu_cs_list_validate(stru
                if (usermm && usermm != current->mm)
                        return -EPERM;
  
 -              /* Check if we have user pages and nobody bound the BO already */
 -              if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 -                  lobj->user_pages) {
 +              if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
 +                  lobj->user_invalidated && lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;
 +
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                        binding_userptr = true;
@@@ -579,6 -582,7 +581,6 @@@ static int amdgpu_cs_parser_bos(struct 
        struct amdgpu_bo *gds;
        struct amdgpu_bo *gws;
        struct amdgpu_bo *oa;
 -      unsigned tries = 10;
        int r;
  
        INIT_LIST_HEAD(&p->validated);
        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);
  
 -      while (1) {
 -              struct list_head need_pages;
 -
 -              r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 -                                         &duplicates);
 -              if (unlikely(r != 0)) {
 -                      if (r != -ERESTARTSYS)
 -                              DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 -                      goto error_free_pages;
 -              }
 -
 -              INIT_LIST_HEAD(&need_pages);
 -              amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 -                      struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 -
 -                      if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 -                               &e->user_invalidated) && e->user_pages) {
 -
 -                              /* We acquired a page array, but somebody
 -                               * invalidated it. Free it and try again
 -                               */
 -                              release_pages(e->user_pages,
 -                                            bo->tbo.ttm->num_pages);
 -                              kvfree(e->user_pages);
 -                              e->user_pages = NULL;
 -                      }
 -
 -                      if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
 -                          !e->user_pages) {
 -                              list_del(&e->tv.head);
 -                              list_add(&e->tv.head, &need_pages);
 -
 -                              amdgpu_bo_unreserve(bo);
 -                      }
 +      /* Get userptr backing pages. If pages are updated after registered
 +       * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
 +       * amdgpu_ttm_backend_bind() to flush and invalidate new pages
 +       */
 +      amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 +              struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 +              bool userpage_invalidated = false;
 +              int i;
 +
 +              e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
 +                                      sizeof(struct page *),
 +                                      GFP_KERNEL | __GFP_ZERO);
 +              if (!e->user_pages) {
 +                      DRM_ERROR("calloc failure\n");
 +                      return -ENOMEM;
                }
  
 -              if (list_empty(&need_pages))
 -                      break;
 -
 -              /* Unreserve everything again. */
 -              ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 -
 -              /* We tried too many times, just abort */
 -              if (!--tries) {
 -                      r = -EDEADLK;
 -                      DRM_ERROR("deadlock in %s\n", __func__);
 -                      goto error_free_pages;
 +              r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
 +              if (r) {
 +                      kvfree(e->user_pages);
 +                      e->user_pages = NULL;
 +                      return r;
                }
  
 -              /* Fill the page arrays for all userptrs. */
 -              list_for_each_entry(e, &need_pages, tv.head) {
 -                      struct ttm_tt *ttm = e->tv.bo->ttm;
 -
 -                      e->user_pages = kvmalloc_array(ttm->num_pages,
 -                                                       sizeof(struct page*),
 -                                                       GFP_KERNEL | __GFP_ZERO);
 -                      if (!e->user_pages) {
 -                              r = -ENOMEM;
 -                              DRM_ERROR("calloc failure in %s\n", __func__);
 -                              goto error_free_pages;
 -                      }
 -
 -                      r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
 -                      if (r) {
 -                              DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
 -                              kvfree(e->user_pages);
 -                              e->user_pages = NULL;
 -                              goto error_free_pages;
 +              for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
 +                      if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
 +                              userpage_invalidated = true;
 +                              break;
                        }
                }
 +              e->user_invalidated = userpage_invalidated;
 +      }
  
 -              /* And try again. */
 -              list_splice(&need_pages, &p->validated);
 +      r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
 +                                 &duplicates, true);
 +      if (unlikely(r != 0)) {
 +              if (r != -ERESTARTSYS)
 +                      DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
 +              goto out;
        }
  
        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
  error_validate:
        if (r)
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 -
 -error_free_pages:
 -
 -      amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 -              if (!e->user_pages)
 -                      continue;
 -
 -              release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
 -              kvfree(e->user_pages);
 -      }
 -
 +out:
        return r;
  }
  
@@@ -1008,9 -1056,11 +1010,9 @@@ static int amdgpu_cs_ib_fill(struct amd
                j++;
        }
  
 -      /* UVD & VCE fw doesn't support user fences */
 +      /* MM engine doesn't support user fences */
        ring = to_amdgpu_ring(parser->entity->rq->sched);
 -      if (parser->job->uf_addr && (
 -          ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
 -          ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 +      if (parser->job->uf_addr && ring->funcs->no_user_fence)
                return -EINVAL;
  
        return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
@@@ -1280,6 -1330,7 +1282,6 @@@ static int amdgpu_cs_submit(struct amdg
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
 -
        int r;
  
        job = p->job;
        if (r)
                goto error_unlock;
  
 -      /* No memory allocation is allowed while holding the mn lock */
 +      /* No memory allocation is allowed while holding the mn lock.
 +       * p->mn is hold until amdgpu_cs_submit is finished and fence is added
 +       * to BOs.
 +       */
        amdgpu_mn_lock(p->mn);
 +
 +      /* If userptr are invalidated after amdgpu_cs_parser_bos(), return
 +       * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
 +       */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
  
 -              if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
 -                      r = -ERESTARTSYS;
 -                      goto error_abort;
 -              }
 +              r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 +      }
 +      if (r) {
 +              r = -EAGAIN;
 +              goto error_abort;
        }
  
        job->owner = p->filp;
@@@ -1401,7 -1444,6 +1403,7 @@@ int amdgpu_cs_ioctl(struct drm_device *
  
  out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 +
        return r;
  }
  
index d00fd5dd307a9a0ae8167fc5c8882433fbc7c4be,aa423887943f5057bcf0a99e5a0b145e5fed9528..66af78a6fa3b3d887389e9443e8b3368c0953def
   */
  #include <linux/power_supply.h>
  #include <linux/kthread.h>
+ #include <linux/module.h>
  #include <linux/console.h>
  #include <linux/slab.h>
- #include <drm/drmP.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_probe_helper.h>
  #include <drm/amdgpu_drm.h>
@@@ -97,28 -98,6 +98,28 @@@ static const char *amdgpu_asic_name[] 
        "LAST",
  };
  
 +/**
 + * DOC: pcie_replay_count
 + *
 + * The amdgpu driver provides a sysfs API for reporting the total number
 + * of PCIe replays (NAKs)
 + * The file pcie_replay_count is used for this and returns the total
 + * number of replays as a sum of the NAKs generated and NAKs received
 + */
 +
 +static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
 +              struct device_attribute *attr, char *buf)
 +{
 +      struct drm_device *ddev = dev_get_drvdata(dev);
 +      struct amdgpu_device *adev = ddev->dev_private;
 +      uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 +
 +      return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
 +}
 +
 +static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
 +              amdgpu_device_get_pcie_replay_count, NULL);
 +
  static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
  
  /**
@@@ -932,10 -911,8 +933,10 @@@ def_value
   * Validates certain module parameters and updates
   * the associated values used by the driver (all asics).
   */
 -static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 +static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
  {
 +      int ret = 0;
 +
        if (amdgpu_sched_jobs < 4) {
                dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
                         amdgpu_sched_jobs);
                amdgpu_vram_page_split = 1024;
        }
  
 -      if (amdgpu_lockup_timeout == 0) {
 -              dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
 -              amdgpu_lockup_timeout = 10000;
 +      ret = amdgpu_device_get_job_timeout_settings(adev);
 +      if (ret) {
 +              dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
 +              return ret;
        }
  
        adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
 +
 +      return ret;
  }
  
  /**
@@@ -1532,26 -1506,12 +1533,26 @@@ static int amdgpu_device_ip_early_init(
                r = amdgpu_virt_request_full_gpu(adev, true);
                if (r)
                        return -EAGAIN;
 +
 +              /* query the reg access mode at the very beginning */
 +              amdgpu_virt_init_reg_access_mode(adev);
        }
  
        adev->pm.pp_feature = amdgpu_pp_feature_mask;
        if (amdgpu_sriov_vf(adev))
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
  
 +      /* Read BIOS */
 +      if (!amdgpu_get_bios(adev))
 +              return -EINVAL;
 +
 +      r = amdgpu_atombios_init(adev);
 +      if (r) {
 +              dev_err(adev->dev, "amdgpu_atombios_init failed\n");
 +              amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
 +              return r;
 +      }
 +
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                        DRM_ERROR("disabled ip block: %d <%s>\n",
@@@ -1591,7 -1551,6 +1592,7 @@@ static int amdgpu_device_ip_hw_init_pha
                if (adev->ip_blocks[i].status.hw)
                        continue;
                if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
 +                  (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
                        r = adev->ip_blocks[i].version->funcs->hw_init(adev);
                        if (r) {
@@@ -1869,43 -1828,6 +1870,43 @@@ static int amdgpu_device_set_pg_state(s
        return 0;
  }
  
 +static int amdgpu_device_enable_mgpu_fan_boost(void)
 +{
 +      struct amdgpu_gpu_instance *gpu_ins;
 +      struct amdgpu_device *adev;
 +      int i, ret = 0;
 +
 +      mutex_lock(&mgpu_info.mutex);
 +
 +      /*
 +       * MGPU fan boost feature should be enabled
 +       * only when there are two or more dGPUs in
 +       * the system
 +       */
 +      if (mgpu_info.num_dgpu < 2)
 +              goto out;
 +
 +      for (i = 0; i < mgpu_info.num_dgpu; i++) {
 +              gpu_ins = &(mgpu_info.gpu_ins[i]);
 +              adev = gpu_ins->adev;
 +              if (!(adev->flags & AMD_IS_APU) &&
 +                  !gpu_ins->mgpu_fan_enabled &&
 +                  adev->powerplay.pp_funcs &&
 +                  adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
 +                      ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
 +                      if (ret)
 +                              break;
 +
 +                      gpu_ins->mgpu_fan_enabled = 1;
 +              }
 +      }
 +
 +out:
 +      mutex_unlock(&mgpu_info.mutex);
 +
 +      return ret;
 +}
 +
  /**
   * amdgpu_device_ip_late_init - run late init for hardware IPs
   *
@@@ -1939,15 -1861,11 +1940,15 @@@ static int amdgpu_device_ip_late_init(s
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
  
 -      queue_delayed_work(system_wq, &adev->late_init_work,
 -                         msecs_to_jiffies(AMDGPU_RESUME_MS));
 -
        amdgpu_device_fill_reset_magic(adev);
  
 +      r = amdgpu_device_enable_mgpu_fan_boost();
 +      if (r)
 +              DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
 +
 +      /* set to low pstate by default */
 +      amdgpu_xgmi_set_pstate(adev, 0);
 +
        return 0;
  }
  
@@@ -2046,20 -1964,65 +2047,20 @@@ static int amdgpu_device_ip_fini(struc
        return 0;
  }
  
 -static int amdgpu_device_enable_mgpu_fan_boost(void)
 -{
 -      struct amdgpu_gpu_instance *gpu_ins;
 -      struct amdgpu_device *adev;
 -      int i, ret = 0;
 -
 -      mutex_lock(&mgpu_info.mutex);
 -
 -      /*
 -       * MGPU fan boost feature should be enabled
 -       * only when there are two or more dGPUs in
 -       * the system
 -       */
 -      if (mgpu_info.num_dgpu < 2)
 -              goto out;
 -
 -      for (i = 0; i < mgpu_info.num_dgpu; i++) {
 -              gpu_ins = &(mgpu_info.gpu_ins[i]);
 -              adev = gpu_ins->adev;
 -              if (!(adev->flags & AMD_IS_APU) &&
 -                  !gpu_ins->mgpu_fan_enabled &&
 -                  adev->powerplay.pp_funcs &&
 -                  adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
 -                      ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
 -                      if (ret)
 -                              break;
 -
 -                      gpu_ins->mgpu_fan_enabled = 1;
 -              }
 -      }
 -
 -out:
 -      mutex_unlock(&mgpu_info.mutex);
 -
 -      return ret;
 -}
 -
  /**
 - * amdgpu_device_ip_late_init_func_handler - work handler for ib test
 + * amdgpu_device_delayed_init_work_handler - work handler for IB tests
   *
   * @work: work_struct.
   */
 -static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 +static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
  {
        struct amdgpu_device *adev =
 -              container_of(work, struct amdgpu_device, late_init_work.work);
 +              container_of(work, struct amdgpu_device, delayed_init_work.work);
        int r;
  
        r = amdgpu_ib_ring_tests(adev);
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 -
 -      r = amdgpu_device_enable_mgpu_fan_boost();
 -      if (r)
 -              DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
 -
 -      /*set to low pstate by default */
 -      amdgpu_xgmi_set_pstate(adev, 0);
 -
  }
  
  static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@@ -2511,9 -2474,7 +2512,9 @@@ int amdgpu_device_init(struct amdgpu_de
        mutex_init(&adev->lock_reset);
        mutex_init(&adev->virt.dpm_mutex);
  
 -      amdgpu_device_check_arguments(adev);
 +      r = amdgpu_device_check_arguments(adev);
 +      if (r)
 +              return r;
  
        spin_lock_init(&adev->mmio_idx_lock);
        spin_lock_init(&adev->smc_idx_lock);
        INIT_LIST_HEAD(&adev->ring_lru_list);
        spin_lock_init(&adev->ring_lru_list_lock);
  
 -      INIT_DELAYED_WORK(&adev->late_init_work,
 -                        amdgpu_device_ip_late_init_func_handler);
 +      INIT_DELAYED_WORK(&adev->delayed_init_work,
 +                        amdgpu_device_delayed_init_work_handler);
        INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
                          amdgpu_device_delay_enable_gfx_off);
  
                goto fence_driver_init;
        }
  
 -      /* Read BIOS */
 -      if (!amdgpu_get_bios(adev)) {
 -              r = -EINVAL;
 -              goto failed;
 -      }
 -
 -      r = amdgpu_atombios_init(adev);
 -      if (r) {
 -              dev_err(adev->dev, "amdgpu_atombios_init failed\n");
 -              amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
 -              goto failed;
 -      }
 -
        /* detect if we are with an SRIOV vbios */
        amdgpu_device_detect_sriov_bios(adev);
  
@@@ -2699,10 -2673,6 +2700,10 @@@ fence_driver_init
        if (r)
                DRM_ERROR("registering pm debugfs failed (%d).\n", r);
  
 +      r = amdgpu_ucode_sysfs_init(adev);
 +      if (r)
 +              DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
 +
        r = amdgpu_debugfs_gem_init(adev);
        if (r)
                DRM_ERROR("registering gem debugfs failed (%d).\n", r);
        }
  
        /* must succeed. */
 -      amdgpu_ras_post_init(adev);
 +      amdgpu_ras_resume(adev);
 +
 +      queue_delayed_work(system_wq, &adev->delayed_init_work,
 +                         msecs_to_jiffies(AMDGPU_RESUME_MS));
 +
 +      r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
 +      if (r) {
 +              dev_err(adev->dev, "Could not create pcie_replay_count\n");
 +              return r;
 +      }
  
        return 0;
  
@@@ -2795,7 -2756,7 +2796,7 @@@ void amdgpu_device_fini(struct amdgpu_d
                adev->firmware.gpu_info_fw = NULL;
        }
        adev->accel_working = false;
 -      cancel_delayed_work_sync(&adev->late_init_work);
 +      cancel_delayed_work_sync(&adev->delayed_init_work);
        /* free i2c buses */
        if (!amdgpu_device_has_dc_support(adev))
                amdgpu_i2c_fini(adev);
        adev->rmmio = NULL;
        amdgpu_device_doorbell_fini(adev);
        amdgpu_debugfs_regs_cleanup(adev);
 +      device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
 +      amdgpu_ucode_sysfs_fini(adev);
  }
  
  
@@@ -2858,7 -2817,7 +2859,7 @@@ int amdgpu_device_suspend(struct drm_de
        if (fbcon)
                amdgpu_fbdev_set_suspend(adev, 1);
  
 -      cancel_delayed_work_sync(&adev->late_init_work);
 +      cancel_delayed_work_sync(&adev->delayed_init_work);
  
        if (!amdgpu_device_has_dc_support(adev)) {
                /* turn off display hw */
  
        amdgpu_amdkfd_suspend(adev);
  
 +      amdgpu_ras_suspend(adev);
 +
        r = amdgpu_device_ip_suspend_phase1(adev);
  
        /* evict vram memory */
@@@ -2978,9 -2935,6 +2979,9 @@@ int amdgpu_device_resume(struct drm_dev
        if (r)
                return r;
  
 +      queue_delayed_work(system_wq, &adev->delayed_init_work,
 +                         msecs_to_jiffies(AMDGPU_RESUME_MS));
 +
        if (!amdgpu_device_has_dc_support(adev)) {
                /* pin cursors */
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                return r;
  
        /* Make sure IB tests flushed */
 -      flush_delayed_work(&adev->late_init_work);
 +      flush_delayed_work(&adev->delayed_init_work);
  
        /* blat the mode back in */
        if (fbcon) {
  
        drm_kms_helper_poll_enable(dev);
  
 +      amdgpu_ras_resume(adev);
 +
        /*
         * Most of the connector probing functions try to acquire runtime pm
         * refs to ensure that the GPU is powered on when connector polling is
@@@ -3504,13 -3456,6 +3505,13 @@@ static int amdgpu_do_asic_reset(struct 
                                if (vram_lost)
                                        amdgpu_device_fill_reset_magic(tmp_adev);
  
 +                              r = amdgpu_device_ip_late_init(tmp_adev);
 +                              if (r)
 +                                      goto out;
 +
 +                              /* must succeed. */
 +                              amdgpu_ras_resume(tmp_adev);
 +
                                /* Update PSP FW topology after reset */
                                if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
                                        r = amdgpu_xgmi_update_topology(hive, tmp_adev);
@@@ -3595,8 -3540,6 +3596,8 @@@ int amdgpu_device_gpu_recover(struct am
  
        dev_info(adev->dev, "GPU reset begin!\n");
  
 +      cancel_delayed_work_sync(&adev->delayed_init_work);
 +
        hive = amdgpu_get_xgmi_hive(adev, false);
  
        /*
@@@ -3753,6 -3696,43 +3754,6 @@@ skip_hw_reset
        return r;
  }
  
 -static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
 -                                                enum pci_bus_speed *speed,
 -                                                enum pcie_link_width *width)
 -{
 -      struct pci_dev *pdev = adev->pdev;
 -      enum pci_bus_speed cur_speed;
 -      enum pcie_link_width cur_width;
 -      u32 ret = 1;
 -
 -      *speed = PCI_SPEED_UNKNOWN;
 -      *width = PCIE_LNK_WIDTH_UNKNOWN;
 -
 -      while (pdev) {
 -              cur_speed = pcie_get_speed_cap(pdev);
 -              cur_width = pcie_get_width_cap(pdev);
 -              ret = pcie_bandwidth_available(adev->pdev, NULL,
 -                                                     NULL, &cur_width);
 -              if (!ret)
 -                      cur_width = PCIE_LNK_WIDTH_RESRV;
 -
 -              if (cur_speed != PCI_SPEED_UNKNOWN) {
 -                      if (*speed == PCI_SPEED_UNKNOWN)
 -                              *speed = cur_speed;
 -                      else if (cur_speed < *speed)
 -                              *speed = cur_speed;
 -              }
 -
 -              if (cur_width != PCIE_LNK_WIDTH_UNKNOWN) {
 -                      if (*width == PCIE_LNK_WIDTH_UNKNOWN)
 -                              *width = cur_width;
 -                      else if (cur_width < *width)
 -                              *width = cur_width;
 -              }
 -              pdev = pci_upstream_bridge(pdev);
 -      }
 -}
 -
  /**
   * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
   *
@@@ -3786,8 -3766,8 +3787,8 @@@ static void amdgpu_device_get_pcie_info
        if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
                return;
  
 -      amdgpu_device_get_min_pci_speed_width(adev, &platform_speed_cap,
 -                                            &platform_link_width);
 +      pcie_bandwidth_available(adev->pdev, NULL,
 +                               &platform_speed_cap, &platform_link_width);
  
        if (adev->pm.pcie_gen_mask == 0) {
                /* asic caps */
index 30e6ad8a90bb9ae3d44ddee9e99ff68d8d9e6ef3,6a15f85086c11191d5daec16d93ccce676889f7a..535650967b1a596c46e119867d25f3b493b908c1
@@@ -23,7 -23,7 +23,7 @@@
   * Authors: Dave Airlie
   *          Alex Deucher
   */
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
  #include "amdgpu.h"
  #include "amdgpu_i2c.h"
  #include "amdgpu_display.h"
  #include <asm/div64.h>
  
+ #include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/drm_edid.h>
  #include <drm/drm_gem_framebuffer_helper.h>
  #include <drm/drm_fb_helper.h>
+ #include <drm/drm_vblank.h>
  
  static void amdgpu_display_flip_callback(struct dma_fence *f,
                                         struct dma_fence_cb *cb)
@@@ -631,6 -633,10 +633,6 @@@ int amdgpu_display_modeset_create_props
                                         amdgpu_dither_enum_list, sz);
  
        if (amdgpu_device_has_dc_support(adev)) {
 -              adev->mode_info.max_bpc_property =
 -                      drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
 -              if (!adev->mode_info.max_bpc_property)
 -                      return -ENOMEM;
                adev->mode_info.abm_level_property =
                        drm_property_create_range(adev->ddev, 0,
                                                "abm level", 0, 4);
index 4711cf1b5bd2fce54e2f5822c2285a72f317e3a8,0000000000000000000000000000000000000000..489041df1f45636da40966578799e5cecf39bcb5
mode 100644,000000..100644
--- /dev/null
@@@ -1,450 -1,0 +1,448 @@@
- #include <drm/drmP.h>
 +/*
 + * Copyright 2019 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * based on nouveau_prime.c
 + *
 + * Authors: Alex Deucher
 + */
 +
 +/**
 + * DOC: PRIME Buffer Sharing
 + *
 + * The following callback implementations are used for :ref:`sharing GEM buffer
 + * objects between different devices via PRIME <prime_buffer_sharing>`.
 + */
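
For orientation, a minimal userspace sketch of the PRIME round-trip these
callbacks service (a sketch assuming a libdrm environment; the file
descriptors and GEM handle are illustrative):

    #include <stdint.h>
    #include <xf86drm.h>

    /* Export a GEM handle from one DRM device and import it into another.
     * drmPrimeHandleToFD()/drmPrimeFDToHandle() wrap the PRIME ioctls that
     * are ultimately serviced by amdgpu_gem_prime_export() and
     * amdgpu_gem_prime_import() below.
     */
    static int share_bo(int exporter_fd, int importer_fd, uint32_t handle,
                        uint32_t *imported_handle)
    {
            int prime_fd;

            if (drmPrimeHandleToFD(exporter_fd, handle,
                                   DRM_CLOEXEC | DRM_RDWR, &prime_fd))
                    return -1;

            return drmPrimeFDToHandle(importer_fd, prime_fd, imported_handle);
    }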
 +
 +#include "amdgpu.h"
 +#include "amdgpu_display.h"
 +#include "amdgpu_gem.h"
 +#include <drm/amdgpu_drm.h>
 +#include <linux/dma-buf.h>
 +#include <linux/dma-fence-array.h>
 +
 +/**
 + * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
 + * implementation
 + * @obj: GEM buffer object (BO)
 + *
 + * Returns:
 + * A scatter/gather table for the pinned pages of the BO's memory.
 + */
 +struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +      int npages = bo->tbo.num_pages;
 +
 +      return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
 +}
 +
 +/**
 + * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
 + * @obj: GEM BO
 + *
 + * Sets up an in-kernel virtual mapping of the BO's memory.
 + *
 + * Returns:
 + * The virtual address of the mapping or an error pointer.
 + */
 +void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +      int ret;
 +
 +      ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
 +                        &bo->dma_buf_vmap);
 +      if (ret)
 +              return ERR_PTR(ret);
 +
 +      return bo->dma_buf_vmap.virtual;
 +}
 +
 +/**
 + * amdgpu_gem_prime_vunmap - &dma_buf_ops.vunmap implementation
 + * @obj: GEM BO
 + * @vaddr: Virtual address (unused)
 + *
 + * Tears down the in-kernel virtual mapping of the BO's memory.
 + */
 +void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +
 +      ttm_bo_kunmap(&bo->dma_buf_vmap);
 +}
 +
 +/**
 + * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
 + * @obj: GEM BO
 + * @vma: Virtual memory area
 + *
 + * Sets up a userspace mapping of the BO's memory in the given
 + * virtual memory area.
 + *
 + * Returns:
 + * 0 on success or a negative error code on failure.
 + */
 +int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
 +                        struct vm_area_struct *vma)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +      unsigned asize = amdgpu_bo_size(bo);
 +      int ret;
 +
 +      if (!vma->vm_file)
 +              return -ENODEV;
 +
 +      if (adev == NULL)
 +              return -ENODEV;
 +
 +      /* Check for valid size. */
 +      if (asize < vma->vm_end - vma->vm_start)
 +              return -EINVAL;
 +
 +      if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
 +          (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
 +              return -EPERM;
 +      }
 +      vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
 +
 +      /* prime mmap does not need to check access, so allow here */
 +      ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
 +      if (ret)
 +              return ret;
 +
 +      ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
 +      drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
 +
 +      return ret;
 +}
 +
 +static int
 +__reservation_object_make_exclusive(struct reservation_object *obj)
 +{
 +      struct dma_fence **fences;
 +      unsigned int count;
 +      int r;
 +
 +      if (!reservation_object_get_list(obj)) /* no shared fences to convert */
 +              return 0;
 +
 +      r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
 +      if (r)
 +              return r;
 +
 +      if (count == 0) {
 +              /* Now that was unexpected. */
 +      } else if (count == 1) {
 +              reservation_object_add_excl_fence(obj, fences[0]);
 +              dma_fence_put(fences[0]);
 +              kfree(fences);
 +      } else {
 +              struct dma_fence_array *array;
 +
 +              array = dma_fence_array_create(count, fences,
 +                                             dma_fence_context_alloc(1), 0,
 +                                             false);
 +              if (!array)
 +                      goto err_fences_put;
 +
 +              reservation_object_add_excl_fence(obj, &array->base);
 +              dma_fence_put(&array->base);
 +      }
 +
 +      return 0;
 +
 +err_fences_put:
 +      while (count--)
 +              dma_fence_put(fences[count]);
 +      kfree(fences);
 +      return -ENOMEM;
 +}
 +
 +/**
 + * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation
 + * @dma_buf: Shared DMA buffer
 + * @attach: DMA-buf attachment
 + *
 + * Makes sure that the shared DMA buffer can be accessed by the target device.
 + * For now, simply pins it to the GTT domain, where it should be accessible by
 + * all DMA devices.
 + *
 + * Returns:
 + * 0 on success or a negative error code on failure.
 + */
 +static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
 +                                   struct dma_buf_attachment *attach)
 +{
 +      struct drm_gem_object *obj = dma_buf->priv;
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +      long r;
 +
 +      r = drm_gem_map_attach(dma_buf, attach);
 +      if (r)
 +              return r;
 +
 +      r = amdgpu_bo_reserve(bo, false);
 +      if (unlikely(r != 0))
 +              goto error_detach;
 +
 +
 +      if (attach->dev->driver != adev->dev->driver) {
 +              /*
 +               * We only create shared fences for internal use, but importers
 +               * of the dmabuf rely on exclusive fences for implicitly
 +               * tracking write hazards. As any of the current fences may
 +               * correspond to a write, we need to convert all existing
 +               * fences on the reservation object into a single exclusive
 +               * fence.
 +               */
 +              r = __reservation_object_make_exclusive(bo->tbo.resv);
 +              if (r)
 +                      goto error_unreserve;
 +      }
 +
 +      /* pin buffer into GTT */
 +      r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 +      if (r)
 +              goto error_unreserve;
 +
 +      if (attach->dev->driver != adev->dev->driver)
 +              bo->prime_shared_count++;
 +
 +error_unreserve:
 +      amdgpu_bo_unreserve(bo);
 +
 +error_detach:
 +      if (r)
 +              drm_gem_map_detach(dma_buf, attach);
 +      return r;
 +}
 +
 +/**
 + * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation
 + * @dma_buf: Shared DMA buffer
 + * @attach: DMA-buf attachment
 + *
 + * This is called when a shared DMA buffer no longer needs to be accessible by
 + * another device. For now, simply unpins the buffer from GTT.
 + */
 +static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf,
 +                                    struct dma_buf_attachment *attach)
 +{
 +      struct drm_gem_object *obj = dma_buf->priv;
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +      int ret = 0;
 +
 +      ret = amdgpu_bo_reserve(bo, true);
 +      if (unlikely(ret != 0))
 +              goto error;
 +
 +      amdgpu_bo_unpin(bo);
 +      if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
 +              bo->prime_shared_count--;
 +      amdgpu_bo_unreserve(bo);
 +
 +error:
 +      drm_gem_map_detach(dma_buf, attach);
 +}
 +
 +/**
 + * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
 + * @obj: GEM BO
 + *
 + * Returns:
 + * The BO's reservation object.
 + */
 +struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 +
 +      return bo->tbo.resv;
 +}
 +
 +/**
 + * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 + * @dma_buf: Shared DMA buffer
 + * @direction: Direction of DMA transfer
 + *
 + * This is called before CPU access to the shared DMA buffer's memory. If it's
 + * a read access, the buffer is moved to the GTT domain if possible, for optimal
 + * CPU read performance.
 + *
 + * Returns:
 + * 0 on success or a negative error code on failure.
 + */
 +static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 +                                         enum dma_data_direction direction)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
 +      struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 +      struct ttm_operation_ctx ctx = { true, false };
 +      u32 domain = amdgpu_display_supported_domains(adev);
 +      int ret;
 +      bool reads = (direction == DMA_BIDIRECTIONAL ||
 +                    direction == DMA_FROM_DEVICE);
 +
 +      if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
 +              return 0;
 +
 +      /* move to gtt */
 +      ret = amdgpu_bo_reserve(bo, false);
 +      if (unlikely(ret != 0))
 +              return ret;
 +
 +      if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
 +              amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 +              ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 +      }
 +
 +      amdgpu_bo_unreserve(bo);
 +      return ret;
 +}
 +
 +const struct dma_buf_ops amdgpu_dmabuf_ops = {
 +      .attach = amdgpu_dma_buf_map_attach,
 +      .detach = amdgpu_dma_buf_map_detach,
 +      .map_dma_buf = drm_gem_map_dma_buf,
 +      .unmap_dma_buf = drm_gem_unmap_dma_buf,
 +      .release = drm_gem_dmabuf_release,
 +      .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
 +      .mmap = drm_gem_dmabuf_mmap,
 +      .vmap = drm_gem_dmabuf_vmap,
 +      .vunmap = drm_gem_dmabuf_vunmap,
 +};
 +
 +/**
 + * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 + * @dev: DRM device
 + * @gobj: GEM BO
 + * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 + *
 + * The main work is done by the &drm_gem_prime_export helper, which in turn
 + * uses &amdgpu_gem_prime_res_obj.
 + *
 + * Returns:
 + * Shared DMA buffer representing the GEM BO from the given device.
 + */
 +struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 +                                      struct drm_gem_object *gobj,
 +                                      int flags)
 +{
 +      struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 +      struct dma_buf *buf;
 +
 +      if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
 +          bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 +              return ERR_PTR(-EPERM);
 +
 +      buf = drm_gem_prime_export(dev, gobj, flags);
 +      if (!IS_ERR(buf)) {
 +              buf->file->f_mapping = dev->anon_inode->i_mapping;
 +              buf->ops = &amdgpu_dmabuf_ops;
 +      }
 +
 +      return buf;
 +}
 +
 +/**
 + * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table
 + * implementation
 + * @dev: DRM device
 + * @attach: DMA-buf attachment
 + * @sg: Scatter/gather table
 + *
 + * Imports shared DMA buffer memory exported by another device.
 + *
 + * Returns:
 + * A new GEM BO of the given DRM device, representing the memory
 + * described by the given DMA-buf attachment and scatter/gather table.
 + */
 +struct drm_gem_object *
 +amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 +                               struct dma_buf_attachment *attach,
 +                               struct sg_table *sg)
 +{
 +      struct reservation_object *resv = attach->dmabuf->resv;
 +      struct amdgpu_device *adev = dev->dev_private;
 +      struct amdgpu_bo *bo;
 +      struct amdgpu_bo_param bp;
 +      int ret;
 +
 +      memset(&bp, 0, sizeof(bp));
 +      bp.size = attach->dmabuf->size;
 +      bp.byte_align = PAGE_SIZE;
 +      bp.domain = AMDGPU_GEM_DOMAIN_CPU;
 +      bp.flags = 0;
 +      bp.type = ttm_bo_type_sg;
 +      bp.resv = resv;
 +      ww_mutex_lock(&resv->lock, NULL);
 +      ret = amdgpu_bo_create(adev, &bp, &bo);
 +      if (ret)
 +              goto error;
 +
 +      bo->tbo.sg = sg;
 +      bo->tbo.ttm->sg = sg;
 +      bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 +      bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
 +      if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
 +              bo->prime_shared_count = 1;
 +
 +      ww_mutex_unlock(&resv->lock);
 +      return &bo->gem_base;
 +
 +error:
 +      ww_mutex_unlock(&resv->lock);
 +      return ERR_PTR(ret);
 +}
 +
 +/**
 + * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 + * @dev: DRM device
 + * @dma_buf: Shared DMA buffer
 + *
 + * The main work is done by the &drm_gem_prime_import helper, which in turn
 + * uses &amdgpu_gem_prime_import_sg_table.
 + *
 + * Returns:
 + * GEM BO representing the shared DMA buffer for the given device.
 + */
 +struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
 +                                          struct dma_buf *dma_buf)
 +{
 +      struct drm_gem_object *obj;
 +
 +      if (dma_buf->ops == &amdgpu_dmabuf_ops) {
 +              obj = dma_buf->priv;
 +              if (obj->dev == dev) {
 +                      /*
 +                       * Importing a dmabuf exported from our own GEM increases
 +                       * the refcount on the GEM itself instead of the dmabuf's f_count.
 +                       */
 +                      drm_gem_object_get(obj);
 +                      return obj;
 +              }
 +      }
 +
 +      return drm_gem_prime_import(dev, dma_buf);
 +}
index 78706dfa753ab7ad1202ebcb7571bba52e107c06,2e2e9508c3390e3a22368359825fe10a26dea19a..0a577a3890244694eeb6e91604a540c398c478df
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include <drm/drmP.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_drv.h>
  #include <drm/drm_gem.h>
+ #include <drm/drm_vblank.h>
  #include "amdgpu_drv.h"
  
  #include <drm/drm_pciids.h>
  #include <linux/console.h>
  #include <linux/module.h>
+ #include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include <linux/vga_switcheroo.h>
  #include <drm/drm_probe_helper.h>
  
  #include "amdgpu.h"
  #include "amdgpu_irq.h"
 -#include "amdgpu_gem.h"
 +#include "amdgpu_dma_buf.h"
  
  #include "amdgpu_amdkfd.h"
  
@@@ -81,8 -83,6 +83,8 @@@
  #define KMS_DRIVER_MINOR      32
  #define KMS_DRIVER_PATCHLEVEL 0
  
 +#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH       256
 +
  int amdgpu_vram_limit = 0;
  int amdgpu_vis_vram_limit = 0;
  int amdgpu_gart_size = -1; /* auto */
@@@ -95,7 -95,7 +97,7 @@@ int amdgpu_disp_priority = 0
  int amdgpu_hw_i2c = 0;
  int amdgpu_pcie_gen2 = -1;
  int amdgpu_msi = -1;
 -int amdgpu_lockup_timeout = 10000;
 +char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
  int amdgpu_dpm = -1;
  int amdgpu_fw_load_type = -1;
  int amdgpu_aspm = -1;
@@@ -229,21 -229,12 +231,21 @@@ MODULE_PARM_DESC(msi, "MSI support (1 
  module_param_named(msi, amdgpu_msi, int, 0444);
  
  /**
 - * DOC: lockup_timeout (int)
 - * Set GPU scheduler timeout value in ms. Value 0 is invalidated, will be adjusted to 10000.
 - * Negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET). The default is 10000.
 - */
 -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
 -module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
 + * DOC: lockup_timeout (string)
 + * Set GPU scheduler timeout value in ms.
 + *
 + * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]; that is, one or
 + * multiple values can be specified. Zero and negative values are invalid and
 + * will be adjusted to the default timeout.
 + *  - With one value specified, the setting applies to all non-compute jobs.
 + *  - With multiple values specified, the first one is for GFX, the second for
 + *    Compute, and the third and fourth for SDMA and Video respectively.
 + * By default (with no lockup_timeout setting), the timeout for all non-compute
 + * (GFX, SDMA and Video) jobs is 10000 ms, and no timeout is enforced on compute jobs.
 + */
 +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and no timeout for compute jobs), "
 +              "format is [Non-Compute] or [GFX,Compute,SDMA,Video]");
 +module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
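
For example (illustrative values only), the parameter can be set on the
kernel command line or at module load time:

    amdgpu.lockup_timeout=50000                      (one value: applies to all non-compute queues)
    amdgpu.lockup_timeout=10000,60000,10000,10000    (per queue: GFX,Compute,SDMA,Video)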
  
  /**
   * DOC: dpm (int)
@@@ -666,16 -657,6 +668,16 @@@ MODULE_PARM_DESC(noretry
  int halt_if_hws_hang;
  module_param(halt_if_hws_hang, int, 0644);
  MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
 +
 +/**
 + * DOC: hws_gws_support (bool)
 + * Whether HWS supports GWS barriers. Default value: false (not supported).
 + * This will be replaced with a MEC firmware version check once the
 + * firmware is ready.
 + */
 +bool hws_gws_support;
 +module_param(hws_gws_support, bool, 0444);
 +MODULE_PARM_DESC(hws_gws_support, "MEC FW supports GWS barriers (false = not supported (default), true = supported)");
  #endif
  
  /**
@@@ -1237,62 -1218,6 +1239,62 @@@ int amdgpu_file_to_fpriv(struct file *f
        return 0;
  }
  
 +int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 +{
 +      char *input = amdgpu_lockup_timeout;
 +      char *timeout_setting = NULL;
 +      int index = 0;
 +      long timeout;
 +      int ret = 0;
 +
 +      /*
 +       * By default, the timeout for non-compute jobs is 10000 ms,
 +       * and no timeout is enforced on compute jobs.
 +       */
 +      adev->gfx_timeout = adev->sdma_timeout = adev->video_timeout = 10000;
 +      adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
 +
 +      if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
 +              while ((timeout_setting = strsep(&input, ",")) &&
 +                              strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
 +                      ret = kstrtol(timeout_setting, 0, &timeout);
 +                      if (ret)
 +                              return ret;
 +
 +                      /* Invalidate 0 and negative values */
 +                      if (timeout <= 0) {
 +                              index++;
 +                              continue;
 +                      }
 +
 +                      switch (index++) {
 +                      case 0:
 +                              adev->gfx_timeout = timeout;
 +                              break;
 +                      case 1:
 +                              adev->compute_timeout = timeout;
 +                              break;
 +                      case 2:
 +                              adev->sdma_timeout = timeout;
 +                              break;
 +                      case 3:
 +                              adev->video_timeout = timeout;
 +                              break;
 +                      default:
 +                              break;
 +                      }
 +              }
 +              /*
 +               * If only one value was specified, it applies
 +               * to all non-compute jobs.
 +               */
 +              if (index == 1)
 +                      adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
 +      }
 +
 +      return ret;
 +}
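
As a worked example of the parsing above: lockup_timeout=10000,60000 sets
gfx_timeout to 10000 and compute_timeout to 60000, while sdma_timeout and
video_timeout keep their 10000 default; a single value such as 50000 is
copied to the GFX, SDMA and Video timeouts (the index == 1 case) and leaves
compute jobs without a timeout.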
 +
  static bool
  amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
                                 bool in_vblank_irq, int *vpos, int *hpos,
index 3a483f7e89c75eb5458a209ea2eddb541b00154c,2d0d1c43c7866dc7bdb25e0bfc225706263e1148..df49fa4bbf61e9a19336a646ae6691dc53068082
@@@ -34,7 -34,9 +34,9 @@@
  #include <linux/kref.h>
  #include <linux/slab.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  
@@@ -427,13 -429,9 +429,13 @@@ int amdgpu_fence_driver_start_ring(stru
  int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                                  unsigned num_hw_submission)
  {
 +      struct amdgpu_device *adev = ring->adev;
        long timeout;
        int r;
  
 +      if (!adev)
 +              return -EINVAL;
 +
        /* Check that num_hw_submission is a power of two */
        if ((num_hw_submission & (num_hw_submission - 1)) != 0)
                return -EINVAL;
  
        /* No need to setup the GPU scheduler for KIQ ring */
        if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
 -              /* for non-sriov case, no timeout enforce on compute ring */
 -              if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
 -                              && !amdgpu_sriov_vf(ring->adev))
 -                      timeout = MAX_SCHEDULE_TIMEOUT;
 -              else
 -                      timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
 +              switch (ring->funcs->type) {
 +              case AMDGPU_RING_TYPE_GFX:
 +                      timeout = adev->gfx_timeout;
 +                      break;
 +              case AMDGPU_RING_TYPE_COMPUTE:
 +                      /*
 +                       * In the non-SRIOV case, no timeout is enforced
 +                       * on the compute ring by default, unless the
 +                       * user specifies one.
 +                       *
 +                       * In the SRIOV case, always use the same
 +                       * timeout as the GFX ring.
 +                       */
 +                      if (!amdgpu_sriov_vf(ring->adev))
 +                              timeout = adev->compute_timeout;
 +                      else
 +                              timeout = adev->gfx_timeout;
 +                      break;
 +              case AMDGPU_RING_TYPE_SDMA:
 +                      timeout = adev->sdma_timeout;
 +                      break;
 +              default:
 +                      timeout = adev->video_timeout;
 +                      break;
 +              }
  
                r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
                                   num_hw_submission, amdgpu_job_hang_limit,
index d513a5ad03dd3121ec88f8b57f61060b11cd14e8,a806d403d29a01d377a36500883190c729380e97..37b526c6f494856dcd7ed56a8bdad535c76fc88e
   *          Jerome Glisse
   */
  #include <linux/ktime.h>
+ #include <linux/module.h>
  #include <linux/pagemap.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include <drm/amdgpu_drm.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_display.h"
  #include "amdgpu_xgmi.h"
@@@ -171,7 -175,7 +175,7 @@@ void amdgpu_gem_object_close(struct drm
  
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
  
 -      r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 +      r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
@@@ -330,24 -334,26 +334,24 @@@ int amdgpu_gem_userptr_ioctl(struct drm
  
                r = amdgpu_bo_reserve(bo, true);
                if (r)
 -                      goto free_pages;
 +                      goto user_pages_done;
  
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
 -                      goto free_pages;
 +                      goto user_pages_done;
        }
  
        r = drm_gem_handle_create(filp, gobj, &handle);
 -      /* drop reference from allocate - handle holds it now */
 -      drm_gem_object_put_unlocked(gobj);
        if (r)
 -              return r;
 +              goto user_pages_done;
  
        args->handle = handle;
 -      return 0;
  
 -free_pages:
 -      release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
 +user_pages_done:
 +      if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
 +              amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
  
  release_object:
        drm_gem_object_put_unlocked(gobj);
@@@ -608,7 -614,7 +612,7 @@@ int amdgpu_gem_va_ioctl(struct drm_devi
  
        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
  
 -      r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 +      r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
        if (r)
                goto error_unref;
  
index 7ab1241bd9e5638d8226ae1c37365f1803124b4c,7e9eafce8a355fe8bba698a878349d7dbbec8a2c..9d76e0923a5a3a4d705a0905b63036344b3f3bd7
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/kthread.h>
  #include <linux/wait.h>
  #include <linux/sched.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_trace.h"
  
@@@ -51,8 -51,6 +51,8 @@@ static void amdgpu_job_timedout(struct 
  
        if (amdgpu_device_should_recover_gpu(ring->adev))
                amdgpu_device_gpu_recover(ring->adev, job);
 +      else
 +              drm_sched_suspend_timeout(&ring->sched);
  }
  
  int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
index 0f7cc98961d532825d3e0ec4b9e26deaadd211bf,ad7c4bef5797c4157a22b20d136cdcca93b558f4..a70e5a32749aca162b37eec236f2fea5563f5d50
@@@ -25,8 -25,9 +25,9 @@@
   *          Alex Deucher
   *          Jerome Glisse
   */
- #include <drm/drmP.h>
  #include "amdgpu.h"
+ #include <drm/drm_debugfs.h>
  #include <drm/amdgpu_drm.h>
  #include "amdgpu_sched.h"
  #include "amdgpu_uvd.h"
@@@ -35,6 -36,8 +36,8 @@@
  
  #include <linux/vga_switcheroo.h>
  #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ #include <linux/pci.h>
  #include <linux/pm_runtime.h>
  #include "amdgpu_amdkfd.h"
  #include "amdgpu_gem.h"
@@@ -590,10 -593,13 +593,10 @@@ static int amdgpu_info_ioctl(struct drm
                struct drm_amdgpu_info_gds gds_info;
  
                memset(&gds_info, 0, sizeof(gds_info));
 -              gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size;
 -              gds_info.compute_partition_size = adev->gds.mem.cs_partition_size;
 -              gds_info.gds_total_size = adev->gds.mem.total_size;
 -              gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size;
 -              gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size;
 -              gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size;
 -              gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size;
 +              gds_info.compute_partition_size = adev->gds.gds_size;
 +              gds_info.gds_total_size = adev->gds.gds_size;
 +              gds_info.gws_per_compute_partition = adev->gds.gws_size;
 +              gds_info.oa_per_compute_partition = adev->gds.oa_size;
                return copy_to_user(out, &gds_info,
                                    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
        }
@@@ -974,7 -980,7 +977,7 @@@ int amdgpu_driver_open_kms(struct drm_d
        int r, pasid;
  
        /* Ensure IB tests are run on ring */
 -      flush_delayed_work(&adev->late_init_work);
 +      flush_delayed_work(&adev->delayed_init_work);
  
        file_priv->driver_priv = NULL;
  
index 41ccee49a2244a0db0d7d114dbffcb305e438742,19f8909e9647fd91962c55e69978b34b0d266299..4ff4cf5988eaa396a1a3fb34c8f4afd01c2e1ffa
@@@ -45,9 -45,9 +45,9 @@@
  
  #include <linux/firmware.h>
  #include <linux/module.h>
 -#include <linux/mmu_notifier.h>
 +#include <linux/hmm.h>
  #include <linux/interval_tree.h>
- #include <drm/drmP.h>
  #include <drm/drm.h>
  
  #include "amdgpu.h"
   *
   * @adev: amdgpu device pointer
   * @mm: process address space
 - * @mn: MMU notifier structure
   * @type: type of MMU notifier
   * @work: destruction work item
   * @node: hash table node to find structure by adev and mn
   * @lock: rw semaphore protecting the notifier nodes
   * @objects: interval tree containing amdgpu_mn_nodes
 - * @read_lock: mutex for recursive locking of @lock
 - * @recursion: depth of recursion
 + * @mirror: HMM mirror function support
   *
   * Data for each amdgpu device and process address space.
   */
@@@ -71,6 -73,7 +71,6 @@@ struct amdgpu_mn 
        /* constant after initialisation */
        struct amdgpu_device    *adev;
        struct mm_struct        *mm;
 -      struct mmu_notifier     mn;
        enum amdgpu_mn_type     type;
  
        /* only used on destruction */
@@@ -82,9 -85,8 +82,9 @@@
        /* objects protected by lock */
        struct rw_semaphore     lock;
        struct rb_root_cached   objects;
 -      struct mutex            read_lock;
 -      atomic_t                recursion;
 +
 +      /* HMM mirror */
 +      struct hmm_mirror       mirror;
  };
  
  /**
@@@ -101,7 -103,7 +101,7 @@@ struct amdgpu_mn_node 
  };
  
  /**
 - * amdgpu_mn_destroy - destroy the MMU notifier
 + * amdgpu_mn_destroy - destroy the HMM mirror
   *
   * @work: previously scheduled work item
   *
@@@ -127,26 -129,28 +127,26 @@@ static void amdgpu_mn_destroy(struct wo
        }
        up_write(&amn->lock);
        mutex_unlock(&adev->mn_lock);
 -      mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
 +
 +      hmm_mirror_unregister(&amn->mirror);
        kfree(amn);
  }
  
  /**
 - * amdgpu_mn_release - callback to notify about mm destruction
 + * amdgpu_hmm_mirror_release - callback to notify about mm destruction
   *
 - * @mn: our notifier
 - * @mm: the mm this callback is about
 + * @mirror: the HMM mirror (mm) this callback is about
   *
  - * Schedule a work item to lazily destroy our notifier.
  + * Schedule a work item to lazily destroy the HMM mirror.
   */
 -static void amdgpu_mn_release(struct mmu_notifier *mn,
 -                            struct mm_struct *mm)
 +static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
  {
 -      struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 +      struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
  
        INIT_WORK(&amn->work, amdgpu_mn_destroy);
        schedule_work(&amn->work);
  }
  
 -
  /**
   * amdgpu_mn_lock - take the write side lock for this notifier
   *
@@@ -177,10 -181,14 +177,10 @@@ void amdgpu_mn_unlock(struct amdgpu_mn 
  static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
  {
        if (blockable)
 -              mutex_lock(&amn->read_lock);
 -      else if (!mutex_trylock(&amn->read_lock))
 +              down_read(&amn->lock);
 +      else if (!down_read_trylock(&amn->lock))
                return -EAGAIN;
  
 -      if (atomic_inc_return(&amn->recursion) == 1)
 -              down_read_non_owner(&amn->lock);
 -      mutex_unlock(&amn->read_lock);
 -
        return 0;
  }
  
   */
  static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
  {
 -      if (atomic_dec_return(&amn->recursion) == 0)
 -              up_read_non_owner(&amn->lock);
 +      up_read(&amn->lock);
  }
  
  /**
@@@ -220,132 -229,149 +220,132 @@@ static void amdgpu_mn_invalidate_node(s
                        true, false, MAX_SCHEDULE_TIMEOUT);
                if (r <= 0)
                        DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 -
 -              amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
        }
  }
  
  /**
 - * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
 + * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
   *
 - * @mn: our notifier
 - * @range: mmu notifier context
 + * @mirror: the hmm_mirror (mm) that is about to be updated
 + * @update: the update start and end addresses
   *
   * Block for operations on BOs to finish and mark pages as accessed and
   * potentially dirty.
   */
 -static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 -                      const struct mmu_notifier_range *range)
 +static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
 +                      const struct hmm_update *update)
  {
 -      struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 +      struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 +      unsigned long start = update->start;
 +      unsigned long end = update->end;
 +      bool blockable = update->blockable;
        struct interval_tree_node *it;
 -      unsigned long end;
  
        /* notification is exclusive, but interval is inclusive */
 -      end = range->end - 1;
 +      end -= 1;
  
        /* TODO we should be able to split locking for interval tree and
         * amdgpu_mn_invalidate_node
         */
 -      if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 +      if (amdgpu_mn_read_lock(amn, blockable))
                return -EAGAIN;
  
 -      it = interval_tree_iter_first(&amn->objects, range->start, end);
 +      it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
  
 -              if (!mmu_notifier_range_blockable(range)) {
 +              if (!blockable) {
                        amdgpu_mn_read_unlock(amn);
                        return -EAGAIN;
                }
  
                node = container_of(it, struct amdgpu_mn_node, it);
 -              it = interval_tree_iter_next(it, range->start, end);
 +              it = interval_tree_iter_next(it, start, end);
  
 -              amdgpu_mn_invalidate_node(node, range->start, end);
 +              amdgpu_mn_invalidate_node(node, start, end);
        }
  
 +      amdgpu_mn_read_unlock(amn);
 +
        return 0;
  }
  
  /**
 - * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
 + * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
   *
 - * @mn: our notifier
 - * @mm: the mm this callback is about
 - * @start: start of updated range
 - * @end: end of updated range
 + * @mirror: the hmm_mirror (mm) that is about to be updated
 + * @update: the update start and end addresses
   *
   * We temporarily evict all BOs between start and end. This
   * necessitates evicting all user-mode queues of the process. The BOs
   * are restored in amdgpu_mn_invalidate_range_end_hsa.
   */
 -static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 -                      const struct mmu_notifier_range *range)
 +static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
 +                      const struct hmm_update *update)
  {
 -      struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 +      struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 +      unsigned long start = update->start;
 +      unsigned long end = update->end;
 +      bool blockable = update->blockable;
        struct interval_tree_node *it;
 -      unsigned long end;
  
        /* notification is exclusive, but interval is inclusive */
 -      end = range->end - 1;
 +      end -= 1;
  
 -      if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
 +      if (amdgpu_mn_read_lock(amn, blockable))
                return -EAGAIN;
  
 -      it = interval_tree_iter_first(&amn->objects, range->start, end);
 +      it = interval_tree_iter_first(&amn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;
                struct amdgpu_bo *bo;
  
 -              if (!mmu_notifier_range_blockable(range)) {
 +              if (!blockable) {
                        amdgpu_mn_read_unlock(amn);
                        return -EAGAIN;
                }
  
                node = container_of(it, struct amdgpu_mn_node, it);
 -              it = interval_tree_iter_next(it, range->start, end);
 +              it = interval_tree_iter_next(it, start, end);
  
                list_for_each_entry(bo, &node->bos, mn_list) {
                        struct kgd_mem *mem = bo->kfd_bo;
  
                        if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
 -                                                       range->start,
 -                                                       end))
 -                              amdgpu_amdkfd_evict_userptr(mem, range->mm);
 +                                                       start, end))
 +                              amdgpu_amdkfd_evict_userptr(mem, amn->mm);
                }
        }
  
 +      amdgpu_mn_read_unlock(amn);
 +
        return 0;
  }
  
 -/**
 - * amdgpu_mn_invalidate_range_end - callback to notify about mm change
 - *
 - * @mn: our notifier
 - * @mm: the mm this callback is about
 - * @start: start of updated range
 - * @end: end of updated range
 - *
 - * Release the lock again to allow new command submissions.
 +/* Low bits of any reasonable mm pointer will be unused due to struct
 + * alignment. Use these bits to make a unique key from the mm pointer
 + * and notifier type.
   */
 -static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
 -                      const struct mmu_notifier_range *range)
 -{
 -      struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 -
 -      amdgpu_mn_read_unlock(amn);
 -}
 +#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
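
A sketch of how such a key is typically consumed, namely as the key of the
per-device notifier hash table (the hash_add() line below illustrates the
pattern and is not a quote of this patch):

    /* Illustrative: insert a notifier context keyed by (mm, type). */
    hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));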
  
 -static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
 +static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
        [AMDGPU_MN_TYPE_GFX] = {
 -              .release = amdgpu_mn_release,
 -              .invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
 -              .invalidate_range_end = amdgpu_mn_invalidate_range_end,
 +              .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
 +              .release = amdgpu_hmm_mirror_release
        },
        [AMDGPU_MN_TYPE_HSA] = {
 -              .release = amdgpu_mn_release,
 -              .invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
 -              .invalidate_range_end = amdgpu_mn_invalidate_range_end,
 +              .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
 +              .release = amdgpu_hmm_mirror_release
        },
  };
  
 -/* Low bits of any reasonable mm pointer will be unused due to struct
 - * alignment. Use these bits to make a unique key from the mm pointer
 - * and notifier type.
 - */
 -#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
 -
  /**
 - * amdgpu_mn_get - create notifier context
 + * amdgpu_mn_get - create HMM mirror context
   *
   * @adev: amdgpu device pointer
   * @type: type of MMU notifier context
   *
 - * Creates a notifier context for current->mm.
 + * Creates an HMM mirror context for current->mm.
   */
  struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
                                enum amdgpu_mn_type type)
        amn->mm = mm;
        init_rwsem(&amn->lock);
        amn->type = type;
 -      amn->mn.ops = &amdgpu_mn_ops[type];
        amn->objects = RB_ROOT_CACHED;
 -      mutex_init(&amn->read_lock);
 -      atomic_set(&amn->recursion, 0);
  
 -      r = __mmu_notifier_register(&amn->mn, mm);
 +      amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
 +      r = hmm_mirror_register(&amn->mirror, mm);
        if (r)
                goto free_amn;
  
@@@ -404,7 -432,7 +404,7 @@@ free_amn
   * @bo: amdgpu buffer object
   * @addr: userptr addr we should monitor
   *
 - * Registers an MMU notifier for the given BO at the specified address.
 + * Registers an HMM mirror for the given BO at the specified address.
   * Returns 0 on success, -ERRNO if anything goes wrong.
   */
  int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
  }
  
  /**
 - * amdgpu_mn_unregister - unregister a BO for notifier updates
 + * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
   *
   * @bo: amdgpu buffer object
   *
 - * Remove any registration of MMU notifier updates from the buffer object.
 + * Remove any registration of HMM mirror updates from the buffer object.
   */
  void amdgpu_mn_unregister(struct amdgpu_bo *bo)
  {
        mutex_unlock(&adev->mn_lock);
  }
  
 +/* flags used internally by HMM; not related to CPU/GPU PTE flags */
 +static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
 +              (1 << 0), /* HMM_PFN_VALID */
 +              (1 << 1), /* HMM_PFN_WRITE */
 +              0 /* HMM_PFN_DEVICE_PRIVATE */
 +};
 +
 +static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
 +              0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
 +              0, /* HMM_PFN_NONE */
 +              0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
 +};
 +
 +void amdgpu_hmm_init_range(struct hmm_range *range)
 +{
 +      if (range) {
 +              range->flags = hmm_range_flags;
 +              range->values = hmm_range_values;
 +              range->pfn_shift = PAGE_SHIFT;
 +              range->pfns = NULL;
 +              INIT_LIST_HEAD(&range->list);
 +      }
 +}
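
A hedged sketch of a caller preparing a range with this helper (field names
follow the 5.2-era struct hmm_range as used above; the helper name,
allocation strategy and error handling are illustrative, not the driver's
actual userptr path):

    static struct hmm_range *example_prepare_range(unsigned long start,
                                                   unsigned long end)
    {
            unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
            struct hmm_range *range;

            range = kzalloc(sizeof(*range), GFP_KERNEL);
            if (!range)
                    return NULL;

            /* Install the flags/values tables defined above. */
            amdgpu_hmm_init_range(range);
            range->start = start;
            range->end = end;

            range->pfns = kvmalloc_array(nr_pages, sizeof(*range->pfns),
                                         GFP_KERNEL);
            if (!range->pfns) {
                    kfree(range);
                    return NULL;
            }
            return range;
    }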
index a73e1903d29b143a7d96c12d9f0ca698e3332492,15b42f80ada2040a3147a2c879cc2f32336b2cff..c95e6f926a1509b2b368806b28cbc238a9cac38a
@@@ -22,7 -22,9 +22,9 @@@
   * Authors: Rafał Miłecki <[email protected]>
   *          Alex Deucher <[email protected]>
   */
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
  #include "amdgpu.h"
  #include "amdgpu_drv.h"
  #include "amdgpu_pm.h"
@@@ -31,6 -33,7 +33,7 @@@
  #include "amdgpu_smu.h"
  #include "atom.h"
  #include <linux/power_supply.h>
+ #include <linux/pci.h>
  #include <linux/hwmon.h>
  #include <linux/hwmon-sysfs.h>
  #include <linux/nospec.h>
@@@ -67,15 -70,6 +70,15 @@@ static const struct cg_flag_name clocks
        {0, NULL},
  };
  
 +static const struct hwmon_temp_label {
 +      enum PP_HWMON_TEMP channel;
 +      const char *label;
 +} temp_label[] = {
 +      {PP_TEMP_EDGE, "edge"},
 +      {PP_TEMP_JUNCTION, "junction"},
 +      {PP_TEMP_MEM, "mem"},
 +};
 +
  void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
  {
        if (adev->pm.dpm_enabled) {
@@@ -767,11 -761,7 +770,11 @@@ static ssize_t amdgpu_set_ppfeature_sta
  
        pr_debug("featuremask = 0x%llx\n", featuremask);
  
 -      if (adev->powerplay.pp_funcs->set_ppfeature_status) {
 +      if (is_support_sw_smu(adev)) {
 +              ret = smu_set_ppfeature_status(&adev->smu, featuremask);
 +              if (ret)
 +                      return -EINVAL;
 +      } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
                ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
                if (ret)
                        return -EINVAL;
@@@ -787,9 -777,7 +790,9 @@@ static ssize_t amdgpu_get_ppfeature_sta
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
  
 -      if (adev->powerplay.pp_funcs->get_ppfeature_status)
 +      if (is_support_sw_smu(adev)) {
 +              return smu_get_ppfeature_status(&adev->smu, buf);
 +      } else if (adev->powerplay.pp_funcs->get_ppfeature_status)
                return amdgpu_dpm_get_ppfeature_status(adev, buf);
  
        return snprintf(buf, PAGE_SIZE, "\n");
@@@ -1317,32 -1305,6 +1320,32 @@@ static ssize_t amdgpu_get_busy_percent(
        return snprintf(buf, PAGE_SIZE, "%d\n", value);
  }
  
 +/**
 + * DOC: mem_busy_percent
 + *
 + * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 + * is as a percentage.  The file mem_busy_percent is used for this.
 + * The SMU firmware computes a percentage of load based on the
 + * aggregate activity level in the IP cores.
 + */
 +static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
 +              struct device_attribute *attr,
 +              char *buf)
 +{
 +      struct drm_device *ddev = dev_get_drvdata(dev);
 +      struct amdgpu_device *adev = ddev->dev_private;
 +      int r, value, size = sizeof(value);
 +
 +      /* read the IP busy sensor */
 +      r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
 +                                 (void *)&value, &size);
 +
 +      if (r)
 +              return r;
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", value);
 +}
 +
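
A minimal userspace sketch for reading this attribute (the card0 sysfs path
is an assumption and depends on device enumeration order):

    #include <stdio.h>

    static int read_mem_busy_percent(void)
    {
            /* Illustrative path; cardN varies per system. */
            FILE *f = fopen("/sys/class/drm/card0/device/mem_busy_percent", "r");
            int value = -1;

            if (!f)
                    return -1;
            if (fscanf(f, "%d", &value) != 1)
                    value = -1;
            fclose(f);
            return value;
    }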
  /**
   * DOC: pcie_bw
   *
@@@ -1368,29 -1330,6 +1371,29 @@@ static ssize_t amdgpu_get_pcie_bw(struc
                        count0, count1, pcie_get_mps(adev->pdev));
  }
  
 +/**
 + * DOC: unique_id
 + *
 + * The amdgpu driver provides a sysfs API for exposing a unique ID for the GPU.
 + * The file unique_id is used for this.
 + * This provides a unique ID that persists from machine to machine.
 + *
 + * NOTE: This only works for GFX9 and newer. The file will be absent
 + * on unsupported ASICs (GFX8 and older).
 + */
 +static ssize_t amdgpu_get_unique_id(struct device *dev,
 +              struct device_attribute *attr,
 +              char *buf)
 +{
 +      struct drm_device *ddev = dev_get_drvdata(dev);
 +      struct amdgpu_device *adev = ddev->dev_private;
 +
 +      if (adev->unique_id)
 +              return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
 +
 +      return 0;
 +}
 +
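Given the "%016llx" format above, a read of unique_id returns a zero-padded 16-digit hex string; a hypothetical ID of 0xabcdef0123456789, for example, reads back as "abcdef0123456789".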
  static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
  static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
                   amdgpu_get_dpm_forced_performance_level,
@@@ -1435,13 -1374,10 +1438,13 @@@ static DEVICE_ATTR(pp_od_clk_voltage, S
                amdgpu_set_pp_od_clk_voltage);
  static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
                amdgpu_get_busy_percent, NULL);
 +static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
 +              amdgpu_get_memory_busy_percent, NULL);
  static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
  static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
                amdgpu_get_ppfeature_status,
                amdgpu_set_ppfeature_status);
 +static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
  
  static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
  {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
        struct drm_device *ddev = adev->ddev;
 +      int channel = to_sensor_dev_attr(attr)->index;
        int r, temp, size = sizeof(temp);
  
        /* Can't get temperature when the card is off */
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
                return -EINVAL;
  
 -      /* get the temperature */
 -      r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 -                                 (void *)&temp, &size);
 -      if (r)
 -              return r;
 +      if (channel >= PP_TEMP_MAX)
 +              return -EINVAL;
 +
 +      switch (channel) {
 +      case PP_TEMP_JUNCTION:
 +              /* get current junction temperature */
 +              r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
 +                                         (void *)&temp, &size);
 +              if (r)
 +                      return r;
 +              break;
 +      case PP_TEMP_EDGE:
 +              /* get current edge temperature */
 +              r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
 +                                         (void *)&temp, &size);
 +              if (r)
 +                      return r;
 +              break;
 +      case PP_TEMP_MEM:
 +              /* get current memory temperature */
 +              r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
 +                                         (void *)&temp, &size);
 +              if (r)
 +                      return r;
 +              break;
 +      }
  
        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
  }
@@@ -1503,76 -1417,6 +1506,76 @@@ static ssize_t amdgpu_hwmon_show_temp_t
        return snprintf(buf, PAGE_SIZE, "%d\n", temp);
  }
  
 +static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
 +                                           struct device_attribute *attr,
 +                                           char *buf)
 +{
 +      struct amdgpu_device *adev = dev_get_drvdata(dev);
 +      int hyst = to_sensor_dev_attr(attr)->index;
 +      int temp;
 +
 +      if (hyst)
 +              temp = adev->pm.dpm.thermal.min_hotspot_temp;
 +      else
 +              temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 +}
 +
 +static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
 +                                           struct device_attribute *attr,
 +                                           char *buf)
 +{
 +      struct amdgpu_device *adev = dev_get_drvdata(dev);
 +      int hyst = to_sensor_dev_attr(attr)->index;
 +      int temp;
 +
 +      if (hyst)
 +              temp = adev->pm.dpm.thermal.min_mem_temp;
 +      else
 +              temp = adev->pm.dpm.thermal.max_mem_crit_temp;
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 +}
 +
 +static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
 +                                           struct device_attribute *attr,
 +                                           char *buf)
 +{
 +      int channel = to_sensor_dev_attr(attr)->index;
 +
 +      if (channel >= PP_TEMP_MAX)
 +              return -EINVAL;
 +
 +      return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
 +}
 +
 +static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
 +                                           struct device_attribute *attr,
 +                                           char *buf)
 +{
 +      struct amdgpu_device *adev = dev_get_drvdata(dev);
 +      int channel = to_sensor_dev_attr(attr)->index;
 +      int temp = 0;
 +
 +      if (channel >= PP_TEMP_MAX)
 +              return -EINVAL;
 +
 +      switch (channel) {
 +      case PP_TEMP_JUNCTION:
 +              temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
 +              break;
 +      case PP_TEMP_EDGE:
 +              temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
 +              break;
 +      case PP_TEMP_MEM:
 +              temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
 +              break;
 +      }
 +
 +      return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 +}
 +
  static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
@@@ -2142,20 -1986,11 +2145,20 @@@ static ssize_t amdgpu_hwmon_show_mclk_l
   *
   * hwmon interfaces for GPU temperature:
   *
 - * - temp1_input: the on die GPU temperature in millidegrees Celsius
 + * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
 + *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 + *
 + * - temp[1-3]_label: temperature channel label
 + *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 + *
 + * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 + *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
   *
 - * - temp1_crit: temperature critical max value in millidegrees Celsius
 + * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 + *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
   *
 - * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 + * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
 + *   - these are supported on SOC15 dGPUs only
   *
   * hwmon interfaces for GPU voltage:
   *
   *
   */
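For illustration, a userspace sketch that walks the temperature channels documented above; the hwmon0 instance number is an assumption, and temp2/temp3 exist only on SOC15 dGPUs:

    /* Sketch: dump the temp channels; hwmon0 is an assumed instance. */
    #include <stdio.h>

    int main(void)
    {
            char path[64], label[32];
            int ch, milli;

            for (ch = 1; ch <= 3; ch++) {
                    FILE *l, *in;

                    snprintf(path, sizeof(path),
                             "/sys/class/hwmon/hwmon0/temp%d_label", ch);
                    l = fopen(path, "r");
                    snprintf(path, sizeof(path),
                             "/sys/class/hwmon/hwmon0/temp%d_input", ch);
                    in = fopen(path, "r");
                    if (!l || !in) {        /* channel absent on this ASIC */
                            if (l)
                                    fclose(l);
                            if (in)
                                    fclose(in);
                            break;
                    }
                    if (fscanf(l, "%31s", label) == 1 &&
                        fscanf(in, "%d", &milli) == 1)
                            printf("%s: %d mC\n", label, milli);
                    fclose(l);
                    fclose(in);
            }
            return 0;
    }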
  
 -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
 +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
  static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
  static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
 +static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
 +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
 +static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
 +static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
 +static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
 +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
 +static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
 +static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
 +static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
 +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
 +static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
 +static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
  static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
  static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
  static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
@@@ -2244,18 -2067,6 +2247,18 @@@ static struct attribute *hwmon_attribut
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
 +      &sensor_dev_attr_temp2_input.dev_attr.attr,
 +      &sensor_dev_attr_temp2_crit.dev_attr.attr,
 +      &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
 +      &sensor_dev_attr_temp3_input.dev_attr.attr,
 +      &sensor_dev_attr_temp3_crit.dev_attr.attr,
 +      &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
 +      &sensor_dev_attr_temp1_emergency.dev_attr.attr,
 +      &sensor_dev_attr_temp2_emergency.dev_attr.attr,
 +      &sensor_dev_attr_temp3_emergency.dev_attr.attr,
 +      &sensor_dev_attr_temp1_label.dev_attr.attr,
 +      &sensor_dev_attr_temp2_label.dev_attr.attr,
 +      &sensor_dev_attr_temp3_label.dev_attr.attr,
        &sensor_dev_attr_pwm1.dev_attr.attr,
        &sensor_dev_attr_pwm1_enable.dev_attr.attr,
        &sensor_dev_attr_pwm1_min.dev_attr.attr,
@@@ -2378,22 -2189,6 +2381,22 @@@ static umode_t hwmon_attributes_visible
             attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
                return 0;
  
 +      /* only SOC15 dGPUs support hotspot and mem temperatures */
 +      if (((adev->flags & AMD_IS_APU) ||
 +           adev->asic_type < CHIP_VEGA10) &&
 +          (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
 +           attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
 +              return 0;
 +
        return effective_mode;
  }
  
@@@ -2820,16 -2615,6 +2823,16 @@@ int amdgpu_pm_sysfs_init(struct amdgpu_
                                "gpu_busy_level\n");
                return ret;
        }
 +      /* APU does not have its own dedicated memory */
 +      if (!(adev->flags & AMD_IS_APU)) {
 +              ret = device_create_file(adev->dev,
 +                              &dev_attr_mem_busy_percent);
 +              if (ret) {
 +                      DRM_ERROR("failed to create device file "
 +                                      "mem_busy_percent\n");
 +                      return ret;
 +              }
 +      }
        /* PCIe Perf counters won't work on APU nodes */
        if (!(adev->flags & AMD_IS_APU)) {
                ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
                        return ret;
                }
        }
 +      if (adev->unique_id) {
 +              ret = device_create_file(adev->dev, &dev_attr_unique_id);
 +              if (ret) {
 +                      DRM_ERROR("failed to create device file unique_id\n");
 +                      return ret;
 +              }
 +      }
        ret = amdgpu_debugfs_pm_init(adev);
        if (ret) {
                DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@@ -2901,12 -2680,8 +2904,12 @@@ void amdgpu_pm_sysfs_fini(struct amdgpu
                device_remove_file(adev->dev,
                                &dev_attr_pp_od_clk_voltage);
        device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
 +      if (!(adev->flags & AMD_IS_APU))
 +              device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
        if (!(adev->flags & AMD_IS_APU))
                device_remove_file(adev->dev, &dev_attr_pcie_bw);
 +      if (adev->unique_id)
 +              device_remove_file(adev->dev, &dev_attr_unique_id);
        if ((adev->asic_type >= CHIP_VEGA10) &&
            !(adev->flags & AMD_IS_APU))
                device_remove_file(adev->dev, &dev_attr_ppfeatures);
@@@ -3003,10 -2778,6 +3006,10 @@@ static int amdgpu_debugfs_pm_info_pp(st
        /* GPU Load */
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
                seq_printf(m, "GPU Load: %u %%\n", value);
 +      /* MEM Load */
 +      if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
 +              seq_printf(m, "MEM Load: %u %%\n", value);
 +
        seq_printf(m, "\n");
  
        /* SMC feature mask */
index af9835c8395df379f07cb2e470a135205fa764a1,d72d603b55fc1b3efa59364a2bd8d4d410106fe6..909be1bf2294b292ef701cfe540cd4cc6119c937
@@@ -24,7 -24,7 +24,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_psp.h"
  #include "amdgpu_ucode.h"
@@@ -289,34 -289,6 +289,34 @@@ static int psp_asd_load(struct psp_cont
        return ret;
  }
  
 +static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 +              uint32_t id, uint32_t value)
 +{
 +      cmd->cmd_id = GFX_CMD_ID_PROG_REG;
 +      cmd->cmd.cmd_setup_reg_prog.reg_value = value;
 +      cmd->cmd.cmd_setup_reg_prog.reg_id = id;
 +}
 +
 +int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
 +              uint32_t value)
 +{
 +      struct psp_gfx_cmd_resp *cmd = NULL;
 +      int ret = 0;
 +
 +      if (reg >= PSP_REG_LAST)
 +              return -EINVAL;
 +
 +      cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
 +      if (!cmd)
 +              return -ENOMEM;
 +
 +      psp_prep_reg_prog_cmd_buf(cmd, reg, value);
 +      ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 +
 +      kfree(cmd);
 +      return ret;
 +}
 +
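A caller-side sketch of the new helper; PSP_REG_IH_RB_CNTL and new_val are assumed example values, with valid IDs coming from enum psp_reg_prog_id:

    /* Sketch only; the reg id and new_val are assumed example values. */
    int r = psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, new_val);

    if (r)
            DRM_ERROR("PSP failed to program register: %d\n", r);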
  static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                          uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
                                          uint32_t xgmi_ta_size, uint32_t shared_size)
index 011630f62f8504660daadd5645074fe4037e5698,4fea7f835506c9d5f2c3d87952699272d6347e38..4d387557cc37abdd93f3ac458d2a907e856f790b
@@@ -24,6 -24,8 +24,8 @@@
  #include <linux/debugfs.h>
  #include <linux/list.h>
  #include <linux/module.h>
+ #include <linux/uaccess.h>
  #include "amdgpu.h"
  #include "amdgpu_ras.h"
  #include "amdgpu_atomfirmware.h"
@@@ -90,12 -92,6 +92,12 @@@ struct ras_manager 
        struct ras_err_data err_data;
  };
  
 +struct ras_badpage {
 +      unsigned int bp;
 +      unsigned int size;
 +      unsigned int flags;
 +};
 +
  const char *ras_error_string[] = {
        "none",
        "parity",
@@@ -124,16 -120,9 +126,16 @@@ const char *ras_block_string[] = 
  #define ras_err_str(i) (ras_error_string[ffs(i)])
  #define ras_block_str(i) (ras_block_string[i])
  
 -#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
 +#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS         1
 +#define AMDGPU_RAS_FLAG_INIT_NEED_RESET               2
  #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
  
 +static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
 +              uint64_t offset, uint64_t size,
 +              struct amdgpu_bo **bo_ptr);
 +static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
 +              struct amdgpu_bo **bo_ptr);
 +
  static void amdgpu_ras_self_test(struct amdgpu_device *adev)
  {
        /* TODO */
@@@ -250,8 -239,8 +252,8 @@@ static int amdgpu_ras_debugfs_ctrl_pars
  
        return 0;
  }
 -/*
 - * DOC: ras debugfs control interface
 +/**
 + * DOC: AMDGPU RAS debugfs control interface
   *
   * It accepts struct ras_debug_if which has two members.
   *
@@@ -313,7 -302,6 +315,7 @@@ static ssize_t amdgpu_ras_debugfs_ctrl_
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
 +      struct amdgpu_bo *bo;
        int ret = 0;
  
        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
 +              ret = amdgpu_ras_reserve_vram(adev,
 +                              data.inject.address, PAGE_SIZE, &bo);
 +              /* Reserving the address can fail, e.g. if it is already in
 +               * use; skip the injection in that case.
 +               */
 +              if (ret)
 +                      break;
 +              data.inject.address = amdgpu_bo_gpu_offset(bo);
                ret = amdgpu_ras_error_inject(adev, &data.inject);
 +              amdgpu_ras_release_vram(adev, &bo);
                break;
        default:
                ret = -EINVAL;
@@@ -544,8 -523,6 +546,8 @@@ int amdgpu_ras_feature_enable(struct am
                                enable ? "enable":"disable",
                                ras_block_str(head->block),
                                ret);
 +              if (ret == TA_RAS_STATUS__RESET_NEEDED)
 +                      return -EAGAIN;
                return -EINVAL;
        }
  
@@@ -566,32 -543,16 +568,32 @@@ int amdgpu_ras_feature_enable_on_boot(s
                return -EINVAL;
  
        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
 -              /* If ras is enabled by vbios, we set up ras object first in
 -               * both case. For enable, that is all what we need do. For
 -               * disable, we need perform a ras TA disable cmd after that.
 -               */
 -              ret = __amdgpu_ras_feature_enable(adev, head, 1);
 -              if (ret)
 -                      return ret;
 +              if (enable) {
 +                      /* There is no harm in issuing a ras TA cmd regardless of
 +                       * the current ras state.
 +                       * If the current state matches the target state, the
 +                       * cmd does nothing, but it may ask the driver to reset
 +                       * and repost with error code -EAGAIN.
 +                       */
 +                      ret = amdgpu_ras_feature_enable(adev, head, 1);
 +                      /* With an old ras TA, enabling ras may fail.
 +                       * Log it and just set up the object.
 +                       * TODO: remove this workaround in the future.
 +                       */
 +                      if (ret == -EINVAL) {
 +                              ret = __amdgpu_ras_feature_enable(adev, head, 1);
 +                              if (!ret)
 +                                      DRM_INFO("RAS INFO: %s setup object\n",
 +                                              ras_block_str(head->block));
 +                      }
 +              } else {
 +              /* Set up the object, then issue a ras TA disable cmd. */
 +                      ret = __amdgpu_ras_feature_enable(adev, head, 1);
 +                      if (ret)
 +                              return ret;
  
 -              if (!enable)
                        ret = amdgpu_ras_feature_enable(adev, head, 0);
 +              }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);
  
@@@ -732,77 -693,6 +734,77 @@@ int amdgpu_ras_query_error_count(struc
  
  /* sysfs begin */
  
 +static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
 +              struct ras_badpage **bps, unsigned int *count);
 +
 +static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
 +{
 +      switch (flags) {
 +      case 0:
 +              return "R";
 +      case 1:
 +              return "P";
 +      case 2:
 +      default:
 +              return "F";
 +      }
 +}
 +
 +/*
 + * DOC: ras sysfs gpu_vram_bad_pages interface
 + *
 + * It allows the user to read the bad pages of vram on the gpu through
 + * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 + *
 + * It outputs multiple lines; each line describes one gpu page.
 + *
 + * The format of each line is:
 + * gpu pfn : gpu page size : flags
 + *
 + * gpu pfn and gpu page size are printed in hex format.
 + * flags is one of the characters below:
 + * R: reserved, this gpu page is reserved and not available for use.
 + * P: pending for reserve, this gpu page is marked as bad and will be
 + *    reserved in the next window of page_reserve.
 + * F: unable to reserve, this gpu page cannot be reserved.
 + *
 + * examples:
 + * 0x00000001 : 0x00001000 : R
 + * 0x00000002 : 0x00001000 : P
 + */
 +
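Each line is fixed-width, so it can be parsed mechanically; a short sketch:

    /* Sketch: parse one gpu_vram_bad_pages line. */
    #include <stdio.h>

    int main(void)
    {
            const char *line = "0x00000001 : 0x00001000 : R";
            unsigned int pfn, size;
            char flag;

            if (sscanf(line, "0x%x : 0x%x : %c", &pfn, &size, &flag) == 3)
                    printf("pfn 0x%08x size 0x%08x flag %c\n",
                           pfn, size, flag);
            return 0;
    }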
 +static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
 +              struct kobject *kobj, struct bin_attribute *attr,
 +              char *buf, loff_t ppos, size_t count)
 +{
 +      struct amdgpu_ras *con =
 +              container_of(attr, struct amdgpu_ras, badpages_attr);
 +      struct amdgpu_device *adev = con->adev;
 +      const unsigned int element_size =
 +              sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
 +      unsigned int start = div64_ul(ppos + element_size - 1, element_size);
 +      unsigned int end = div64_ul(ppos + count - 1, element_size);
 +      ssize_t s = 0;
 +      struct ras_badpage *bps = NULL;
 +      unsigned int bps_count = 0;
 +
 +      memset(buf, 0, count);
 +
 +      if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
 +              return 0;
 +
 +      for (; start < end && start < bps_count; start++)
 +              s += scnprintf(&buf[s], element_size + 1,
 +                              "0x%08x : 0x%08x : %1s\n",
 +                              bps[start].bp,
 +                              bps[start].size,
 +                              amdgpu_ras_badpage_flags_str(bps[start].flags));
 +
 +      kfree(bps);
 +
 +      return s;
 +}
 +
  static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
                struct device_attribute *attr, char *buf)
  {
@@@ -843,14 -733,9 +845,14 @@@ static int amdgpu_ras_sysfs_create_feat
                &con->features_attr.attr,
                NULL
        };
 +      struct bin_attribute *bin_attrs[] = {
 +              &con->badpages_attr,
 +              NULL
 +      };
        struct attribute_group group = {
                .name = "ras",
                .attrs = attrs,
 +              .bin_attrs = bin_attrs,
        };
  
        con->features_attr = (struct device_attribute) {
                },
                        .show = amdgpu_ras_sysfs_features_read,
        };
 +
 +      con->badpages_attr = (struct bin_attribute) {
 +              .attr = {
 +                      .name = "gpu_vram_bad_pages",
 +                      .mode = S_IRUGO,
 +              },
 +              .size = 0,
 +              .private = NULL,
 +              .read = amdgpu_ras_sysfs_badpages_read,
 +      };
 +
        sysfs_attr_init(attrs[0]);
 +      sysfs_bin_attr_init(bin_attrs[0]);
  
        return sysfs_create_group(&adev->dev->kobj, &group);
  }
@@@ -884,14 -757,9 +886,14 @@@ static int amdgpu_ras_sysfs_remove_feat
                &con->features_attr.attr,
                NULL
        };
 +      struct bin_attribute *bin_attrs[] = {
 +              &con->badpages_attr,
 +              NULL
 +      };
        struct attribute_group group = {
                .name = "ras",
                .attrs = attrs,
 +              .bin_attrs = bin_attrs,
        };
  
        sysfs_remove_group(&adev->dev->kobj, &group);
@@@ -1223,53 -1091,6 +1225,53 @@@ static int amdgpu_ras_interrupt_remove_
  /* ih end */
  
  /* recovery begin */
 +
 +/* Return 0 on success.
 + * The caller must free bps.
 + */
 +static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
 +              struct ras_badpage **bps, unsigned int *count)
 +{
 +      struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 +      struct ras_err_handler_data *data;
 +      int i = 0;
 +      int ret = 0;
 +
 +      if (!con || !con->eh_data || !bps || !count)
 +              return -EINVAL;
 +
 +      mutex_lock(&con->recovery_lock);
 +      data = con->eh_data;
 +      if (!data || data->count == 0) {
 +              *bps = NULL;
 +              goto out;
 +      }
 +
 +      *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
 +      if (!*bps) {
 +              ret = -ENOMEM;
 +              goto out;
 +      }
 +
 +      for (; i < data->count; i++) {
 +              (*bps)[i] = (struct ras_badpage){
 +                      .bp = data->bps[i].bp,
 +                      .size = AMDGPU_GPU_PAGE_SIZE,
 +                      .flags = 0,
 +              };
 +
 +              if (data->last_reserved <= i)
 +                      (*bps)[i].flags = 1;
 +              else if (data->bps[i].bo == NULL)
 +                      (*bps)[i].flags = 2;
 +      }
 +
 +      *count = data->count;
 +out:
 +      mutex_unlock(&con->recovery_lock);
 +      return ret;
 +}
 +
  static void amdgpu_ras_do_recovery(struct work_struct *work)
  {
        struct amdgpu_ras *ras =
@@@ -1521,19 -1342,6 +1523,19 @@@ static int amdgpu_ras_recovery_fini(str
  }
  /* recovery end */
  
 +/* Return 0 if ras will reset the gpu and repost. */
 +int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 +              unsigned int block)
 +{
 +      struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 +
 +      if (!ras)
 +              return -EINVAL;
 +
 +      ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
 +      return 0;
 +}
 +
  /*
   * check hardware's ras ability which will be saved in hw_supported.
  * if hardware does not support ras, we can skip some ras initialization and
@@@ -1609,10 -1417,8 +1611,10 @@@ recovery_out
        return -EINVAL;
  }
  
 -/* do some init work after IP late init as dependence */
 -void amdgpu_ras_post_init(struct amdgpu_device *adev)
 +/* Do some init work after IP late init, as a dependency.
 + * It runs in the resume, gpu reset and boot-up cases.
 + */
 +void amdgpu_ras_resume(struct amdgpu_device *adev)
  {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;
                        }
                }
        }
 +
 +      if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
 +              con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
 +              /* Set up the ras obj state as disabled
 +               * (for the init_by_vbios case).
 +               * To enable ras, just enable it in the normal way.
 +               * To disable it, set up the ras obj as enabled,
 +               * then issue another TA disable cmd.
 +               * See feature_enable_on_boot.
 +               */
 +              amdgpu_ras_disable_all_features(adev, 1);
 +              amdgpu_ras_reset_gpu(adev, 0);
 +      }
 +}
 +
 +void amdgpu_ras_suspend(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 +
 +      if (!con)
 +              return;
 +
 +      amdgpu_ras_disable_all_features(adev, 0);
 +      /* Make sure all ras objects are disabled. */
 +      if (con->features)
 +              amdgpu_ras_disable_all_features(adev, 1);
  }
  
  /* do some fini work before IP fini as dependence */
index 7138dc1dd1f44b9e871984c5a724481e99df7422,a8a1fcab299bbbc5b8ba121b93fc9048e666fe35..d81bebf7631078087ac0169bed65fb80cb57054c
   *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
   *    Dave Airlie
   */
+ #include <linux/dma-mapping.h>
+ #include <linux/iommu.h>
++#include <linux/hmm.h>
+ #include <linux/pagemap.h>
+ #include <linux/sched/task.h>
+ #include <linux/seq_file.h>
+ #include <linux/slab.h>
+ #include <linux/swap.h>
+ #include <linux/swiotlb.h>
  #include <drm/ttm/ttm_bo_api.h>
  #include <drm/ttm/ttm_bo_driver.h>
  #include <drm/ttm/ttm_placement.h>
  #include <drm/ttm/ttm_module.h>
  #include <drm/ttm/ttm_page_alloc.h>
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
  #include <drm/amdgpu_drm.h>
- #include <linux/seq_file.h>
- #include <linux/slab.h>
- #include <linux/swiotlb.h>
- #include <linux/swap.h>
- #include <linux/pagemap.h>
- #include <linux/debugfs.h>
- #include <linux/iommu.h>
- #include <linux/hmm.h>
++
  #include "amdgpu.h"
  #include "amdgpu_object.h"
  #include "amdgpu_trace.h"
@@@ -704,191 -707,143 +709,191 @@@ static unsigned long amdgpu_ttm_io_mem_
  /*
   * TTM backend functions.
   */
 -struct amdgpu_ttm_gup_task_list {
 -      struct list_head        list;
 -      struct task_struct      *task;
 -};
 -
  struct amdgpu_ttm_tt {
        struct ttm_dma_tt       ttm;
        u64                     offset;
        uint64_t                userptr;
        struct task_struct      *usertask;
        uint32_t                userflags;
 -      spinlock_t              guptasklock;
 -      struct list_head        guptasks;
 -      atomic_t                mmu_invalidations;
 -      uint32_t                last_set_pages;
 +#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 +      struct hmm_range        *ranges;
 +      int                     nr_ranges;
 +#endif
  };
  
  /**
 - * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
 - * pointer to memory
 + * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
 + * memory and start HMM tracking of CPU page table updates
   *
 - * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
 - * This provides a wrapper around the get_user_pages() call to provide
 - * device accessible pages that back user memory.
 + * The calling function must call amdgpu_ttm_tt_get_user_pages_done() exactly
 + * once afterwards to stop HMM tracking.
   */
 +#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 +
 +/* A userptr range may span at most 16 VMAs */
 +#define MAX_NR_VMAS   (16)
 +
  int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct mm_struct *mm = gtt->usertask->mm;
 -      unsigned int flags = 0;
 -      unsigned pinned = 0;
 -      int r;
 +      unsigned long start = gtt->userptr;
 +      unsigned long end = start + ttm->num_pages * PAGE_SIZE;
 +      struct vm_area_struct *vma = NULL, *vmas[MAX_NR_VMAS];
 +      struct hmm_range *ranges;
 +      unsigned long nr_pages, i;
 +      uint64_t *pfns, f;
 +      int r = 0;
  
        if (!mm) /* Happens during process shutdown */
                return -ESRCH;
  
 -      if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
 -              flags |= FOLL_WRITE;
 -
        down_read(&mm->mmap_sem);
  
 -      if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 -              /*
 -               * check that we only use anonymous memory to prevent problems
 -               * with writeback
 -               */
 -              unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
 -              struct vm_area_struct *vma;
 +      /* user pages may cross multiple VMAs */
 +      gtt->nr_ranges = 0;
 +      do {
 +              unsigned long vm_start;
 +
 +              if (gtt->nr_ranges >= MAX_NR_VMAS) {
 +                      DRM_ERROR("Too many VMAs in userptr range\n");
 +                      r = -EFAULT;
 +                      goto out;
 +              }
  
 -              vma = find_vma(mm, gtt->userptr);
 -              if (!vma || vma->vm_file || vma->vm_end < end) {
 -                      up_read(&mm->mmap_sem);
 -                      return -EPERM;
 +              vm_start = vma ? vma->vm_end : start;
 +              vma = find_vma(mm, vm_start);
 +              if (unlikely(!vma || vm_start < vma->vm_start)) {
 +                      r = -EFAULT;
 +                      goto out;
                }
 +              vmas[gtt->nr_ranges++] = vma;
 +      } while (end > vma->vm_end);
 +
 +      DRM_DEBUG_DRIVER("0x%lx nr_ranges %d pages 0x%lx\n",
 +              start, gtt->nr_ranges, ttm->num_pages);
 +
 +      if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
 +              vmas[0]->vm_file)) {
 +              r = -EPERM;
 +              goto out;
        }
  
 -      /* loop enough times using contiguous pages of memory */
 -      do {
 -              unsigned num_pages = ttm->num_pages - pinned;
 -              uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
 -              struct page **p = pages + pinned;
 -              struct amdgpu_ttm_gup_task_list guptask;
 +      ranges = kvmalloc_array(gtt->nr_ranges, sizeof(*ranges), GFP_KERNEL);
 +      if (unlikely(!ranges)) {
 +              r = -ENOMEM;
 +              goto out;
 +      }
  
 -              guptask.task = current;
 -              spin_lock(&gtt->guptasklock);
 -              list_add(&guptask.list, &gtt->guptasks);
 -              spin_unlock(&gtt->guptasklock);
 +      pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
 +      if (unlikely(!pfns)) {
 +              r = -ENOMEM;
 +              goto out_free_ranges;
 +      }
  
 -              if (mm == current->mm)
 -                      r = get_user_pages(userptr, num_pages, flags, p, NULL);
 -              else
 -                      r = get_user_pages_remote(gtt->usertask,
 -                                      mm, userptr, num_pages,
 -                                      flags, p, NULL, NULL);
 +      for (i = 0; i < gtt->nr_ranges; i++)
 +              amdgpu_hmm_init_range(&ranges[i]);
  
 -              spin_lock(&gtt->guptasklock);
 -              list_del(&guptask.list);
 -              spin_unlock(&gtt->guptasklock);
 +      f = ranges[0].flags[HMM_PFN_VALID];
 +      f |= amdgpu_ttm_tt_is_readonly(ttm) ?
 +                              0 : ranges[0].flags[HMM_PFN_WRITE];
 +      memset64(pfns, f, ttm->num_pages);
  
 -              if (r < 0)
 -                      goto release_pages;
 +      for (nr_pages = 0, i = 0; i < gtt->nr_ranges; i++) {
 +              ranges[i].vma = vmas[i];
 +              ranges[i].start = max(start, vmas[i]->vm_start);
 +              ranges[i].end = min(end, vmas[i]->vm_end);
 +              ranges[i].pfns = pfns + nr_pages;
 +              nr_pages += (ranges[i].end - ranges[i].start) / PAGE_SIZE;
  
 -              pinned += r;
 +              r = hmm_vma_fault(&ranges[i], true);
 +              if (unlikely(r))
 +                      break;
 +      }
 +      if (unlikely(r)) {
 +              while (i--)
 +                      hmm_vma_range_done(&ranges[i]);
  
 -      } while (pinned < ttm->num_pages);
 +              goto out_free_pfns;
 +      }
  
        up_read(&mm->mmap_sem);
 +
 +      for (i = 0; i < ttm->num_pages; i++) {
 +              pages[i] = hmm_pfn_to_page(&ranges[0], pfns[i]);
 +              if (!pages[i]) {
 +                      pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
 +                             i, pfns[i]);
 +                      goto out_invalid_pfn;
 +              }
 +      }
 +      gtt->ranges = ranges;
 +
        return 0;
  
 -release_pages:
 -      release_pages(pages, pinned);
 +out_free_pfns:
 +      kvfree(pfns);
 +out_free_ranges:
 +      kvfree(ranges);
 +out:
        up_read(&mm->mmap_sem);
 +
        return r;
 +
 +out_invalid_pfn:
 +      for (i = 0; i < gtt->nr_ranges; i++)
 +              hmm_vma_range_done(&ranges[i]);
 +      kvfree(pfns);
 +      kvfree(ranges);
 +      return -ENOMEM;
  }
  
  /**
 - * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 + * amdgpu_ttm_tt_get_user_pages_done - stop HMM tracking CPU page table changes
 + * Check whether the pages backing this ttm range have been invalidated
   *
 - * Called by amdgpu_cs_list_validate(). This creates the page list
 - * that backs user memory and will ultimately be mapped into the device
 - * address space.
 + * Returns: true if pages are still valid
   */
 -void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 +bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 -      unsigned i;
 +      bool r = false;
 +      int i;
  
 -      gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
 -      for (i = 0; i < ttm->num_pages; ++i) {
 -              if (ttm->pages[i])
 -                      put_page(ttm->pages[i]);
 +      if (!gtt || !gtt->userptr)
 +              return false;
  
 -              ttm->pages[i] = pages ? pages[i] : NULL;
 +      DRM_DEBUG_DRIVER("user_pages_done 0x%llx nr_ranges %d pages 0x%lx\n",
 +              gtt->userptr, gtt->nr_ranges, ttm->num_pages);
 +
 +      WARN_ONCE(!gtt->ranges || !gtt->ranges[0].pfns,
 +              "No user pages to check\n");
 +
 +      if (gtt->ranges) {
 +              for (i = 0; i < gtt->nr_ranges; i++)
 +                      r |= hmm_vma_range_done(&gtt->ranges[i]);
 +              kvfree(gtt->ranges[0].pfns);
 +              kvfree(gtt->ranges);
 +              gtt->ranges = NULL;
        }
 +
 +      return r;
  }
 +#endif
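Together with amdgpu_ttm_tt_get_user_pages() above, the contract is get/use/done; a kernel-side sketch of a hypothetical caller (the real callers are amdgpu_gem_userptr_ioctl() and the CS path), not actual driver code:

    /* Hypothetical caller; error handling trimmed. */
    static int userptr_use_sketch(struct ttm_tt *ttm, struct page **pages)
    {
            int r = amdgpu_ttm_tt_get_user_pages(ttm, pages); /* start HMM tracking */

            if (r)
                    return r;

            /* ... bind the pages / submit work ... */

            if (!amdgpu_ttm_tt_get_user_pages_done(ttm))    /* stop tracking */
                    return -EAGAIN; /* CPU page tables changed; retry */

            return 0;
    }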
  
  /**
 - * amdgpu_ttm_tt_mark_user_page - Mark pages as dirty
 + * amdgpu_ttm_tt_set_user_pages - Copy the provided pages into the ttm_tt page array.
   *
 - * Called while unpinning userptr pages
 + * Called by amdgpu_cs_list_validate(). This creates the page list
 + * that backs user memory and will ultimately be mapped into the device
 + * address space.
   */
 -void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 +void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
  {
 -      struct amdgpu_ttm_tt *gtt = (void *)ttm;
 -      unsigned i;
 -
 -      for (i = 0; i < ttm->num_pages; ++i) {
 -              struct page *page = ttm->pages[i];
 -
 -              if (!page)
 -                      continue;
 +      unsigned long i;
  
 -              if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
 -                      set_page_dirty(page);
 -
 -              mark_page_accessed(page);
 -      }
 +      for (i = 0; i < ttm->num_pages; ++i)
 +              ttm->pages[i] = pages ? pages[i] : NULL;
  }
  
  /**
@@@ -950,14 -905,10 +955,14 @@@ static void amdgpu_ttm_tt_unpin_userptr
        /* unmap the pages mapped to the device */
        dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
  
 -      /* mark the pages as dirty */
 -      amdgpu_ttm_tt_mark_user_pages(ttm);
 -
        sg_free_table(ttm->sg);
 +
 +#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 +      if (gtt->ranges &&
 +          ttm->pages[0] == hmm_pfn_to_page(&gtt->ranges[0],
 +                                           gtt->ranges[0].pfns[0]))
 +              WARN_ONCE(1, "Missing get_user_pages_done\n");
 +#endif
  }
  
  int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
@@@ -1307,6 -1258,11 +1312,6 @@@ int amdgpu_ttm_tt_set_userptr(struct tt
        gtt->usertask = current->group_leader;
        get_task_struct(gtt->usertask);
  
 -      spin_lock_init(&gtt->guptasklock);
 -      INIT_LIST_HEAD(&gtt->guptasks);
 -      atomic_set(&gtt->mmu_invalidations, 0);
 -      gtt->last_set_pages = 0;
 -
        return 0;
  }
  
@@@ -1335,6 -1291,7 +1340,6 @@@ bool amdgpu_ttm_tt_affect_userptr(struc
                                  unsigned long end)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 -      struct amdgpu_ttm_gup_task_list *entry;
        unsigned long size;
  
        if (gtt == NULL || !gtt->userptr)
        if (gtt->userptr > end || gtt->userptr + size <= start)
                return false;
  
 -      /* Search the lists of tasks that hold this mapping and see
 -       * if current is one of them.  If it is return false.
 -       */
 -      spin_lock(&gtt->guptasklock);
 -      list_for_each_entry(entry, &gtt->guptasks, list) {
 -              if (entry->task == current) {
 -                      spin_unlock(&gtt->guptasklock);
 -                      return false;
 -              }
 -      }
 -      spin_unlock(&gtt->guptasklock);
 -
 -      atomic_inc(&gtt->mmu_invalidations);
 -
        return true;
  }
  
  /**
 - * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
 - */
 -bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
 -                                     int *last_invalidated)
 -{
 -      struct amdgpu_ttm_tt *gtt = (void *)ttm;
 -      int prev_invalidated = *last_invalidated;
 -
 -      *last_invalidated = atomic_read(&gtt->mmu_invalidations);
 -      return prev_invalidated != *last_invalidated;
 -}
 -
 -/**
 - * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
 - * been invalidated since the last time they've been set?
 + * amdgpu_ttm_tt_is_userptr - Is this ttm_tt object backed by userptr memory?
   */
 -bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
 +bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
  
        if (gtt == NULL || !gtt->userptr)
                return false;
  
 -      return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
 +      return true;
  }
  
  /**
@@@ -1772,26 -1757,44 +1777,26 @@@ int amdgpu_ttm_init(struct amdgpu_devic
  
        /* Initialize various on-chip memory pools */
        r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
 -                         adev->gds.mem.total_size);
 +                         adev->gds.gds_size);
        if (r) {
                DRM_ERROR("Failed initializing GDS heap.\n");
                return r;
        }
  
 -      r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
 -                                  4, AMDGPU_GEM_DOMAIN_GDS,
 -                                  &adev->gds.gds_gfx_bo, NULL, NULL);
 -      if (r)
 -              return r;
 -
        r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
 -                         adev->gds.gws.total_size);
 +                         adev->gds.gws_size);
        if (r) {
                DRM_ERROR("Failed initializing gws heap.\n");
                return r;
        }
  
 -      r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
 -                                  1, AMDGPU_GEM_DOMAIN_GWS,
 -                                  &adev->gds.gws_gfx_bo, NULL, NULL);
 -      if (r)
 -              return r;
 -
        r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
 -                         adev->gds.oa.total_size);
 +                         adev->gds.oa_size);
        if (r) {
                DRM_ERROR("Failed initializing oa heap.\n");
                return r;
        }
  
 -      r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
 -                                  1, AMDGPU_GEM_DOMAIN_OA,
 -                                  &adev->gds.oa_gfx_bo, NULL, NULL);
 -      if (r)
 -              return r;
 -
        /* Register debugfs entries for amdgpu_ttm */
        r = amdgpu_ttm_debugfs_init(adev);
        if (r) {
index 33c1eb76c076303bd783d6ca7855873d2c2d6599,5228e8a49ec529e34a90ea32d88e473261e3fd17..524f70f2b52f2ddf770eb1e32796b9bf1a2479d9
@@@ -24,7 -24,7 +24,7 @@@
  #include <linux/firmware.h>
  #include <linux/slab.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_ucode.h"
  
@@@ -313,69 -313,6 +313,69 @@@ amdgpu_ucode_get_load_type(struct amdgp
        return AMDGPU_FW_LOAD_DIRECT;
  }
  
 +#define FW_VERSION_ATTR(name, mode, field)                            \
 +static ssize_t show_##name(struct device *dev,                                \
 +                        struct device_attribute *attr,                \
 +                        char *buf)                                    \
 +{                                                                     \
 +      struct drm_device *ddev = dev_get_drvdata(dev);                 \
 +      struct amdgpu_device *adev = ddev->dev_private;                 \
 +                                                                      \
 +      return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field);       \
 +}                                                                     \
 +static DEVICE_ATTR(name, mode, show_##name, NULL)
 +
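Expanded by hand, the first instantiation below is equivalent to the following; purely illustrative, the preprocessor emits the same thing:

    static ssize_t show_vce_fw_version(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
    {
            struct drm_device *ddev = dev_get_drvdata(dev);
            struct amdgpu_device *adev = ddev->dev_private;

            return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->vce.fw_version);
    }
    static DEVICE_ATTR(vce_fw_version, 0444, show_vce_fw_version, NULL);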
 +FW_VERSION_ATTR(vce_fw_version, 0444, vce.fw_version);
 +FW_VERSION_ATTR(uvd_fw_version, 0444, uvd.fw_version);
 +FW_VERSION_ATTR(mc_fw_version, 0444, gmc.fw_version);
 +FW_VERSION_ATTR(me_fw_version, 0444, gfx.me_fw_version);
 +FW_VERSION_ATTR(pfp_fw_version, 0444, gfx.pfp_fw_version);
 +FW_VERSION_ATTR(ce_fw_version, 0444, gfx.ce_fw_version);
 +FW_VERSION_ATTR(rlc_fw_version, 0444, gfx.rlc_fw_version);
 +FW_VERSION_ATTR(rlc_srlc_fw_version, 0444, gfx.rlc_srlc_fw_version);
 +FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
 +FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
 +FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 +FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
 +FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
 +FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
 +FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
 +FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
 +FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
 +FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 +FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
 +FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
 +FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
 +
 +static struct attribute *fw_attrs[] = {
 +      &dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
 +      &dev_attr_mc_fw_version.attr, &dev_attr_me_fw_version.attr,
 +      &dev_attr_pfp_fw_version.attr, &dev_attr_ce_fw_version.attr,
 +      &dev_attr_rlc_fw_version.attr, &dev_attr_rlc_srlc_fw_version.attr,
 +      &dev_attr_rlc_srlg_fw_version.attr, &dev_attr_rlc_srls_fw_version.attr,
 +      &dev_attr_mec_fw_version.attr, &dev_attr_mec2_fw_version.attr,
 +      &dev_attr_sos_fw_version.attr, &dev_attr_asd_fw_version.attr,
 +      &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
 +      &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
 +      &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
 +      &dev_attr_dmcu_fw_version.attr, NULL
 +};
 +
 +static const struct attribute_group fw_attr_group = {
 +      .name = "fw_version",
 +      .attrs = fw_attrs
 +};
 +
 +int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
 +{
 +      return sysfs_create_group(&adev->dev->kobj, &fw_attr_group);
 +}
 +
 +void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev)
 +{
 +      sysfs_remove_group(&adev->dev->kobj, &fw_attr_group);
 +}
 +
  static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
                                       struct amdgpu_firmware_info *ucode,
                                       uint64_t mc_addr, void *kptr)
index 118451f5e3aacfd4f2f06f32d43aa235f479a147,9501f8be8c756476f4c8ed76ff42503891255f4b..9f225cef20c5b271f02dcda8bd2e486d89ad34f9
@@@ -26,7 -26,8 +26,8 @@@
  
  #include <linux/firmware.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include <drm/drm.h>
  
  #include "amdgpu.h"
@@@ -212,6 -213,132 +213,6 @@@ int amdgpu_vcn_resume(struct amdgpu_dev
        return 0;
  }
  
 -static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
 -                                   struct dpg_pause_state *new_state)
 -{
 -      int ret_code;
 -      uint32_t reg_data = 0;
 -      uint32_t reg_data2 = 0;
 -      struct amdgpu_ring *ring;
 -
 -      /* pause/unpause if state is changed */
 -      if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
 -              DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
 -                      adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
 -                      new_state->fw_based, new_state->jpeg);
 -
 -              reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
 -                      (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 -
 -              if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
 -                      ret_code = 0;
 -
 -                      if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
 -                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 -                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
 -                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 -
 -                      if (!ret_code) {
 -                              /* pause DPG non-jpeg */
 -                              reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
 -                              WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 -                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 -                                                 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
 -                                                 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 -
 -                              /* Restore */
 -                              ring = &adev->vcn.ring_enc[0];
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 -
 -                              ring = &adev->vcn.ring_enc[1];
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 -                              WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 -
 -                              ring = &adev->vcn.ring_dec;
 -                              WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 -                                                 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 -                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 -                                                 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 -                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 -                      }
 -              } else {
 -                      /* unpause dpg non-jpeg, no need to wait */
 -                      reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
 -                      WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 -              }
 -              adev->vcn.pause_state.fw_based = new_state->fw_based;
 -      }
 -
 -      /* pause/unpause if state is changed */
 -      if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
 -              DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
 -                      adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
 -                      new_state->fw_based, new_state->jpeg);
 -
 -              reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
 -                      (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
 -
 -              if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
 -                      ret_code = 0;
 -
 -                      if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
 -                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 -                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
 -                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 -
 -                      if (!ret_code) {
 -                              /* Make sure JPRG Snoop is disabled before sending the pause */
 -                              reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
 -                              reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
 -                              WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
 -
 -                              /* pause DPG jpeg */
 -                              reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
 -                              WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 -                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 -                                                      UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
 -                                                      UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
 -
 -                              /* Restore */
 -                              ring = &adev->vcn.ring_jpeg;
 -                              WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
 -                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 -                                                      UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
 -                                                      UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 -                              WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
 -                                                      lower_32_bits(ring->gpu_addr));
 -                              WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
 -                                                      upper_32_bits(ring->gpu_addr));
 -                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
 -                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
 -                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 -                                                      UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 -
 -                              ring = &adev->vcn.ring_dec;
 -                              WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 -                                                 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 -                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 -                                                 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 -                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 -                      }
 -              } else {
 -                      /* unpause dpg jpeg, no need to wait */
 -                      reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
 -                      WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 -              }
 -              adev->vcn.pause_state.jpeg = new_state->jpeg;
 -      }
 -
 -      return 0;
 -}
 -
  static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
  {
        struct amdgpu_device *adev =
                else
                        new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
  
 -              amdgpu_vcn_pause_dpg_mode(adev, &new_state);
 +              adev->vcn.pause_dpg_mode(adev, &new_state);
        }
  
        fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
@@@ -291,7 -418,7 +292,7 @@@ void amdgpu_vcn_ring_begin_use(struct a
                else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
                        new_state.jpeg = VCN_DPG_STATE__PAUSE;
  
 -              amdgpu_vcn_pause_dpg_mode(adev, &new_state);
 +              adev->vcn.pause_dpg_mode(adev, &new_state);
        }
  }
  
@@@ -320,7 -447,7 +321,7 @@@ int amdgpu_vcn_dec_ring_test_ring(struc
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -482,7 -609,7 +483,7 @@@ int amdgpu_vcn_enc_ring_test_ring(struc
        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -641,7 -768,7 +642,7 @@@ int amdgpu_vcn_jpeg_ring_test_ring(stru
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -715,7 -842,7 +716,7 @@@ int amdgpu_vcn_jpeg_ring_test_ib(struc
                tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
index 1f0bd4d16475053f0fefdee292d2535e849a83c5,01f88269a7f91a09559d49479b96f1499e75f252..07a7e3820b7ba6006d1cb381c91b0e3ceaab302a
   *
   */
  
+ #include <linux/module.h>
+ #include <drm/drm_drv.h>
  #include "amdgpu.h"
  
  bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
@@@ -426,47 -430,3 +430,47 @@@ uint32_t amdgpu_virt_get_mclk(struct am
        return clk;
  }
  
 +void amdgpu_virt_init_reg_access_mode(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_virt *virt = &adev->virt;
 +
 +      if (virt->ops && virt->ops->init_reg_access_mode)
 +              virt->ops->init_reg_access_mode(adev);
 +}
 +
 +bool amdgpu_virt_support_psp_prg_ih_reg(struct amdgpu_device *adev)
 +{
 +      bool ret = false;
 +      struct amdgpu_virt *virt = &adev->virt;
 +
 +      if (amdgpu_sriov_vf(adev)
 +              && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH))
 +              ret = true;
 +
 +      return ret;
 +}
 +
 +bool amdgpu_virt_support_rlc_prg_reg(struct amdgpu_device *adev)
 +{
 +      bool ret = false;
 +      struct amdgpu_virt *virt = &adev->virt;
 +
 +      if (amdgpu_sriov_vf(adev)
 +              && (virt->reg_access_mode & AMDGPU_VIRT_REG_ACCESS_RLC)
 +              && !(amdgpu_sriov_runtime(adev)))
 +              ret = true;
 +
 +      return ret;
 +}
 +
 +bool amdgpu_virt_support_skip_setting(struct amdgpu_device *adev)
 +{
 +      bool ret = false;
 +      struct amdgpu_virt *virt = &adev->virt;
 +
 +      if (amdgpu_sriov_vf(adev)
 +              && (virt->reg_access_mode & AMDGPU_VIRT_REG_SKIP_SEETING))
 +              ret = true;
 +
 +      return ret;
 +}
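
These helpers turn the VF register-access capability bits into simple predicates for the rest of the driver. A hedged sketch of how a caller might use them (the function below is hypothetical; only the amdgpu_virt_support_* helpers come from this diff):

    /* hypothetical caller: choose between PSP-assisted and direct MMIO */
    static void example_program_ih(struct amdgpu_device *adev)
    {
            if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
                    /* route the IH register writes through the PSP */
            } else {
                    /* bare metal or older VF: plain WREG32 programming */
            }
    }
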
index 3a4f20766a390916ee40128dfb2fac79ba0f35e2,a316ce8eec98f37d1ca058b374610d36686fde35..1ffbc0d3d7a1bbb5adfdcd2c3784d1babb11dfa2
@@@ -24,7 -24,8 +24,8 @@@
  #include <linux/firmware.h>
  #include <linux/slab.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_atombios.h"
  #include "amdgpu_ih.h"
@@@ -1804,18 -1805,6 +1805,18 @@@ static bool cik_need_reset_on_init(stru
        return false;
  }
  
 +static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
 +{
 +      uint64_t nak_r, nak_g;
 +
 +      /* Get the number of NAKs received and generated */
 +      nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
 +      nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
 +
 +      /* Add the total number of NAKs, i.e. the number of replays */
 +      return (nak_r + nak_g);
 +}
 +
  static const struct amdgpu_asic_funcs cik_asic_funcs =
  {
        .read_disabled_bios = &cik_read_disabled_bios,
        .init_doorbell_index = &legacy_doorbell_index_init,
        .get_pcie_usage = &cik_get_pcie_usage,
        .need_reset_on_init = &cik_need_reset_on_init,
 +      .get_pcie_replay_count = &cik_get_pcie_replay_count,
  };
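
cik_get_pcie_replay_count() sums NAKs received and NAKs generated because each NAK, in either direction, forces one TLP replay; wiring it into cik_asic_funcs lets generic code query the count without ASIC-specific knowledge. A usage sketch, assuming the caller reaches the table through adev->asic_funcs:

    /* sketch: generic query through the per-ASIC callback table */
    static u64 query_pcie_replays(struct amdgpu_device *adev)
    {
            /* assumes adev->asic_funcs points at cik_asic_funcs here */
            return adev->asic_funcs->get_pcie_replay_count(adev);
    }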
  
  static int cik_common_early_init(void *handle)
index 4cd1731d62fdc1a4cf819691feb4ab249cc7d1b0,9642381ec9a20c2f423fd5a326a09278c9339f73..003bb57691830042f2450c8faed74a01b8bdf91f
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
  #include "amdgpu.h"
  #include "amdgpu_ih.h"
  #include "amdgpu_gfx.h"
@@@ -2080,7 -2082,7 +2082,7 @@@ static int gfx_v7_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
@@@ -4493,8 -4495,12 +4495,8 @@@ static int gfx_v7_0_sw_init(void *handl
  
  static int gfx_v7_0_sw_fini(void *handle)
  {
 -      int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 -
 -      amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
 -      amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
 -      amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 +      int i;
  
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@@ -5066,10 -5072,30 +5068,10 @@@ static void gfx_v7_0_set_irq_funcs(stru
  static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
  {
        /* init asic gds info */
 -      adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
 -      adev->gds.gws.total_size = 64;
 -      adev->gds.oa.total_size = 16;
 +      adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
 +      adev->gds.gws_size = 64;
 +      adev->gds.oa_size = 16;
        adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
 -
 -      if (adev->gds.mem.total_size == 64 * 1024) {
 -              adev->gds.mem.gfx_partition_size = 4096;
 -              adev->gds.mem.cs_partition_size = 4096;
 -
 -              adev->gds.gws.gfx_partition_size = 4;
 -              adev->gds.gws.cs_partition_size = 4;
 -
 -              adev->gds.oa.gfx_partition_size = 4;
 -              adev->gds.oa.cs_partition_size = 1;
 -      } else {
 -              adev->gds.mem.gfx_partition_size = 1024;
 -              adev->gds.mem.cs_partition_size = 1024;
 -
 -              adev->gds.gws.gfx_partition_size = 16;
 -              adev->gds.gws.cs_partition_size = 16;
 -
 -              adev->gds.oa.gfx_partition_size = 4;
 -              adev->gds.oa.cs_partition_size = 4;
 -      }
  }
  
  
index 25400b7087221c5831e57f88090b5fe77f8a30bb,812a8597c09d2858adf60c94acc31e860276921d..b7a2df46dc221de813196c4845c8e6c6111b5226
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
+ #include <linux/delay.h>
  #include <linux/kernel.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
  #include "vi.h"
@@@ -855,7 -859,7 +859,7 @@@ static int gfx_v8_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -2057,8 -2061,12 +2061,8 @@@ static int gfx_v8_0_sw_init(void *handl
  
  static int gfx_v8_0_sw_fini(void *handle)
  {
 -      int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 -
 -      amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
 -      amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
 -      amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 +      int i;
  
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@@ -7006,10 -7014,30 +7010,10 @@@ static void gfx_v8_0_set_rlc_funcs(stru
  static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
  {
        /* init asic gds info */
 -      adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
 -      adev->gds.gws.total_size = 64;
 -      adev->gds.oa.total_size = 16;
 +      adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
 +      adev->gds.gws_size = 64;
 +      adev->gds.oa_size = 16;
        adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
 -
 -      if (adev->gds.mem.total_size == 64 * 1024) {
 -              adev->gds.mem.gfx_partition_size = 4096;
 -              adev->gds.mem.cs_partition_size = 4096;
 -
 -              adev->gds.gws.gfx_partition_size = 4;
 -              adev->gds.gws.cs_partition_size = 4;
 -
 -              adev->gds.oa.gfx_partition_size = 4;
 -              adev->gds.oa.cs_partition_size = 1;
 -      } else {
 -              adev->gds.mem.gfx_partition_size = 1024;
 -              adev->gds.mem.cs_partition_size = 1024;
 -
 -              adev->gds.gws.gfx_partition_size = 16;
 -              adev->gds.gws.cs_partition_size = 16;
 -
 -              adev->gds.oa.gfx_partition_size = 4;
 -              adev->gds.oa.cs_partition_size = 4;
 -      }
  }
  
  static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
index 2e9cac19a4173694b400940a157b997a494907aa,08d1c134d125a38b97c23fd3f14e239cd744ad16..702ca55040661a9374ecc21ad6b623d8cc6934d6
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
+ #include <linux/delay.h>
  #include <linux/kernel.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_gfx.h"
  #include "soc15.h"
@@@ -34,7 -38,6 +38,7 @@@
  #include "vega10_enum.h"
  #include "hdp/hdp_4_0_offset.h"
  
 +#include "soc15.h"
  #include "soc15_common.h"
  #include "clearstate_gfx9.h"
  #include "v9_structs.h"
@@@ -308,14 -311,12 +312,14 @@@ static void gfx_v9_0_init_golden_regist
  {
        switch (adev->asic_type) {
        case CHIP_VEGA10:
 -              soc15_program_register_sequence(adev,
 -                                               golden_settings_gc_9_0,
 -                                               ARRAY_SIZE(golden_settings_gc_9_0));
 -              soc15_program_register_sequence(adev,
 -                                               golden_settings_gc_9_0_vg10,
 -                                               ARRAY_SIZE(golden_settings_gc_9_0_vg10));
 +              if (!amdgpu_virt_support_skip_setting(adev)) {
 +                      soc15_program_register_sequence(adev,
 +                                                       golden_settings_gc_9_0,
 +                                                       ARRAY_SIZE(golden_settings_gc_9_0));
 +                      soc15_program_register_sequence(adev,
 +                                                       golden_settings_gc_9_0_vg10,
 +                                                       ARRAY_SIZE(golden_settings_gc_9_0_vg10));
 +              }
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
@@@ -420,7 -421,7 +424,7 @@@ static int gfx_v9_0_ring_test_ring(stru
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1461,7 -1462,8 +1465,7 @@@ static int gfx_v9_0_ngg_init(struct amd
  
        /* GDS reserve memory: 64-byte alignment */
        adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
 -      adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
 -      adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
 +      adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size;
        adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
        adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
  
@@@ -1569,7 -1571,7 +1573,7 @@@ static int gfx_v9_0_ngg_en(struct amdgp
  
        gfx_v9_0_write_data_to_reg(ring, 0, false,
                                   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
 -                                 (adev->gds.mem.total_size +
 +                                 (adev->gds.gds_size +
                                    adev->gfx.ngg.gds_reserve_size));
  
        amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
@@@ -1783,6 -1785,10 +1787,6 @@@ static int gfx_v9_0_sw_fini(void *handl
                kfree(ras_if);
        }
  
 -      amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
 -      amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
 -      amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
 -
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
  
        gfx_v9_0_mec_fini(adev);
        gfx_v9_0_ngg_fini(adev);
 -      amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
 -                              &adev->gfx.rlc.clear_state_gpu_addr,
 -                              (void **)&adev->gfx.rlc.cs_ptr);
 +      amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
        if (adev->asic_type == CHIP_RAVEN) {
                amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                                &adev->gfx.rlc.cp_table_gpu_addr,
@@@ -1830,7 -1838,7 +1834,7 @@@ static void gfx_v9_0_select_se_sh(struc
        else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
  
 -      WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
 +      WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
  }
  
  static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
@@@ -1898,8 -1906,8 +1902,8 @@@ static void gfx_v9_0_init_compute_vmid(
        for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
                soc15_grbm_select(adev, 0, 0, 0, i);
                /* CP and shaders */
 -              WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 -              WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
 +              WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
 +              WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
        }
        soc15_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
@@@ -1910,7 -1918,7 +1914,7 @@@ static void gfx_v9_0_constants_init(str
        u32 tmp;
        int i;
  
 -      WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
 +      WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
  
        gfx_v9_0_tiling_mode_table_init(adev);
  
                if (i == 0) {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 -                      WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
 -                      WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
 +                      WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
 +                      WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 -                      WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
 +                      WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
                                (adev->gmc.private_aperture_start >> 48));
                        tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
                                (adev->gmc.shared_aperture_start >> 48));
 -                      WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
 +                      WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
                }
        }
        soc15_grbm_select(adev, 0, 0, 0, 0);
         */
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
  
 -      WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
 +      WREG32_SOC15_RLC(GC, 0, mmPA_SC_FIFO_SIZE,
                   (adev->gfx.config.sc_prim_fifo_size_frontend <<
                        PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
                   (adev->gfx.config.sc_prim_fifo_size_backend <<
@@@ -2020,11 -2028,11 +2024,11 @@@ static void gfx_v9_0_enable_gui_idle_in
  static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
  {
        /* csib */
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
                        adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
 +      WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
                        adev->gfx.rlc.clear_state_size);
  }
  
@@@ -2494,7 -2502,7 +2498,7 @@@ static void gfx_v9_0_cp_gfx_enable(stru
                for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                        adev->gfx.gfx_ring[i].sched.ready = false;
        }
 -      WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
        udelay(50);
  }
  
@@@ -2692,9 -2700,9 +2696,9 @@@ static void gfx_v9_0_cp_compute_enable(
        int i;
  
        if (enable) {
 -              WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
 +              WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
        } else {
 -              WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
 +              WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
                        (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                for (i = 0; i < adev->gfx.num_compute_rings; i++)
                        adev->gfx.compute_ring[i].sched.ready = false;
@@@ -2755,9 -2763,9 +2759,9 @@@ static void gfx_v9_0_kiq_setting(struc
        tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
        tmp &= 0xffffff00;
        tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
 -      WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
 +      WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
        tmp |= 0x80;
 -      WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
 +      WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
  }
  
  static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
@@@ -2975,67 -2983,67 +2979,67 @@@ static int gfx_v9_0_kiq_init_register(s
        /* disable wptr polling */
        WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
  
 -      WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
               mqd->cp_hqd_eop_base_addr_lo);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
               mqd->cp_hqd_eop_base_addr_hi);
  
        /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
               mqd->cp_hqd_eop_control);
  
        /* enable doorbell? */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
               mqd->cp_hqd_pq_doorbell_control);
  
        /* disable the queue if it's active */
        if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
 -              WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
                for (j = 0; j < adev->usec_timeout; j++) {
                        if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
                                break;
                        udelay(1);
                }
 -              WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
                       mqd->cp_hqd_dequeue_request);
 -              WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
                       mqd->cp_hqd_pq_rptr);
 -              WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
                       mqd->cp_hqd_pq_wptr_lo);
 -              WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
                       mqd->cp_hqd_pq_wptr_hi);
        }
  
        /* set the pointer to the MQD */
 -      WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
               mqd->cp_mqd_base_addr_lo);
 -      WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
               mqd->cp_mqd_base_addr_hi);
  
        /* set MQD vmid to 0 */
 -      WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
               mqd->cp_mqd_control);
  
        /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
               mqd->cp_hqd_pq_base_lo);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
               mqd->cp_hqd_pq_base_hi);
  
        /* set up the HQD, this is similar to CP_RB0_CNTL */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
               mqd->cp_hqd_pq_control);
  
        /* set the wb address whether it's enabled or not */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
                                mqd->cp_hqd_pq_rptr_report_addr_lo);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
                                mqd->cp_hqd_pq_rptr_report_addr_hi);
  
        /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
               mqd->cp_hqd_pq_wptr_poll_addr_lo);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
               mqd->cp_hqd_pq_wptr_poll_addr_hi);
  
        /* enable the doorbell if requested */
                                        (adev->doorbell_index.userqueue_end * 2) << 2);
        }
  
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
               mqd->cp_hqd_pq_doorbell_control);
  
        /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
               mqd->cp_hqd_pq_wptr_lo);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
               mqd->cp_hqd_pq_wptr_hi);
  
        /* set the vmid for the queue */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
  
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
               mqd->cp_hqd_persistent_state);
  
        /* activate the queue */
 -      WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
               mqd->cp_hqd_active);
  
        if (ring->use_doorbell)
@@@ -3079,7 -3087,7 +3083,7 @@@ static int gfx_v9_0_kiq_fini_register(s
        /* disable the queue if it's active */
        if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
  
 -              WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
  
                for (j = 0; j < adev->usec_timeout; j++) {
                        if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
                        DRM_DEBUG("KIQ dequeue request failed.\n");
  
                        /* Manual disable if dequeue request times out */
 -                      WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
 +                      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
                }
  
 -              WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
 +              WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
                      0);
        }
  
 -      WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
  
        return 0;
  }
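
The wholesale WREG32_SOC15 → WREG32_SOC15_RLC conversion in these HQD hunks routes the writes through an RLC-aware helper, which matters under SR-IOV where a VF may not be permitted to hit some GC registers directly. The shape of such a helper, as a heavily hedged sketch (the fallback logic is an assumption, not the real macro from soc15_common.h):

    /* conceptual shape only, not the real WREG32_SOC15_RLC definition */
    static void wreg32_rlc_sketch(struct amdgpu_device *adev,
                                  u32 reg, u32 value)
    {
            if (amdgpu_virt_support_rlc_prg_reg(adev)) {
                    /* hand the write to the RLC, e.g. via scratch registers */
            } else {
                    WREG32(reg, value); /* bare metal: plain MMIO */
            }
    }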
@@@ -3525,241 -3533,6 +3529,241 @@@ static void gfx_v9_0_ring_emit_gds_swit
                                   (1 << (oa_size + oa_base)) - (1 << oa_base));
  }
  
 +static const u32 vgpr_init_compute_shader[] =
 +{
 +      0xb07c0000, 0xbe8000ff,
 +      0x000000f8, 0xbf110800,
 +      0x7e000280, 0x7e020280,
 +      0x7e040280, 0x7e060280,
 +      0x7e080280, 0x7e0a0280,
 +      0x7e0c0280, 0x7e0e0280,
 +      0x80808800, 0xbe803200,
 +      0xbf84fff5, 0xbf9c0000,
 +      0xd28c0001, 0x0001007f,
 +      0xd28d0001, 0x0002027e,
 +      0x10020288, 0xb8810904,
 +      0xb7814000, 0xd1196a01,
 +      0x00000301, 0xbe800087,
 +      0xbefc00c1, 0xd89c4000,
 +      0x00020201, 0xd89cc080,
 +      0x00040401, 0x320202ff,
 +      0x00000800, 0x80808100,
 +      0xbf84fff8, 0x7e020280,
 +      0xbf810000, 0x00000000,
 +};
 +
 +static const u32 sgpr_init_compute_shader[] =
 +{
 +      0xb07c0000, 0xbe8000ff,
 +      0x0000005f, 0xbee50080,
 +      0xbe812c65, 0xbe822c65,
 +      0xbe832c65, 0xbe842c65,
 +      0xbe852c65, 0xb77c0005,
 +      0x80808500, 0xbf84fff8,
 +      0xbe800080, 0xbf810000,
 +};
 +
 +static const struct soc15_reg_entry vgpr_init_regs[] = {
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
 +};
 +
 +static const struct soc15_reg_entry sgpr_init_regs[] = {
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
 +   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
 +};
 +
 +static const struct soc15_reg_entry sec_ded_counter_registers[] = {
 +   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2) },
 +   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2) },
 +   { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT) },
 +};
 +
 +static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 +{
 +      struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
 +      struct amdgpu_ib ib;
 +      struct dma_fence *f = NULL;
 +      int r, i, j;
 +      unsigned total_size, vgpr_offset, sgpr_offset;
 +      u64 gpu_addr;
 +
 +      /* only supported when RAS is enabled */
 +      if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 +              return 0;
 +
 +      /* bail if the compute ring is not ready */
 +      if (!ring->sched.ready)
 +              return 0;
 +
 +      total_size =
 +              ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
 +      total_size +=
 +              ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
 +      total_size = ALIGN(total_size, 256);
 +      vgpr_offset = total_size;
 +      total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
 +      sgpr_offset = total_size;
 +      total_size += sizeof(sgpr_init_compute_shader);
 +
 +      /* allocate an indirect buffer to put the commands in */
 +      memset(&ib, 0, sizeof(ib));
 +      r = amdgpu_ib_get(adev, NULL, total_size, &ib);
 +      if (r) {
 +              DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
 +              return r;
 +      }
 +
 +      /* load the compute shaders */
 +      for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
 +              ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
 +
 +      for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
 +              ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
 +
 +      /* init the ib length to 0 */
 +      ib.length_dw = 0;
 +
 +      /* VGPR */
 +      /* write the register state for the compute dispatch */
 +      for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
 +              ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
 +              ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
 +                                                              - PACKET3_SET_SH_REG_START;
 +              ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
 +      }
 +      /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
 +      gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
 +      ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
 +      ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
 +                                                      - PACKET3_SET_SH_REG_START;
 +      ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
 +      ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
 +
 +      /* write dispatch packet */
 +      ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
 +      ib.ptr[ib.length_dw++] = 128; /* x */
 +      ib.ptr[ib.length_dw++] = 1; /* y */
 +      ib.ptr[ib.length_dw++] = 1; /* z */
 +      ib.ptr[ib.length_dw++] =
 +              REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
 +
 +      /* write CS partial flush packet */
 +      ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
 +      ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 +
 +      /* SGPR */
 +      /* write the register state for the compute dispatch */
 +      for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
 +              ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
 +              ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
 +                                                              - PACKET3_SET_SH_REG_START;
 +              ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
 +      }
 +      /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
 +      gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
 +      ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
 +      ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
 +                                                      - PACKET3_SET_SH_REG_START;
 +      ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
 +      ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
 +
 +      /* write dispatch packet */
 +      ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
 +      ib.ptr[ib.length_dw++] = 128; /* x */
 +      ib.ptr[ib.length_dw++] = 1; /* y */
 +      ib.ptr[ib.length_dw++] = 1; /* z */
 +      ib.ptr[ib.length_dw++] =
 +              REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
 +
 +      /* write CS partial flush packet */
 +      ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
 +      ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 +
 +      /* schedule the ib on the ring */
 +      r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 +      if (r) {
 +              DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
 +              goto fail;
 +      }
 +
 +      /* wait for the GPU to finish processing the IB */
 +      r = dma_fence_wait(f, false);
 +      if (r) {
 +              DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
 +              goto fail;
 +      }
 +
 +      /* read back registers to clear the counters */
 +      mutex_lock(&adev->grbm_idx_mutex);
 +      for (j = 0; j < 16; j++) {
 +              gfx_v9_0_select_se_sh(adev, 0x01, 0x0, j);
 +              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 +                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 +              gfx_v9_0_select_se_sh(adev, 0x02, 0x0, j);
 +              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 +                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 +              gfx_v9_0_select_se_sh(adev, 0x03, 0x0, j);
 +              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 +                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 +              gfx_v9_0_select_se_sh(adev, 0x04, 0x0, j);
 +              for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
 +                      RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
 +      }
 +      WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
 +      mutex_unlock(&adev->grbm_idx_mutex);
 +
 +fail:
 +      amdgpu_ib_free(adev, &ib, NULL);
 +      dma_fence_put(f);
 +
 +      return r;
 +}
 +
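The IB sizing arithmetic in gfx_v9_0_do_edc_gpr_workarounds() follows directly from the packet layout: each single-register PACKET3_SET_SH_REG costs 3 dwords (header, offset, value), the two-register COMPUTE_PGM_LO/HI write costs 4, PACKET3_DISPATCH_DIRECT costs 5 (header, x, y, z, initiator), and the CS-partial-flush EVENT_WRITE costs 2, at 4 bytes per dword:

    per pass:  (ARRAY_SIZE(regs) * 3 + 4 + 5 + 2) * 4 bytes
    e.g. VGPR: (10 * 3 + 11) * 4 = 164 bytes

The ALIGN(total_size, 256) before vgpr_offset keeps the shader start 256-byte aligned, matching the gpu_addr >> 8 written into mmCOMPUTE_PGM_LO.
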
  static int gfx_v9_0_early_init(void *handle)
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@@ -3801,31 -3574,8 +3805,31 @@@ static int gfx_v9_0_ecc_late_init(void 
                return 0;
        }
  
 -      if (*ras_if)
 +      /* requires IBs so do in late init after IB pool is initialized */
 +      r = gfx_v9_0_do_edc_gpr_workarounds(adev);
 +      if (r)
 +              return r;
 +
 +      /* handle resume path. */
 +      if (*ras_if) {
 +              /* resend the ras TA enable cmd during resume,
 +               * and prepare to handle failure.
 +               */
 +              ih_info.head = **ras_if;
 +              r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 +              if (r) {
 +                      if (r == -EAGAIN) {
 +                              /* request a gpu reset. will run again. */
 +                              amdgpu_ras_request_reset_on_boot(adev,
 +                                              AMDGPU_RAS_BLOCK__GFX);
 +                              return 0;
 +                      }
 +                      /* failed to enable ras, clean up everything. */
 +                      goto irq;
 +              }
 +              /* ras enabled successfully, continue. */
                goto resume;
 +      }
  
        *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
        if (!*ras_if)
        **ras_if = ras_block;
  
        r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 -      if (r)
 +      if (r) {
 +              if (r == -EAGAIN) {
 +                      amdgpu_ras_request_reset_on_boot(adev,
 +                                      AMDGPU_RAS_BLOCK__GFX);
 +                      r = 0;
 +              }
                goto feature;
 +      }
  
        ih_info.head = **ras_if;
        fs_info.head = **ras_if;
@@@ -3874,7 -3618,7 +3878,7 @@@ interrupt
  feature:
        kfree(*ras_if);
        *ras_if = NULL;
 -      return -EINVAL;
 +      return r;
  }
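
The reworked ecc_late_init error handling treats -EAGAIN from the RAS TA as "retry after a GPU reset" rather than a hard failure, both on resume and on first enable. Condensed pseudo-C of the resume branch the hunks above add:

    /* condensed control flow of the '+' lines above, not new code */
    if (*ras_if) {
            r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
            if (r == -EAGAIN) {
                    /* TA not ready yet: schedule a reset, run again later */
                    amdgpu_ras_request_reset_on_boot(adev, AMDGPU_RAS_BLOCK__GFX);
                    return 0;
            }
            if (r)
                    goto irq;     /* real failure: tear everything down */
            goto resume;          /* enabled: re-arm the interrupt and exit */
    }

The same pattern is applied verbatim to the UMC (gmc_v9_0) and SDMA blocks further down.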
  
  static int gfx_v9_0_late_init(void *handle)
@@@ -4579,8 -4323,8 +4583,8 @@@ static void gfx_v9_0_hqd_set_priority(s
        mutex_lock(&adev->srbm_mutex);
        soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
  
 -      WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
 -      WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
 +      WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
  
        soc15_grbm_select(adev, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
@@@ -5316,13 -5060,13 +5320,13 @@@ static void gfx_v9_0_set_gds_init(struc
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
 -              adev->gds.mem.total_size = 0x10000;
 +              adev->gds.gds_size = 0x10000;
                break;
        case CHIP_RAVEN:
 -              adev->gds.mem.total_size = 0x1000;
 +              adev->gds.gds_size = 0x1000;
                break;
        default:
 -              adev->gds.mem.total_size = 0x10000;
 +              adev->gds.gds_size = 0x10000;
                break;
        }
  
                break;
        }
  
 -      adev->gds.gws.total_size = 64;
 -      adev->gds.oa.total_size = 16;
 -
 -      if (adev->gds.mem.total_size == 64 * 1024) {
 -              adev->gds.mem.gfx_partition_size = 4096;
 -              adev->gds.mem.cs_partition_size = 4096;
 -
 -              adev->gds.gws.gfx_partition_size = 4;
 -              adev->gds.gws.cs_partition_size = 4;
 -
 -              adev->gds.oa.gfx_partition_size = 4;
 -              adev->gds.oa.cs_partition_size = 1;
 -      } else {
 -              adev->gds.mem.gfx_partition_size = 1024;
 -              adev->gds.mem.cs_partition_size = 1024;
 -
 -              adev->gds.gws.gfx_partition_size = 16;
 -              adev->gds.gws.cs_partition_size = 16;
 -
 -              adev->gds.oa.gfx_partition_size = 4;
 -              adev->gds.oa.cs_partition_size = 4;
 -      }
 +      adev->gds.gws_size = 64;
 +      adev->gds.oa_size = 16;
  }
  
  static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
index 8bf2ba310fd95521000eac24c67680c1693f8c99,c0e25583f98724509effa965500e177a18d8f363..9238280d1ff76a1d5951ed657d66f467628ac5e7
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include <drm/drm_cache.h>
  #include "amdgpu.h"
  #include "gmc_v8_0.h"
@@@ -289,7 -292,7 +292,7 @@@ out
   *
   * @adev: amdgpu_device pointer
   *
 - * Load the GDDR MC ucode into the hw (CIK).
 + * Load the GDDR MC ucode into the hw (VI).
   * Returns 0 on success, error on failure.
   */
  static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
@@@ -443,7 -446,7 +446,7 @@@ static void gmc_v8_0_vram_gtt_location(
   * @adev: amdgpu_device pointer
   *
   * Set the location of vram, gart, and AGP in the GPU's
 - * physical address space (CIK).
 + * physical address space (VI).
   */
  static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
  {
   * @adev: amdgpu_device pointer
   *
   * Look up the amount of vram, vram width, and decide how to place
 - * vram and gart within the GPU's physical address space (CIK).
 + * vram and gart within the GPU's physical address space (VI).
   * Returns 0 for success.
   */
  static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
   * @adev: amdgpu_device pointer
   * @vmid: vm instance to flush
   *
 - * Flush the TLB for the requested page table (CIK).
 + * Flush the TLB for the requested page table (VI).
   */
  static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
                                uint32_t vmid, uint32_t flush_type)
@@@ -800,7 -803,7 +803,7 @@@ static void gmc_v8_0_set_prt(struct amd
   * This sets up the TLBs, programs the page tables for VMID0,
   * sets up the hw for VMIDs 1-15 which are allocated on
   * demand, and sets up the global locations for the LDS, GDS,
 - * and GPUVM for FSA64 clients (CIK).
 + * and GPUVM for FSA64 clients (VI).
   * Returns 0 for success, errors for failure.
   */
  static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
@@@ -948,7 -951,7 +951,7 @@@ static int gmc_v8_0_gart_init(struct am
   *
   * @adev: amdgpu_device pointer
   *
 - * This disables all VM page table (CIK).
 + * This disables all VM page tables (VI).
   */
  static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
  {
   * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
   * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
   *
 - * Print human readable fault information (CIK).
 + * Print human readable fault information (VI).
   */
  static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
                                     u32 addr, u32 mc_client, unsigned pasid)
index 602593bab7a7b60750de287f21e1edb7f98c53d6,51bbf773b4f5ba356dc8768b889be8b1e952fa20..ba4b2aa9181b9f614f993b8b8bb3a26d4b16f1c9
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
  #include <linux/firmware.h>
+ #include <linux/pci.h>
  #include <drm/drm_cache.h>
  #include "amdgpu.h"
  #include "gmc_v9_0.h"
  #include "amdgpu_atomfirmware.h"
@@@ -687,25 -691,8 +691,25 @@@ static int gmc_v9_0_ecc_late_init(void 
                return 0;
        }
        /* handle resume path. */
 -      if (*ras_if)
 +      if (*ras_if) {
 +              /* resend the ras TA enable cmd during resume,
 +               * and prepare to handle failure.
 +               */
 +              ih_info.head = **ras_if;
 +              r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 +              if (r) {
 +                      if (r == -EAGAIN) {
 +                              /* request a gpu reset. will run again. */
 +                              amdgpu_ras_request_reset_on_boot(adev,
 +                                              AMDGPU_RAS_BLOCK__UMC);
 +                              return 0;
 +                      }
 +                      /* failed to enable ras, clean up everything. */
 +                      goto irq;
 +              }
 +              /* ras enabled successfully, continue. */
                goto resume;
 +      }
  
        *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
        if (!*ras_if)
        **ras_if = ras_block;
  
        r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 -      if (r)
 +      if (r) {
 +              if (r == -EAGAIN) {
 +                      amdgpu_ras_request_reset_on_boot(adev,
 +                                      AMDGPU_RAS_BLOCK__UMC);
 +                      r = 0;
 +              }
                goto feature;
 +      }
  
        ih_info.head = **ras_if;
        fs_info.head = **ras_if;
@@@ -754,7 -735,7 +758,7 @@@ interrupt
  feature:
        kfree(*ras_if);
        *ras_if = NULL;
 -      return -EINVAL;
 +      return r;
  }
  
  
@@@ -1123,9 -1104,6 +1127,9 @@@ static void gmc_v9_0_init_golden_regist
  
        switch (adev->asic_type) {
        case CHIP_VEGA10:
 +              if (amdgpu_virt_support_skip_setting(adev))
 +                      break;
 +              /* fall through */
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_mmhub_1_0_0,
@@@ -1190,9 -1168,6 +1194,9 @@@ static int gmc_v9_0_gart_enable(struct 
        tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
        WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
  
 +      WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
 +      WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
 +
        /* After HDP is initialized, flush HDP.*/
        adev->nbio_funcs->hdp_flush(adev, NULL);
  
index 3f5827764df0048ef216727212417e206a4284e8,5f531a7db257e92d88a78db7ffb96fbe06cce076..2ea77269203728bbec3438bf85579bb5995c45a0
@@@ -24,7 -24,9 +24,9 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_psp.h"
  #include "amdgpu_ucode.h"
@@@ -50,10 -52,6 +52,10 @@@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin"
  
  static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
  
 +static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
 +static int psp_v3_1_ring_stop(struct psp_context *psp,
 +                            enum psp_ring_type ring_type);
 +
  static int psp_v3_1_init_microcode(struct psp_context *psp)
  {
        struct amdgpu_device *adev = psp->adev;
@@@ -300,57 -298,27 +302,57 @@@ static int psp_v3_1_ring_create(struct 
  
        psp_v3_1_reroute_ih(psp);
  
 -      /* Write low address of the ring to C2PMSG_69 */
 -      psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
 -      WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
 -      /* Write high address of the ring to C2PMSG_70 */
 -      psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
 -      WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
 -      /* Write size of ring to C2PMSG_71 */
 -      psp_ring_reg = ring->ring_size;
 -      WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
 -      /* Write the ring initialization command to C2PMSG_64 */
 -      psp_ring_reg = ring_type;
 -      psp_ring_reg = psp_ring_reg << 16;
 -      WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
 -
 -      /* there might be handshake issue with hardware which needs delay */
 -      mdelay(20);
 -
 -      /* Wait for response flag (bit 31) in C2PMSG_64 */
 -      ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
 -                         0x80000000, 0x8000FFFF, false);
 +      if (psp_v3_1_support_vmr_ring(psp)) {
 +              ret = psp_v3_1_ring_stop(psp, ring_type);
 +              if (ret) {
 +                      DRM_ERROR("psp_v3_1_ring_stop failed!\n");
 +                      return ret;
 +              }
 +
 +              /* Write low address of the ring to C2PMSG_102 */
 +              psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
 +              /* Write high address of the ring to C2PMSG_103 */
 +              psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
 +              /* No size initialization for sriov */
 +              /* Write the ring initialization command to C2PMSG_101 */
 +              psp_ring_reg = ring_type;
 +              psp_ring_reg = psp_ring_reg << 16;
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
 +
 +              /* there might be a hardware handshake issue which needs a delay */
 +              mdelay(20);
 +
 +              /* Wait for response flag (bit 31) in C2PMSG_101 */
 +              ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
 +                                      mmMP0_SMN_C2PMSG_101), 0x80000000,
 +                                      0x8000FFFF, false);
 +      } else {
 +
 +              /* Write low address of the ring to C2PMSG_69 */
 +              psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
 +              /* Write high address of the ring to C2PMSG_70 */
 +              psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
 +              /* Write size of ring to C2PMSG_71 */
 +              psp_ring_reg = ring->ring_size;
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
 +              /* Write the ring initialization command to C2PMSG_64 */
 +              psp_ring_reg = ring_type;
 +              psp_ring_reg = psp_ring_reg << 16;
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
 +
 +              /* there might be a hardware handshake issue which needs a delay */
 +              mdelay(20);
 +
 +              /* Wait for response flag (bit 31) in C2PMSG_64 */
 +              ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0,
 +                                      mmMP0_SMN_C2PMSG_64), 0x80000000,
 +                                      0x8000FFFF, false);
  
 +      }
        return ret;
  }
  
@@@ -361,31 -329,16 +363,31 @@@ static int psp_v3_1_ring_stop(struct ps
        unsigned int psp_ring_reg = 0;
        struct amdgpu_device *adev = psp->adev;
  
 -      /* Write the ring destroy command to C2PMSG_64 */
 -      psp_ring_reg = 3 << 16;
 -      WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
 -
 -      /* there might be handshake issue with hardware which needs delay */
 -      mdelay(20);
 -
 -      /* Wait for response flag (bit 31) in C2PMSG_64 */
 -      ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
 -                         0x80000000, 0x80000000, false);
 +      if (psp_v3_1_support_vmr_ring(psp)) {
 +              /* Write the Destroy GPCOM ring command to C2PMSG_101 */
 +              psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
 +
 +              /* there might be a handshake issue which needs a delay */
 +              mdelay(20);
 +
 +              /* Wait for response flag (bit 31) in C2PMSG_101 */
 +              ret = psp_wait_for(psp,
 +                              SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
 +                              0x80000000, 0x80000000, false);
 +      } else {
 +              /* Write the ring destroy command to C2PMSG_64 */
 +              psp_ring_reg = 3 << 16;
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
 +
 +              /* there might be a handshake issue which needs a delay */
 +              mdelay(20);
 +
 +              /* Wait for response flag (bit 31) in C2PMSG_64 */
 +              ret = psp_wait_for(psp,
 +                              SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
 +                              0x80000000, 0x80000000, false);
 +      }
  
        return ret;
  }
@@@ -424,10 -377,7 +426,10 @@@ static int psp_v3_1_cmd_submit(struct p
        uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
  
        /* KM (GPCOM) prepare write pointer */
 -      psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 +      if (psp_v3_1_support_vmr_ring(psp))
 +              psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
 +      else
 +              psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
  
        /* Update KM RB frame pointer to new frame */
        /* write_frame ptr increments by size of rb_frame in bytes */
  
        /* Update the write Pointer in DWORDs */
        psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
 -      WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
 +      if (psp_v3_1_support_vmr_ring(psp)) {
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
 +              /* send interrupt to PSP for SRIOV ring write pointer update */
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
 +                                      GFX_CTRL_CMD_ID_CONSUME_CMD);
 +      } else
 +              WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
  
        return 0;
  }
@@@ -632,14 -576,6 +634,14 @@@ static int psp_v3_1_mode1_reset(struct 
        return 0;
  }
  
 +static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
 +{
 +      if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version >= 0x80455)
 +              return true;
 +
 +      return false;
 +}
 +
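psp_v3_1_support_vmr_ring() gates the whole ring setup on two facts visible in this diff: the device is an SR-IOV VF and the SOS firmware is at least 0x80455. The hunks above then select between two C2PMSG register sets; summarizing what the new code writes:

    purpose                     bare metal     VMR (SR-IOV VF)
    ring base low / high        C2PMSG_69/70   C2PMSG_102/103
    ring size                   C2PMSG_71      not programmed
    command + response flag     C2PMSG_64      C2PMSG_101
    ring write pointer          C2PMSG_67      C2PMSG_102 (reused after create)

In the VMR case cmd_submit also rings a doorbell by writing GFX_CTRL_CMD_ID_CONSUME_CMD to C2PMSG_101 after updating the pointer.
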
  static const struct psp_funcs psp_v3_1_funcs = {
        .init_microcode = psp_v3_1_init_microcode,
        .bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
        .compare_sram_data = psp_v3_1_compare_sram_data,
        .smu_reload_quirk = psp_v3_1_smu_reload_quirk,
        .mode1_reset = psp_v3_1_mode1_reset,
 +      .support_vmr_ring = psp_v3_1_support_vmr_ring,
  };
  
  void psp_v3_1_set_psp_funcs(struct psp_context *psp)
index 7a259c5b6c62d4f0cc8f0db0e2f03612bd4a4ace,d212011c40af01e97082421629e32c013e7e0710..bc30875995237bc473b42d85dad0bff8913eb2ef
   *
   */
  
+ #include <linux/delay.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include <linux/module.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_ucode.h"
  #include "amdgpu_trace.h"
@@@ -210,14 -213,12 +213,14 @@@ static void sdma_v4_0_init_golden_regis
  {
        switch (adev->asic_type) {
        case CHIP_VEGA10:
 -              soc15_program_register_sequence(adev,
 -                                               golden_settings_sdma_4,
 -                                               ARRAY_SIZE(golden_settings_sdma_4));
 -              soc15_program_register_sequence(adev,
 -                                               golden_settings_sdma_vg10,
 -                                               ARRAY_SIZE(golden_settings_sdma_vg10));
 +              if (!amdgpu_virt_support_skip_setting(adev)) {
 +                      soc15_program_register_sequence(adev,
 +                                                       golden_settings_sdma_4,
 +                                                       ARRAY_SIZE(golden_settings_sdma_4));
 +                      soc15_program_register_sequence(adev,
 +                                                       golden_settings_sdma_vg10,
 +                                                       ARRAY_SIZE(golden_settings_sdma_vg10));
 +              }
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
@@@ -1209,7 -1210,7 +1212,7 @@@ static int sdma_v4_0_ring_test_ring(str
                tmp = le32_to_cpu(adev->wb.wb[index]);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1523,25 -1524,8 +1526,25 @@@ static int sdma_v4_0_late_init(void *ha
        }
  
        /* handle resume path. */
 -      if (*ras_if)
 +      if (*ras_if) {
 +              /* resend the ras TA enable cmd during resume,
 +               * and prepare to handle failure.
 +               */
 +              ih_info.head = **ras_if;
 +              r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 +              if (r) {
 +                      if (r == -EAGAIN) {
 +                              /* request a gpu reset. will run again. */
 +                              amdgpu_ras_request_reset_on_boot(adev,
 +                                              AMDGPU_RAS_BLOCK__SDMA);
 +                              return 0;
 +                      }
 +                      /* failed to enable ras, clean up everything. */
 +                      goto irq;
 +              }
 +              /* ras enabled successfully, continue. */
                goto resume;
 +      }
  
        *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
        if (!*ras_if)
        **ras_if = ras_block;
  
        r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
 -      if (r)
 +      if (r) {
 +              if (r == -EAGAIN) {
 +                      amdgpu_ras_request_reset_on_boot(adev,
 +                                      AMDGPU_RAS_BLOCK__SDMA);
 +                      r = 0;
 +              }
                goto feature;
 +      }
  
        ih_info.head = **ras_if;
        fs_info.head = **ras_if;
@@@ -1596,7 -1574,7 +1599,7 @@@ interrupt
  feature:
        kfree(*ras_if);
        *ras_if = NULL;
 -      return -EINVAL;
 +      return r;
  }
  
  static int sdma_v4_0_sw_init(void *handle)
index 4ff930a47e108b37049938ec2df7a24570197ceb,b7034befe3118fcd0a8fe01184f4ba38126875f6..5e1a2528df7f379c15117e680faec4783e40eadd
@@@ -24,7 -24,8 +24,8 @@@
  #include <linux/firmware.h>
  #include <linux/slab.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_atombios.h"
  #include "amdgpu_ih.h"
@@@ -1375,18 -1376,6 +1376,18 @@@ static void si_get_pcie_usage(struct am
        *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
  }
  
 +static uint64_t si_get_pcie_replay_count(struct amdgpu_device *adev)
 +{
 +      uint64_t nak_r, nak_g;
 +
 +      /* Get the number of NAKs received and generated */
 +      nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
 +      nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
 +
 +      /* Add the two counts to get the total number of NAKs, i.e. the number of replays */
 +      return (nak_r + nak_g);
 +}
 +
  static const struct amdgpu_asic_funcs si_asic_funcs =
  {
        .read_disabled_bios = &si_read_disabled_bios,
        .need_full_reset = &si_need_full_reset,
        .get_pcie_usage = &si_get_pcie_usage,
        .need_reset_on_init = &si_need_reset_on_init,
 +      .get_pcie_replay_count = &si_get_pcie_replay_count,
  };
  
  static uint32_t si_get_rev_id(struct amdgpu_device *adev)
index d9fdd95fd6e6f07f3f2233cd61aa8e1c4eda6791,ede0619a629779836f316a7351acf32609f7b9db..b769995c30295b1fd38185a08fde39ce68d8861a
@@@ -23,7 -23,8 +23,8 @@@
  #include <linux/firmware.h>
  #include <linux/slab.h>
  #include <linux/module.h>
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_atombios.h"
  #include "amdgpu_ih.h"
@@@ -44,7 -45,6 +45,7 @@@
  #include "smuio/smuio_9_0_offset.h"
  #include "smuio/smuio_9_0_sh_mask.h"
  #include "nbio/nbio_7_0_default.h"
 +#include "nbio/nbio_7_0_offset.h"
  #include "nbio/nbio_7_0_sh_mask.h"
  #include "nbio/nbio_7_0_smn.h"
  #include "mp/mp_9_0_offset.h"
@@@ -65,9 -65,6 +66,9 @@@
  #include "dce_virtual.h"
  #include "mxgpu_ai.h"
  #include "amdgpu_smu.h"
 +#include "amdgpu_ras.h"
 +#include "amdgpu_xgmi.h"
 +#include <uapi/linux/kfd_ioctl.h>
  
  #define mmMP0_MISC_CGTT_CTRL0                                                                   0x01b9
  #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX                                                          0
@@@ -234,7 -231,7 +235,7 @@@ void soc15_grbm_select(struct amdgpu_de
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
  
 -      WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
 +      WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
  }
  
  static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
@@@ -389,15 -386,7 +390,15 @@@ void soc15_program_register_sequence(st
                        tmp &= ~(entry->and_mask);
                        tmp |= entry->or_mask;
                }
 -              WREG32(reg, tmp);
 +
 +              if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
 +                      reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
 +                      reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
 +                      reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
 +                      WREG32_RLC(reg, tmp);
 +              else
 +                      WREG32(reg, tmp);
 +
        }
  
  }
@@@ -487,13 -476,6 +488,13 @@@ static int soc15_asic_reset(struct amdg
                        soc15_asic_get_baco_capability(adev, &baco_reset);
                else
                        baco_reset = false;
 +              if (baco_reset) {
 +                      struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
 +                      struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 +
 +                      if (hive || (ras && ras->supported))
 +                              baco_reset = false;
 +              }
                break;
        default:
                baco_reset = false;
@@@ -625,24 -607,12 +626,24 @@@ int soc15_set_ip_blocks(struct amdgpu_d
        case CHIP_VEGA20:
                amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
 -              amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 -              if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
 -                      if (adev->asic_type == CHIP_VEGA20)
 -                              amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 -                      else
 -                              amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 +
 +              /* For Vega10 SR-IOV, the PSP needs to be initialized before the IH */
 +              if (amdgpu_sriov_vf(adev)) {
 +                      if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
 +                              if (adev->asic_type == CHIP_VEGA20)
 +                                      amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 +                              else
 +                                      amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 +                      }
 +                      amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 +              } else {
 +                      amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
 +                      if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
 +                              if (adev->asic_type == CHIP_VEGA20)
 +                                      amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
 +                              else
 +                                      amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
 +                      }
                }
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
@@@ -764,8 -734,7 +765,8 @@@ static bool soc15_need_reset_on_init(st
        /* Just return false for soc15 GPUs.  Reset does not seem to
         * be necessary.
         */
 -      return false;
 +      if (!amdgpu_passthrough(adev))
 +              return false;
  
        if (adev->flags & AMD_IS_APU)
                return false;
        return false;
  }
  
 +static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
 +{
 +      uint64_t nak_r, nak_g;
 +
 +      /* Get the number of NAKs received and generated */
 +      nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
 +      nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);
 +
 +      /* Add the two counts to get the total number of NAKs, i.e. the number of replays */
 +      return (nak_r + nak_g);
 +}
 +
  static const struct amdgpu_asic_funcs soc15_asic_funcs =
  {
        .read_disabled_bios = &soc15_read_disabled_bios,
        .init_doorbell_index = &vega10_doorbell_index_init,
        .get_pcie_usage = &soc15_get_pcie_usage,
        .need_reset_on_init = &soc15_need_reset_on_init,
 +      .get_pcie_replay_count = &soc15_get_pcie_replay_count,
  };
  
  static const struct amdgpu_asic_funcs vega20_asic_funcs =
        .init_doorbell_index = &vega20_doorbell_index_init,
        .get_pcie_usage = &soc15_get_pcie_usage,
        .need_reset_on_init = &soc15_need_reset_on_init,
 +      .get_pcie_replay_count = &soc15_get_pcie_replay_count,
  };
  
  static int soc15_common_early_init(void *handle)
  {
 +#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
 +      adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
 +      adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
        adev->smc_rreg = NULL;
        adev->smc_wreg = NULL;
        adev->pcie_rreg = &soc15_pcie_rreg;
@@@ -1047,17 -999,11 +1048,17 @@@ static void soc15_doorbell_range_init(s
        int i;
        struct amdgpu_ring *ring;
  
 -      for (i = 0; i < adev->sdma.num_instances; i++) {
 -              ring = &adev->sdma.instance[i].ring;
 -              adev->nbio_funcs->sdma_doorbell_range(adev, i,
 -                      ring->use_doorbell, ring->doorbell_index,
 -                      adev->doorbell_index.sdma_doorbell_range);
 +      /* Two reasons to skip programming these registers:
 +       * 1. The host driver has already programmed them.
 +       * 2. It avoids register programming violations in SR-IOV.
 +       */
 +      if (!amdgpu_virt_support_skip_setting(adev)) {
 +              for (i = 0; i < adev->sdma.num_instances; i++) {
 +                      ring = &adev->sdma.instance[i].ring;
 +                      adev->nbio_funcs->sdma_doorbell_range(adev, i,
 +                              ring->use_doorbell, ring->doorbell_index,
 +                              adev->doorbell_index.sdma_doorbell_range);
 +              }
        }
  
        adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
@@@ -1074,12 -1020,6 +1075,12 @@@ static int soc15_common_hw_init(void *h
        soc15_program_aspm(adev);
        /* setup nbio registers */
        adev->nbio_funcs->init_registers(adev);
 +      /* remap HDP registers to a hole in MMIO space,
 +       * in order to expose those registers to
 +       * process space
 +       */
 +      if (adev->nbio_funcs->remap_hdp_registers)
 +              adev->nbio_funcs->remap_hdp_registers(adev);
        /* enable the doorbell aperture */
        soc15_enable_doorbell_aperture(adev, true);
        /* HW doorbell routing policy: doorbell writing not
index bf3385280d3f6e429ac721909d429457f6ea5e52,70a1ecd5ce5011206b717b66bb0b86f36b927ceb..82abd8e728ab6c4794428ccd01720b7d5d7f0091
@@@ -23,7 -23,7 +23,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_uvd.h"
  #include "cikd.h"
@@@ -491,7 -491,7 +491,7 @@@ static int uvd_v4_2_ring_test_ring(stru
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -741,7 -741,6 +741,7 @@@ static const struct amdgpu_ring_funcs u
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = uvd_v4_2_ring_get_rptr,
        .get_wptr = uvd_v4_2_ring_get_wptr,
        .set_wptr = uvd_v4_2_ring_set_wptr,
index 3210a7bd9a6d693183c0e0334d9120346805bd00,40502b34105ef01b236df4b348988e580ce1e8ca..01e62fb8e6e00eb11b74d0c428f6aa3b56ccb119
@@@ -22,8 -22,9 +22,9 @@@
   * Authors: Christian König <[email protected]>
   */
  
+ #include <linux/delay.h>
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_uvd.h"
  #include "vid.h"
@@@ -506,7 -507,7 +507,7 @@@ static int uvd_v5_0_ring_test_ring(stru
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -849,7 -850,6 +850,7 @@@ static const struct amdgpu_ring_funcs u
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
index c61a314c56cc3aaaf403111802c5d38385f67aaa,bc25226f04c1bc44bcf067b6c6e0f6a5c4c12b9f..ad5bef49e455ce1905906fd58fd308d31d421525
@@@ -23,7 -23,7 +23,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_uvd.h"
  #include "vid.h"
@@@ -183,7 -183,7 +183,7 @@@ static int uvd_v6_0_enc_ring_test_ring(
        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -957,7 -957,7 +957,7 @@@ static int uvd_v6_0_ring_test_ring(stru
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1502,7 -1502,6 +1502,7 @@@ static const struct amdgpu_ring_funcs u
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
@@@ -1528,7 -1527,6 +1528,7 @@@ static const struct amdgpu_ring_funcs u
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = uvd_v6_0_ring_get_rptr,
        .get_wptr = uvd_v6_0_ring_get_wptr,
        .set_wptr = uvd_v6_0_ring_set_wptr,
@@@ -1557,7 -1555,6 +1557,7 @@@ static const struct amdgpu_ring_funcs u
        .align_mask = 0x3f,
        .nop = HEVC_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = uvd_v6_0_enc_ring_get_rptr,
        .get_wptr = uvd_v6_0_enc_ring_get_wptr,
        .set_wptr = uvd_v6_0_enc_ring_set_wptr,
index cdb96d4cb424591c57fa10288494010c8c1abfb3,9945c43bc0c509a497a592a56259a8c59d8e01a9..56f2e6884c5f57f1f5cd7a9650fc63a9edb818a0
@@@ -22,7 -22,7 +22,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_uvd.h"
  #include "soc15.h"
@@@ -191,7 -191,7 +191,7 @@@ static int uvd_v7_0_enc_ring_test_ring(
        for (i = 0; i < adev->usec_timeout; i++) {
                if (amdgpu_ring_get_rptr(ring) != rptr)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1227,7 -1227,7 +1227,7 @@@ static int uvd_v7_0_ring_test_ring(stru
                tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
-               DRM_UDELAY(1);
+               udelay(1);
        }
  
        if (i >= adev->usec_timeout)
@@@ -1759,7 -1759,6 +1759,7 @@@ static const struct amdgpu_ring_funcs u
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = uvd_v7_0_ring_get_rptr,
        .get_wptr = uvd_v7_0_ring_get_wptr,
@@@ -1792,7 -1791,6 +1792,7 @@@ static const struct amdgpu_ring_funcs u
        .align_mask = 0x3f,
        .nop = HEVC_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = uvd_v7_0_enc_ring_get_rptr,
        .get_wptr = uvd_v7_0_enc_ring_get_wptr,
index ab0cb8325796188316a6b98db321e7ee1991bfc7,05b67bf0e7a2003faaefd120d2e1859bd9d1469a..b6837fcfdba7b97e81c934472238ebb51c2bb3b1
@@@ -26,7 -26,7 +26,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_vce.h"
  #include "cikd.h"
@@@ -605,7 -605,6 +605,7 @@@ static const struct amdgpu_ring_funcs v
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = vce_v2_0_ring_get_rptr,
        .get_wptr = vce_v2_0_ring_get_wptr,
        .set_wptr = vce_v2_0_ring_set_wptr,
index 36902ec16dcf26d32a3b04899833fea102b811bf,ff3f8635d71987df196f7a0bd68329d5ca82f459..475ae68f38f5059b8db2cd201f4d120266f19dd9
@@@ -26,7 -26,7 +26,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_vce.h"
  #include "vid.h"
@@@ -894,7 -894,6 +894,7 @@@ static const struct amdgpu_ring_funcs v
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,
@@@ -918,7 -917,6 +918,7 @@@ static const struct amdgpu_ring_funcs v
        .align_mask = 0xf,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .get_rptr = vce_v3_0_ring_get_rptr,
        .get_wptr = vce_v3_0_ring_get_wptr,
        .set_wptr = vce_v3_0_ring_set_wptr,
index e267b073f5253fe59a8de33fcfc3b411cd1abde6,5fe8ab04bf91e2d4f2ce67f22d3966d62004a7d2..eafbe8d8248d0a5f08729416fec770cb94b06048
@@@ -25,7 -25,7 +25,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_vce.h"
  #include "soc15.h"
@@@ -1069,7 -1069,6 +1069,7 @@@ static const struct amdgpu_ring_funcs v
        .align_mask = 0x3f,
        .nop = VCE_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vce_v4_0_ring_get_rptr,
        .get_wptr = vce_v4_0_ring_get_wptr,
index bb47f5b24be50003906c997340e0571350ad0227,1f92dec6b884d6d752d3b9ed0d33c117ef251d20..d30ff256ff575d16258425cab701745a4aac6797
@@@ -22,7 -22,7 +22,7 @@@
   */
  
  #include <linux/firmware.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_vcn.h"
  #include "soc15.h"
@@@ -49,8 -49,6 +49,8 @@@ static void vcn_v1_0_set_jpeg_ring_func
  static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
  static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
  static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
 +static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 +                              struct dpg_pause_state *new_state);
  
  /**
   * vcn_v1_0_early_init - set function pointers
@@@ -142,9 -140,7 +142,9 @@@ static int vcn_v1_0_sw_init(void *handl
        if (r)
                return r;
  
 -      return r;
 +      adev->vcn.pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
 +
 +      return 0;
  }
  
  /**
@@@ -1208,132 -1204,6 +1208,132 @@@ static int vcn_v1_0_stop(struct amdgpu_
        return r;
  }
  
 +static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 +                              struct dpg_pause_state *new_state)
 +{
 +      int ret_code;
 +      uint32_t reg_data = 0;
 +      uint32_t reg_data2 = 0;
 +      struct amdgpu_ring *ring;
 +
 +      /* pause/unpause if state is changed */
 +      if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
 +              DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
 +                      adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
 +                      new_state->fw_based, new_state->jpeg);
 +
 +              reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
 +                      (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 +
 +              if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
 +                      ret_code = 0;
 +
 +                      if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
 +                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 +                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
 +                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 +
 +                      if (!ret_code) {
 +                              /* pause DPG non-jpeg */
 +                              reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
 +                              WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 +                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 +                                                 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
 +                                                 UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
 +
 +                              /* Restore */
 +                              ring = &adev->vcn.ring_enc[0];
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
 +
 +                              ring = &adev->vcn.ring_enc[1];
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
 +                              WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
 +
 +                              ring = &adev->vcn.ring_dec;
 +                              WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 +                                                 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 +                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 +                                                 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 +                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 +                      }
 +              } else {
 +                      /* unpause dpg non-jpeg, no need to wait */
 +                      reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
 +                      WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 +              }
 +              adev->vcn.pause_state.fw_based = new_state->fw_based;
 +      }
 +
 +      /* pause/unpause if state is changed */
 +      if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
 +              DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
 +                      adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
 +                      new_state->fw_based, new_state->jpeg);
 +
 +              reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
 +                      (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
 +
 +              if (new_state->jpeg == VCN_DPG_STATE__PAUSE) {
 +                      ret_code = 0;
 +
 +                      if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
 +                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 +                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
 +                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 +
 +                      if (!ret_code) {
 +                              /* Make sure JRBC snoop is disabled before sending the pause */
 +                              reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
 +                              reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK;
 +                              WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2);
 +
 +                              /* pause DPG jpeg */
 +                              reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
 +                              WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 +                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 +                                                      UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
 +                                                      UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
 +
 +                              /* Restore */
 +                              ring = &adev->vcn.ring_jpeg;
 +                              WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
 +                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 +                                                      UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK |
 +                                                      UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 +                              WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
 +                                                      lower_32_bits(ring->gpu_addr));
 +                              WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
 +                                                      upper_32_bits(ring->gpu_addr));
 +                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr);
 +                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr);
 +                              WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
 +                                                      UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
 +
 +                              ring = &adev->vcn.ring_dec;
 +                              WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
 +                                                 RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 +                              SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 +                                                 UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
 +                                                 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
 +                      }
 +              } else {
 +                      /* unpause dpg jpeg, no need to wait */
 +                      reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
 +                      WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 +              }
 +              adev->vcn.pause_state.jpeg = new_state->jpeg;
 +      }
 +
 +      return 0;
 +}
 +
  static bool vcn_v1_0_is_idle(void *handle)
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@@ -2184,7 -2054,6 +2184,7 @@@ static const struct amdgpu_ring_funcs v
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_dec_ring_get_rptr,
        .get_wptr = vcn_v1_0_dec_ring_get_wptr,
@@@ -2218,7 -2087,6 +2218,7 @@@ static const struct amdgpu_ring_funcs v
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .vmhub = AMDGPU_MMHUB,
        .get_rptr = vcn_v1_0_enc_ring_get_rptr,
        .get_wptr = vcn_v1_0_enc_ring_get_wptr,
@@@ -2250,7 -2118,6 +2250,7 @@@ static const struct amdgpu_ring_funcs v
        .align_mask = 0xf,
        .nop = PACKET0(0x81ff, 0),
        .support_64bit_ptrs = false,
 +      .no_user_fence = true,
        .vmhub = AMDGPU_MMHUB,
        .extra_dw = 64,
        .get_rptr = vcn_v1_0_jpeg_ring_get_rptr,
index 5f54acc70feca6faaeac4c48cce0bb77e0f3bfb0,266799fb0af4ab4cb18a561b5debf1f4a8e5e451..22260e6963b8d666e1656224d891853ae4e59c34
@@@ -20,7 -20,9 +20,9 @@@
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
- #include <drm/drmP.h>
+ #include <linux/pci.h>
  #include "amdgpu.h"
  #include "amdgpu_ih.h"
  #include "soc15.h"
@@@ -48,29 -50,14 +50,29 @@@ static void vega10_ih_enable_interrupts
  
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
 -      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 +      if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +              if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
 +                      DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
 +                      return;
 +              }
 +      } else {
 +              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 +      }
        adev->irq.ih.enabled = true;
  
        if (adev->irq.ih1.ring_size) {
                ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
                ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
                                           RB_ENABLE, 1);
 -              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
 +              if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +                      if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
 +                                              ih_rb_cntl)) {
 +                              DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
 +                              return;
 +                      }
 +              } else {
 +                      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
 +              }
                adev->irq.ih1.enabled = true;
        }
  
                ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
                ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
                                           RB_ENABLE, 1);
 -              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
 +              if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +                      if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
 +                                              ih_rb_cntl)) {
 +                              DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
 +                              return;
 +                      }
 +              } else {
 +                      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
 +              }
                adev->irq.ih2.enabled = true;
        }
  }
@@@ -104,15 -83,7 +106,15 @@@ static void vega10_ih_disable_interrupt
  
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
 -      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 +      if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +              if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
 +                      DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
 +                      return;
 +              }
 +      } else {
 +              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 +      }
 +
        /* set rptr, wptr to 0 */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
                ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
                ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
                                           RB_ENABLE, 0);
 -              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
 +              if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +                      if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
 +                                              ih_rb_cntl)) {
 +                              DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
 +                              return;
 +                      }
 +              } else {
 +                      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
 +              }
                /* set rptr, wptr to 0 */
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
                ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
                ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
                                           RB_ENABLE, 0);
 -              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
 +              if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +                      if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
 +                                              ih_rb_cntl)) {
 +                              DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
 +                              return;
 +                      }
 +              } else {
 +                      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
 +              }
 +
                /* set rptr, wptr to 0 */
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
@@@ -235,15 -189,7 +237,15 @@@ static int vega10_ih_irq_init(struct am
        ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
        ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
                                   !!adev->irq.msi_enabled);
 -      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 +
 +      if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +              if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
 +                      DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
 +                      return -ETIMEDOUT;
 +              }
 +      } else {
 +              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
 +      }
  
        /* set the writeback address whether it's enabled or not */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
                                           WPTR_OVERFLOW_ENABLE, 0);
                ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
                                           RB_FULL_DRAIN_ENABLE, 1);
 -              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
 +              if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +                      if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
 +                                              ih_rb_cntl)) {
 +                              DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
 +                              return -ETIMEDOUT;
 +                      }
 +              } else {
 +                      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
 +              }
  
                /* set rptr, wptr to 0 */
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
  
                ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
                ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
 -              WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
 +
 +              if (amdgpu_virt_support_psp_prg_ih_reg(adev)) {
 +                      if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
 +                                              ih_rb_cntl)) {
 +                              DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
 +                              return -ETIMEDOUT;
 +                      }
 +              } else {
 +                      WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
 +              }
  
                /* set rptr, wptr to 0 */
                WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
index b8adf3808de270dc30f900812cc87e3b81b8f1de,87c4617448738ab9cf24a2f436cfb80503ea4744..d40ed1a828dda2f0d0536e66cef00af045371894
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   */
+ #include <linux/pci.h>
  #include <linux/slab.h>
- #include <drm/drmP.h>
  #include "amdgpu.h"
  #include "amdgpu_atombios.h"
  #include "amdgpu_ih.h"
@@@ -987,18 -989,6 +989,18 @@@ static void vi_get_pcie_usage(struct am
        *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
  }
  
 +static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
 +{
 +      uint64_t nak_r, nak_g;
 +
 +      /* Get the number of NAKs received and generated */
 +      nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
 +      nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
 +
 +      /* Add the two counts to get the total number of NAKs, i.e. the number of replays */
 +      return (nak_r + nak_g);
 +}
 +
  static bool vi_need_reset_on_init(struct amdgpu_device *adev)
  {
        u32 clock_cntl, pc;
@@@ -1033,7 -1023,6 +1035,7 @@@ static const struct amdgpu_asic_funcs v
        .init_doorbell_index = &legacy_doorbell_index_init,
        .get_pcie_usage = &vi_get_pcie_usage,
        .need_reset_on_init = &vi_need_reset_on_init,
 +      .get_pcie_replay_count = &vi_get_pcie_replay_count,
  };
  
  #define CZ_REV_BRISTOL(rev)    \
index 86308d7b5b3fba178613a8f309ba4f44f7f26345,3d5e828c3d2843acd9ecb33b6d4e50c08b13cde6..1129dd96379663ed272334a18f6efd7bc3fa43c9
@@@ -29,7 -29,6 +29,7 @@@
  #include "dm_services_types.h"
  #include "dc.h"
  #include "dc/inc/core_types.h"
 +#include "dal_asic_id.h"
  
  #include "vid.h"
  #include "amdgpu.h"
  #include <linux/version.h>
  #include <linux/types.h>
  #include <linux/pm_runtime.h>
+ #include <linux/pci.h>
  #include <linux/firmware.h>
  
- #include <drm/drmP.h>
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_uapi.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_dp_mst_helper.h>
  #include <drm/drm_fb_helper.h>
+ #include <drm/drm_fourcc.h>
  #include <drm/drm_edid.h>
+ #include <drm/drm_vblank.h>
  
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
  #include "ivsrcid/irqsrcs_dcn_1_0.h"
@@@ -616,10 -617,6 +618,10 @@@ error
  static void amdgpu_dm_fini(struct amdgpu_device *adev)
  {
        amdgpu_dm_destroy_drm_device(&adev->dm);
 +
 +      /* DC Destroy TODO: Replace destroy DAL */
 +      if (adev->dm.dc)
 +              dc_destroy(&adev->dm.dc);
        /*
         * TODO: pageflip, vlank interrupt
         *
                mod_freesync_destroy(adev->dm.freesync_module);
                adev->dm.freesync_module = NULL;
        }
 -      /* DC Destroy TODO: Replace destroy DAL */
 -      if (adev->dm.dc)
 -              dc_destroy(&adev->dm.dc);
  
        mutex_destroy(&adev->dm.dc_lock);
  
  
  static int load_dmcu_fw(struct amdgpu_device *adev)
  {
 -      const char *fw_name_dmcu;
 +      const char *fw_name_dmcu = NULL;
        int r;
        const struct dmcu_firmware_header_v1_0 *hdr;
  
        case CHIP_VEGA20:
                return 0;
        case CHIP_RAVEN:
 -              fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
 +              if (ASICREV_IS_PICASSO(adev->external_rev_id))
 +                      fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
 +              else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
 +                      fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
 +              else
 +                      return 0;
                break;
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
@@@ -2591,7 -2586,7 +2593,7 @@@ fill_plane_buffer_attributes(struct amd
                address->type = PLN_ADDR_TYPE_GRAPHICS;
                address->grph.addr.low_part = lower_32_bits(afb->address);
                address->grph.addr.high_part = upper_32_bits(afb->address);
 -      } else {
 +      } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
                uint64_t chroma_addr = afb->address + fb->offsets[1];
  
                plane_size->video.luma_size.x = 0;
@@@ -2966,16 -2961,16 +2968,16 @@@ static void update_stream_scaling_setti
  }
  
  static enum dc_color_depth
 -convert_color_depth_from_display_info(const struct drm_connector *connector)
 +convert_color_depth_from_display_info(const struct drm_connector *connector,
 +                                    const struct drm_connector_state *state)
  {
 -      struct dm_connector_state *dm_conn_state =
 -              to_dm_connector_state(connector->state);
        uint32_t bpc = connector->display_info.bpc;
  
 -      /* TODO: Remove this when there's support for max_bpc in drm */
 -      if (dm_conn_state && bpc > dm_conn_state->max_bpc)
 -              /* Round down to nearest even number. */
 -              bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
 +      if (state) {
 +              bpc = state->max_bpc;
 +              /* Round down to the nearest even number. */
 +              bpc = bpc - (bpc & 1);
 +      }
  
        switch (bpc) {
        case 0:
@@@ -3093,12 -3088,11 +3095,12 @@@ static void adjust_colour_depth_from_di
  
  }
  
 -static void
 -fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 -                                           const struct drm_display_mode *mode_in,
 -                                           const struct drm_connector *connector,
 -                                           const struct dc_stream_state *old_stream)
 +static void fill_stream_properties_from_drm_display_mode(
 +      struct dc_stream_state *stream,
 +      const struct drm_display_mode *mode_in,
 +      const struct drm_connector *connector,
 +      const struct drm_connector_state *connector_state,
 +      const struct dc_stream_state *old_stream)
  {
        struct dc_crtc_timing *timing_out = &stream->timing;
        const struct drm_display_info *info = &connector->display_info;
  
        timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
        timing_out->display_color_depth = convert_color_depth_from_display_info(
 -                      connector);
 +              connector, connector_state);
        timing_out->scan_type = SCANNING_TYPE_NODATA;
        timing_out->hdmi_vic = 0;
  
@@@ -3318,8 -3312,6 +3320,8 @@@ create_stream_for_sink(struct amdgpu_dm
  {
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
 +      const struct drm_connector_state *con_state =
 +              dm_state ? &dm_state->base : NULL;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;
        */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(stream,
 -                      &mode, &aconnector->base, NULL);
 +                      &mode, &aconnector->base, con_state, NULL);
        else
                fill_stream_properties_from_drm_display_mode(stream,
 -                      &mode, &aconnector->base, old_stream);
 +                      &mode, &aconnector->base, con_state, old_stream);
  
        update_stream_scaling_settings(&mode, dm_state, stream);
  
@@@ -3620,6 -3612,9 +3622,6 @@@ int amdgpu_dm_connector_atomic_set_prop
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
 -      } else if (property == adev->mode_info.max_bpc_property) {
 -              dm_new_state->max_bpc = val;
 -              ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                dm_new_state->abm_level = val;
                ret = 0;
@@@ -3665,6 -3660,9 +3667,6 @@@ int amdgpu_dm_connector_atomic_get_prop
        } else if (property == adev->mode_info.underscan_property) {
                *val = dm_state->underscan_enable;
                ret = 0;
 -      } else if (property == adev->mode_info.max_bpc_property) {
 -              *val = dm_state->max_bpc;
 -              ret = 0;
        } else if (property == adev->mode_info.abm_level_property) {
                *val = dm_state->abm_level;
                ret = 0;
        return ret;
  }
  
 +static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
 +{
 +      struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 +
 +      drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
 +}
 +
  static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
  {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
 +      if (aconnector->i2c) {
 +              i2c_del_adapter(&aconnector->i2c->base);
 +              kfree(aconnector->i2c);
 +      }
 +
        kfree(connector);
  }
  
@@@ -3733,6 -3719,7 +3735,6 @@@ void amdgpu_dm_connector_funcs_reset(st
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
 -              state->max_bpc = 8;
  
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
@@@ -3758,6 -3745,7 +3760,6 @@@ amdgpu_dm_connector_atomic_duplicate_st
        new_state->underscan_enable = state->underscan_enable;
        new_state->underscan_hborder = state->underscan_hborder;
        new_state->underscan_vborder = state->underscan_vborder;
 -      new_state->max_bpc = state->max_bpc;
  
        return &new_state->base;
  }
@@@ -3770,8 -3758,7 +3772,8 @@@ static const struct drm_connector_func
        .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
 -      .atomic_get_property = amdgpu_dm_connector_atomic_get_property
 +      .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
 +      .early_unregister = amdgpu_dm_connector_unregister
  };
  
  static int get_modes(struct drm_connector *connector)
@@@ -3966,9 -3953,10 +3968,10 @@@ is_hdr_metadata_different(const struct 
  
  static int
  amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
-                                struct drm_connector_state *new_con_state)
+                                struct drm_atomic_state *state)
  {
-       struct drm_atomic_state *state = new_con_state->state;
+       struct drm_connector_state *new_con_state =
+               drm_atomic_get_new_connector_state(state, conn);
        struct drm_connector_state *old_con_state =
                drm_atomic_get_old_connector_state(state, conn);
        struct drm_crtc *crtc = new_con_state->crtc;
@@@ -4228,9 -4216,6 +4231,9 @@@ static int dm_plane_helper_prepare_fb(s
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
 +      struct list_head list;
 +      struct ttm_validate_buffer tv;
 +      struct ww_acquire_ctx ticket;
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
        obj = new_state->fb->obj[0];
        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
 -      r = amdgpu_bo_reserve(rbo, false);
 -      if (unlikely(r != 0))
 +      INIT_LIST_HEAD(&list);
 +
 +      tv.bo = &rbo->tbo;
 +      tv.num_shared = 1;
 +      list_add(&tv.head, &list);
 +
 +      r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
 +      if (r) {
 +              dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
                return r;
 +      }
  
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
 -              amdgpu_bo_unreserve(rbo);
 +              ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }
  
        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                amdgpu_bo_unpin(rbo);
 -              amdgpu_bo_unreserve(rbo);
 +              ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("%p bind failed\n", rbo);
                return r;
        }
  
        amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
  
 -      amdgpu_bo_unreserve(rbo);
 +      ttm_eu_backoff_reservation(&ticket, &list);
  
        afb->address = amdgpu_bo_gpu_offset(rbo);
  
@@@ -4734,15 -4711,6 +4737,15 @@@ static void amdgpu_dm_connector_ddc_get
                amdgpu_dm_connector->num_modes =
                                drm_add_edid_modes(connector, edid);
  
 +              /* Sort the probed modes before calling
 +               * amdgpu_dm_get_native_mode(), since an EDID can have
 +               * more than one preferred mode. Modes later in the
 +               * probed mode list may be of a higher, preferred
 +               * resolution, e.g. a 3840x2160 preferred timing in the
 +               * base EDID with a 4096x2160 preferred resolution in a
 +               * later DID extension block.
 +               */
 +              drm_mode_sort(&connector->probed_modes);
                amdgpu_dm_get_native_mode(connector);
        } else {
                amdgpu_dm_connector->num_modes = 0;
@@@ -4822,12 -4790,9 +4825,12 @@@ void amdgpu_dm_connector_init_helper(st
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_vborder_property,
                                0);
 -      drm_object_attach_property(&aconnector->base.base,
 -                              adev->mode_info.max_bpc_property,
 -                              0);
 +
 +      drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 +
 +      /* This defaults to the max in the range, but we want 8bpc. */
 +      aconnector->base.state->max_bpc = 8;
 +      aconnector->base.state->max_requested_bpc = 8;
  
        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
            dc_is_dmcu_initialized(adev->dm.dc)) {
@@@ -5110,12 -5075,12 +5113,12 @@@ static int get_cursor_position(struct d
        int x, y;
        int xorigin = 0, yorigin = 0;
  
 -      if (!crtc || !plane->state->fb) {
 -              position->enable = false;
 -              position->x = 0;
 -              position->y = 0;
 +      position->enable = false;
 +      position->x = 0;
 +      position->y = 0;
 +
 +      if (!crtc || !plane->state->fb)
                return 0;
 -      }
  
        if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
            (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
        x = plane->state->crtc_x;
        y = plane->state->crtc_y;
  
 +      if (x <= -amdgpu_crtc->max_cursor_width ||
 +          y <= -amdgpu_crtc->max_cursor_height)
 +              return 0;
 +
        if (crtc->primary->state) {
                /* avivo cursor are offset into the total surface */
                x += crtc->primary->state->src_x >> 16;
@@@ -5276,11 -5237,6 +5279,11 @@@ static void update_freesync_state_on_st
                    amdgpu_dm_vrr_active(new_crtc_state)) {
                        mod_freesync_handle_v_update(dm->freesync_module,
                                                     new_stream, &vrr_params);
 +
 +                      /* Need to call this before the frame ends. */
 +                      dc_stream_adjust_vmin_vmax(dm->dc,
 +                                                 new_crtc_state->stream,
 +                                                 &vrr_params.adjust);
                }
        }
  
@@@ -5619,6 -5575,11 +5622,6 @@@ static void amdgpu_dm_commit_planes(str
                }
  
                if (acrtc_state->stream) {
 -
 -                      if (acrtc_state->freesync_timing_changed)
 -                              bundle->stream_update.adjust =
 -                                      &acrtc_state->stream->adjust;
 -
                        if (acrtc_state->freesync_vrr_info_changed)
                                bundle->stream_update.vrr_infopacket =
                                        &acrtc_state->stream->vrr_infopacket;
                if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
                        bundle->stream_update.abm_level = &acrtc_state->abm_level;
  
 +              /*
 +               * If FreeSync state on the stream has changed then we need to
 +               * re-adjust the min/max bounds now that DC doesn't handle this
 +               * as part of commit.
 +               */
 +              if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
 +                  amdgpu_dm_vrr_active(acrtc_state)) {
 +                      spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
 +                      dc_stream_adjust_vmin_vmax(
 +                              dm->dc, acrtc_state->stream,
 +                              &acrtc_state->vrr_params.adjust);
 +                      spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 +              }
 +
                mutex_lock(&dm->dc_lock);
                dc_commit_updates_for_stream(dm->dc,
                                                     bundle->surface_updates,
@@@ -6523,10 -6470,6 +6526,10 @@@ static bool should_reset_plane(struct d
        if (!new_crtc_state)
                return true;
  
 +      /* CRTC Degamma changes currently require us to recreate planes. */
 +      if (new_crtc_state->color_mgmt_changed)
 +              return true;
 +
        if (drm_atomic_crtc_needs_modeset(new_crtc_state))
                return true;
  
index b0ce44422e905c6a01076bf2b727500a73f5c3c7,2485d8426e5e86083e86a0dae80e2842c8829ad7..811253d7f157bae02b7da4a5bd69f5f5c852f4d0
  #ifndef __AMDGPU_DM_H__
  #define __AMDGPU_DM_H__
  
- #include <drm/drmP.h>
  #include <drm/drm_atomic.h>
+ #include <drm/drm_connector.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_dp_mst_helper.h>
+ #include <drm/drm_plane.h>
  
  /*
   * This file contains the definition for amdgpu_display_manager
@@@ -304,6 -307,7 +307,6 @@@ struct dm_connector_state 
        enum amdgpu_rmx_type scaling;
        uint8_t underscan_vborder;
        uint8_t underscan_hborder;
 -      uint8_t max_bpc;
        bool underscan_enable;
        bool freesync_capable;
        uint8_t abm_level;
index 12bc7ee66b18e831517516a255a8b0f44695b73f,5e1b849684a6890ea059a6baabb8d9d5cdf14e9e..fecd766ece37ea8adbee53266e482cc7f18ddfb1
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "ObjectID.h"
@@@ -1313,8 -1315,6 +1315,8 @@@ static enum bp_result bios_parser_get_e
                        ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
        info->HDMI_6GB_EN = (record->encodercaps &
                        ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
 +      info->DP_IS_USB_C = (record->encodercaps &
 +                      ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
  
        return BP_RESULT_OK;
  }
index eb2204d42337a593ecef77fb779072be84c21cca,0000000000000000000000000000000000000000..cb3f6a74d9e3610140c0bfd78fc2d35982ad735c
mode 100644,000000..100644
--- /dev/null
@@@ -1,134 -1,0 +1,136 @@@
 +/*
 + * Copyright 2012-16 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
 +#include "dal_asic_id.h"
 +#include "dc_types.h"
 +#include "dccg.h"
 +#include "clk_mgr_internal.h"
 +
 +#include "dce100/dce_clk_mgr.h"
 +#include "dce110/dce110_clk_mgr.h"
 +#include "dce112/dce112_clk_mgr.h"
 +#include "dce120/dce120_clk_mgr.h"
 +#include "dcn10/rv1_clk_mgr.h"
 +#include "dcn10/rv2_clk_mgr.h"
 +
 +
 +int clk_mgr_helper_get_active_display_cnt(
 +              struct dc *dc,
 +              struct dc_state *context)
 +{
 +      int i, display_count;
 +
 +      display_count = 0;
 +      for (i = 0; i < context->stream_count; i++) {
 +              const struct dc_stream_state *stream = context->streams[i];
 +
 +              /*
 +               * Only notify for active streams or virtual streams.
 +               * Virtual streams must be notified to work around the
 +               * headless case, since HPD does not fire when the
 +               * system is in S0i2.
 +               */
 +              if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
 +                      display_count++;
 +      }
 +
 +      return display_count;
 +}
 +
 +
 +struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg)
 +{
 +      struct hw_asic_id asic_id = ctx->asic_id;
 +
 +      struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
 +
 +      if (clk_mgr == NULL) {
 +              BREAK_TO_DEBUGGER();
 +              return NULL;
 +      }
 +
 +      switch (asic_id.chip_family) {
 +      case FAMILY_CI:
 +      case FAMILY_KV:
 +              dce_clk_mgr_construct(ctx, clk_mgr);
 +              break;
 +      case FAMILY_CZ:
 +              dce110_clk_mgr_construct(ctx, clk_mgr);
 +              break;
 +      case FAMILY_VI:
 +              if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
 +                              ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
 +                      dce_clk_mgr_construct(ctx, clk_mgr);
 +                      break;
 +              }
 +              if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
 +                              ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
 +                              ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
 +                      dce112_clk_mgr_construct(ctx, clk_mgr);
 +                      break;
 +              }
 +              if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
 +                      dce112_clk_mgr_construct(ctx, clk_mgr);
 +                      break;
 +              }
 +              break;
 +      case FAMILY_AI:
 +              if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
 +                      dce121_clk_mgr_construct(ctx, clk_mgr);
 +              else
 +                      dce120_clk_mgr_construct(ctx, clk_mgr);
 +              break;
 +
 +#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 +      case FAMILY_RV:
 +              if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
 +                      rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
 +                      break;
 +              }
 +              if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
 +                              ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
 +                      rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
 +                      break;
 +              }
 +              break;
 +#endif        /* Family RV */
 +
 +      default:
 +              ASSERT(0); /* Unknown ASIC */
 +              break;
 +      }
 +
 +      return &clk_mgr->base;
 +}
 +
 +void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
 +{
 +      struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 +
 +      kfree(clk_mgr);
 +}
 +
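
dc_clk_mgr_create() above hands callers only the embedded base struct, and dc_destroy_clk_mgr() recovers the surrounding clk_mgr_internal before freeing. A hedged user-space sketch of that embed-and-recover pattern (the to_internal() macro is a hypothetical stand-in for TO_CLK_MGR_INTERNAL()):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct clk_mgr / struct clk_mgr_internal. */
struct clk_mgr {
	int dprefclk_khz;
};

struct clk_mgr_internal {
	struct clk_mgr base;          /* public part, embedded first */
	int dentist_vco_freq_khz;     /* private state */
};

/* Recover the container from a pointer to its embedded base. */
#define to_internal(ptr) \
	((struct clk_mgr_internal *)((char *)(ptr) - \
		offsetof(struct clk_mgr_internal, base)))

static struct clk_mgr *create(void)
{
	struct clk_mgr_internal *clk_mgr = calloc(1, sizeof(*clk_mgr));

	if (!clk_mgr)
		return NULL;
	clk_mgr->base.dprefclk_khz = 600000;
	return &clk_mgr->base;        /* callers never see the container type */
}

static void destroy(struct clk_mgr *base)
{
	free(to_internal(base));      /* free the whole allocation */
}

int main(void)
{
	struct clk_mgr *mgr = create();

	printf("dprefclk: %d kHz\n", mgr->dprefclk_khz);
	destroy(mgr);
	return 0;
}
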
index 31db9b55e11ad59ecdfca827ecaee336e36ab5d4,0000000000000000000000000000000000000000..04b12bb2243d0a4ad0dc340462374793a81a75e7
mode 100644,000000..100644
--- /dev/null
@@@ -1,262 -1,0 +1,265 @@@
 +/*
 + * Copyright 2018 Advanced Micro Devices, Inc.
 + *
 + * Permission is hereby granted, free of charge, to any person obtaining a
 + * copy of this software and associated documentation files (the "Software"),
 + * to deal in the Software without restriction, including without limitation
 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 + * and/or sell copies of the Software, and to permit persons to whom the
 + * Software is furnished to do so, subject to the following conditions:
 + *
 + * The above copyright notice and this permission notice shall be included in
 + * all copies or substantial portions of the Software.
 + *
 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 + * OTHER DEALINGS IN THE SOFTWARE.
 + *
 + * Authors: AMD
 + *
 + */
 +
++#include <linux/slab.h>
++
++#include "reg_helper.h"
 +#include "core_types.h"
 +#include "clk_mgr_internal.h"
 +#include "rv1_clk_mgr.h"
 +#include "dce100/dce_clk_mgr.h"
 +#include "dce112/dce112_clk_mgr.h"
 +#include "rv1_clk_mgr_vbios_smu.h"
 +#include "rv1_clk_mgr_clk.h"
 +
 +static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, struct dc_clocks *new_clocks)
 +{
 +      bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
 +      bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->base.clks.dispclk_khz;
 +      int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
 +      bool cur_dpp_div = clk_mgr->base.clks.dispclk_khz > clk_mgr->base.clks.dppclk_khz;
 +
 +      /* Increasing the clock: the two-step case is current div 0, requested div 1. */
 +      if (dispclk_increase) {
 +              /* already divided by 2, no need to reach target clk in two steps */
 +              if (cur_dpp_div)
 +                      return new_clocks->dispclk_khz;
 +
 +              /* requested disp clk is lower than the maximum supported dpp clk,
 +               * no need to reach target clk with two steps.
 +               */
 +              if (new_clocks->dispclk_khz <= disp_clk_threshold)
 +                      return new_clocks->dispclk_khz;
 +
 +              /* target dpp clk not requested to be divided by 2, still within threshold */
 +              if (!request_dpp_div)
 +                      return new_clocks->dispclk_khz;
 +
 +      } else {
 +              /* Decreasing the clock: the two-step case is current dppclk
 +               * divided by 2 and requested dppclk not divided by 2.
 +               */
 +
 +              /* current dpp clk not divided by 2, no need to ramp */
 +              if (!cur_dpp_div)
 +                      return new_clocks->dispclk_khz;
 +
 +              /* current disp clk is lower than current maximum dpp clk,
 +               * no need to ramp
 +               */
 +              if (clk_mgr->base.clks.dispclk_khz <= disp_clk_threshold)
 +                      return new_clocks->dispclk_khz;
 +
 +              /* requested dpp clk needs to be divided by 2 */
 +              if (request_dpp_div)
 +                      return new_clocks->dispclk_khz;
 +      }
 +
 +      return disp_clk_threshold;
 +}
 +
 +static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks)
 +{
 +      int i;
 +      int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks);
 +      bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
 +
 +      /* set disp clk to dpp clk threshold */
 +
 +      clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold);
 +      clk_mgr->funcs->set_dprefclk(clk_mgr);
 +
 +
 +      /* update the requested dpp clk division option */
 +      for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +              struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
 +
 +              if (!pipe_ctx->plane_state)
 +                      continue;
 +
 +              pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
 +                              pipe_ctx->plane_res.dpp,
 +                              request_dpp_div,
 +                              true);
 +      }
 +
 +      /* If the target clk differs from the dppclk threshold, set it to the target clock */
 +      if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) {
 +              clk_mgr->funcs->set_dispclk(clk_mgr, new_clocks->dispclk_khz);
 +              clk_mgr->funcs->set_dprefclk(clk_mgr);
 +      }
 +
 +
 +      clk_mgr->base.clks.dispclk_khz = new_clocks->dispclk_khz;
 +      clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
 +      clk_mgr->base.clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
 +}
 +
 +static void rv1_update_clocks(struct clk_mgr *clk_mgr_base,
 +                      struct dc_state *context,
 +                      bool safe_to_lower)
 +{
 +      struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 +      struct dc *dc = clk_mgr_base->ctx->dc;
 +      struct dc_debug_options *debug = &dc->debug;
 +      struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
 +      struct pp_smu_funcs_rv *pp_smu = NULL;
 +      bool send_request_to_increase = false;
 +      bool send_request_to_lower = false;
 +      int display_count;
 +
 +      bool enter_display_off = false;
 +
 +      ASSERT(clk_mgr->pp_smu);
 +
 +      pp_smu = &clk_mgr->pp_smu->rv_funcs;
 +
 +      display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
 +
 +      if (display_count == 0)
 +              enter_display_off = true;
 +
 +      if (enter_display_off == safe_to_lower) {
 +              /*
 +               * Notify the SMU of the number of active displays;
 +               * if the function pointer is not set up, this message is
 +               * sent as part of pplib_apply_display_requirements.
 +               */
 +              if (pp_smu->set_display_count)
 +                      pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
 +      }
 +
 +      if (new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz
 +                      || new_clocks->phyclk_khz > clk_mgr_base->clks.phyclk_khz
 +                      || new_clocks->fclk_khz > clk_mgr_base->clks.fclk_khz
 +                      || new_clocks->dcfclk_khz > clk_mgr_base->clks.dcfclk_khz)
 +              send_request_to_increase = true;
 +
 +      if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) {
 +              clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
 +              send_request_to_lower = true;
 +      }
 +
 +      // F Clock
 +      if (debug->force_fclk_khz != 0)
 +              new_clocks->fclk_khz = debug->force_fclk_khz;
 +
 +      if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr_base->clks.fclk_khz)) {
 +              clk_mgr_base->clks.fclk_khz = new_clocks->fclk_khz;
 +              send_request_to_lower = true;
 +      }
 +
 +      //DCF Clock
 +      if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
 +              clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
 +              send_request_to_lower = true;
 +      }
 +
 +      if (should_set_clock(safe_to_lower,
 +                      new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
 +              clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
 +              send_request_to_lower = true;
 +      }
 +
 +      /* Raise dcfclk before dppclk so that there is
 +       * enough voltage to run dppclk.
 +       */
 +      if (send_request_to_increase) {
 +              /* use dcfclk to request voltage */
 +              if (pp_smu->set_hard_min_fclk_by_freq &&
 +                              pp_smu->set_hard_min_dcfclk_by_freq &&
 +                              pp_smu->set_min_deep_sleep_dcfclk) {
 +                      pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
 +                      pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
 +                      pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
 +              }
 +      }
 +
 +      /* dcn1 dppclk is tied to dispclk */
 +      /* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */
 +      if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)
 +                      || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) {
 +              ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks);
 +              clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
 +              send_request_to_lower = true;
 +      }
 +
 +      if (!send_request_to_increase && send_request_to_lower) {
 +              /* use dcfclk to request voltage */
 +              if (pp_smu->set_hard_min_fclk_by_freq &&
 +                              pp_smu->set_hard_min_dcfclk_by_freq &&
 +                              pp_smu->set_min_deep_sleep_dcfclk) {
 +                      pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
 +                      pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
 +                      pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
 +              }
 +      }
 +}
 +
 +static struct clk_mgr_funcs rv1_clk_funcs = {
 +      .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
 +      .update_clocks = rv1_update_clocks,
 +};
 +
 +static struct clk_mgr_internal_funcs rv1_clk_internal_funcs = {
 +      .set_dispclk = rv1_vbios_smu_set_dispclk,
 +      .set_dprefclk = dce112_set_dprefclk
 +};
 +
 +void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu)
 +{
 +      struct dc_debug_options *debug = &ctx->dc->debug;
 +      struct dc_bios *bp = ctx->dc_bios;
 +      struct dc_firmware_info fw_info = { { 0 } };
 +
 +      clk_mgr->base.ctx = ctx;
 +      clk_mgr->pp_smu = pp_smu;
 +      clk_mgr->base.funcs = &rv1_clk_funcs;
 +      clk_mgr->funcs = &rv1_clk_internal_funcs;
 +
 +      clk_mgr->dfs_bypass_disp_clk = 0;
 +
 +      clk_mgr->dprefclk_ss_percentage = 0;
 +      clk_mgr->dprefclk_ss_divider = 1000;
 +      clk_mgr->ss_on_dprefclk = false;
 +      clk_mgr->base.dprefclk_khz = 600000;
 +
 +      if (bp->integrated_info)
 +              clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
 +      if (clk_mgr->dentist_vco_freq_khz == 0) {
 +              bp->funcs->get_firmware_info(bp, &fw_info);
 +              clk_mgr->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
 +              if (clk_mgr->dentist_vco_freq_khz == 0)
 +                      clk_mgr->dentist_vco_freq_khz = 3600000;
 +      }
 +
 +      if (!debug->disable_dfs_bypass && bp->integrated_info)
 +              if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
 +                      clk_mgr->dfs_bypass_enabled = true;
 +
 +      dce_clock_read_ss_info(clk_mgr);
 +}
 +
 +
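
A reduced model of the rv1_determine_dppclk_threshold() decision above: an intermediate dispclk step is only needed when raising dispclk past what an undivided dppclk can follow, or when lowering it while the divider is engaged. The clock values in main() are illustrative only.

#include <stdio.h>

struct clks {
	int dispclk_khz;
	int dppclk_khz;
	int max_supported_dppclk_khz;
};

/* Return the intermediate dispclk step, or the target if one step suffices. */
static int dppclk_threshold(struct clks cur, struct clks new)
{
	int request_div = new.dispclk_khz > new.dppclk_khz;
	int cur_div = cur.dispclk_khz > cur.dppclk_khz;

	if (new.dispclk_khz > cur.dispclk_khz) {
		/* increasing: one step is safe if already divided, under the
		 * threshold, or no divider requested */
		if (cur_div || new.dispclk_khz <= new.max_supported_dppclk_khz ||
				!request_div)
			return new.dispclk_khz;
	} else {
		/* decreasing: one step is safe unless leaving divided mode
		 * from above the threshold */
		if (!cur_div || cur.dispclk_khz <= new.max_supported_dppclk_khz ||
				request_div)
			return new.dispclk_khz;
	}
	return new.max_supported_dppclk_khz;
}

int main(void)
{
	struct clks cur = { 400000, 400000, 600000 };
	struct clks new = { 800000, 400000, 600000 };

	/* dispclk rises past the dpp threshold with a divider request,
	 * so the ramp stops at 600000 kHz first. */
	printf("first step: %d kHz\n", dppclk_threshold(cur, new));
	return 0;
}
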
index d89a29bd878592af16eb3c35c1d659c49257e7d9,03dec40de361797bb5168711ce5ab41c2fda57a2..ed466087c8b5cdb7efd5f695971b6f7b16d05905
@@@ -22,6 -22,8 +22,8 @@@
   * Authors: AMD
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "dc.h"
@@@ -33,7 -35,6 +35,7 @@@
  
  #include "resource.h"
  
 +#include "clk_mgr.h"
  #include "clock_source.h"
  #include "dc_bios_types.h"
  
@@@ -170,14 -171,9 +172,14 @@@ static bool create_links
                link = link_create(&link_init_params);
  
                if (link) {
 -                      dc->links[dc->link_count] = link;
 -                      link->dc = dc;
 -                      ++dc->link_count;
 +                      if (dc->config.edp_not_connected &&
 +                                      link->connector_signal == SIGNAL_TYPE_EDP) {
 +                              link_destroy(&link);
 +                      } else {
 +                              dc->links[dc->link_count] = link;
 +                              link->dc = dc;
 +                              ++dc->link_count;
 +                      }
                }
        }
  
@@@ -263,7 -259,7 +265,7 @@@ bool dc_stream_adjust_vmin_vmax(struct 
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
  
 -              if (pipe->stream == stream && pipe->stream_res.stream_enc) {
 +              if (pipe->stream == stream && pipe->stream_res.tg) {
                        pipe->stream->adjust = *adjust;
                        dc->hwss.set_drr(&pipe,
                                        1,
@@@ -490,6 -486,128 +492,6 @@@ void dc_stream_set_static_screen_events
        dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
  }
  
 -void dc_link_set_drive_settings(struct dc *dc,
 -                              struct link_training_settings *lt_settings,
 -                              const struct dc_link *link)
 -{
 -
 -      int i;
 -
 -      for (i = 0; i < dc->link_count; i++) {
 -              if (dc->links[i] == link)
 -                      break;
 -      }
 -
 -      if (i >= dc->link_count)
 -              ASSERT_CRITICAL(false);
 -
 -      dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
 -}
 -
 -void dc_link_perform_link_training(struct dc *dc,
 -                                 struct dc_link_settings *link_setting,
 -                                 bool skip_video_pattern)
 -{
 -      int i;
 -
 -      for (i = 0; i < dc->link_count; i++)
 -              dc_link_dp_perform_link_training(
 -                      dc->links[i],
 -                      link_setting,
 -                      skip_video_pattern);
 -}
 -
 -void dc_link_set_preferred_link_settings(struct dc *dc,
 -                                       struct dc_link_settings *link_setting,
 -                                       struct dc_link *link)
 -{
 -      int i;
 -      struct pipe_ctx *pipe;
 -      struct dc_stream_state *link_stream;
 -      struct dc_link_settings store_settings = *link_setting;
 -
 -      link->preferred_link_setting = store_settings;
 -
 -      /* Retrain with preferred link settings only relevant for
 -       * DP signal type
 -       */
 -      if (!dc_is_dp_signal(link->connector_signal))
 -              return;
 -
 -      for (i = 0; i < MAX_PIPES; i++) {
 -              pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 -              if (pipe->stream && pipe->stream->link) {
 -                      if (pipe->stream->link == link)
 -                              break;
 -              }
 -      }
 -
 -      /* Stream not found */
 -      if (i == MAX_PIPES)
 -              return;
 -
 -      link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
 -
 -      /* Cannot retrain link if backend is off */
 -      if (link_stream->dpms_off)
 -              return;
 -
 -      if (link_stream)
 -              decide_link_settings(link_stream, &store_settings);
 -
 -      if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
 -              (store_settings.link_rate != LINK_RATE_UNKNOWN))
 -              dp_retrain_link_dp_test(link, &store_settings, false);
 -}
 -
 -void dc_link_enable_hpd(const struct dc_link *link)
 -{
 -      dc_link_dp_enable_hpd(link);
 -}
 -
 -void dc_link_disable_hpd(const struct dc_link *link)
 -{
 -      dc_link_dp_disable_hpd(link);
 -}
 -
 -
 -void dc_link_set_test_pattern(struct dc_link *link,
 -                            enum dp_test_pattern test_pattern,
 -                            const struct link_training_settings *p_link_settings,
 -                            const unsigned char *p_custom_pattern,
 -                            unsigned int cust_pattern_size)
 -{
 -      if (link != NULL)
 -              dc_link_dp_set_test_pattern(
 -                      link,
 -                      test_pattern,
 -                      p_link_settings,
 -                      p_custom_pattern,
 -                      cust_pattern_size);
 -}
 -
 -uint32_t dc_link_bandwidth_kbps(
 -      const struct dc_link *link,
 -      const struct dc_link_settings *link_setting)
 -{
 -      uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
 -
 -      link_bw_kbps *= 8;   /* 8 bits per byte*/
 -      link_bw_kbps *= link_setting->lane_count;
 -
 -      return link_bw_kbps;
 -
 -}
 -
 -const struct dc_link_settings *dc_link_get_link_cap(
 -              const struct dc_link *link)
 -{
 -      if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
 -                      link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
 -              return &link->preferred_link_setting;
 -      return &link->verified_link_cap;
 -}
 -
  static void destruct(struct dc *dc)
  {
        dc_release_state(dc->current_state);
  
        destroy_links(dc);
  
 +      if (dc->clk_mgr) {
 +              dc_destroy_clk_mgr(dc->clk_mgr);
 +              dc->clk_mgr = NULL;
 +      }
 +
        dc_destroy_resource_pool(dc);
  
        if (dc->ctx->gpio_service)
@@@ -645,10 -758,6 +647,10 @@@ static bool construct(struct dc *dc
        if (!dc->res_pool)
                goto fail;
  
 +      dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
 +      if (!dc->clk_mgr)
 +              goto fail;
 +
        /* Creation of current_state must occur after dc->dml
         * is initialized in dc_create_resource_pool because
         * on creation it copies the contents of dc->dml
@@@ -1029,6 -1138,10 +1031,6 @@@ static enum dc_status dc_commit_state_n
        /* Program all planes within new context*/
        for (i = 0; i < context->stream_count; i++) {
                const struct dc_link *link = context->streams[i]->link;
 -              struct dc_stream_status *status;
 -
 -              if (context->streams[i]->apply_seamless_boot_optimization)
 -                      context->streams[i]->apply_seamless_boot_optimization = false;
  
                if (!context->streams[i]->mode_changed)
                        continue;
                        }
                }
  
 -              status = dc_stream_get_status_from_state(context, context->streams[i]);
 -              context->streams[i]->out.otg_offset = status->primary_otg_inst;
 -
                CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
                                context->streams[i]->timing.h_addressable,
                                context->streams[i]->timing.v_addressable,
@@@ -1217,94 -1333,71 +1219,94 @@@ static bool is_surface_in_context
  static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
  {
        union surface_update_flags *update_flags = &u->surface->update_flags;
 +      enum surface_update_type update_type = UPDATE_TYPE_FAST;
  
        if (!u->plane_info)
                return UPDATE_TYPE_FAST;
  
 -      if (u->plane_info->color_space != u->surface->color_space)
 +      if (u->plane_info->color_space != u->surface->color_space) {
                update_flags->bits.color_space_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
  
 -      if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
 +      if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
                update_flags->bits.horizontal_mirror_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
  
 -      if (u->plane_info->rotation != u->surface->rotation)
 +      if (u->plane_info->rotation != u->surface->rotation) {
                update_flags->bits.rotation_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_FULL);
 +      }
  
 -      if (u->plane_info->format != u->surface->format)
 +      if (u->plane_info->format != u->surface->format) {
                update_flags->bits.pixel_format_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_FULL);
 +      }
  
 -      if (u->plane_info->stereo_format != u->surface->stereo_format)
 +      if (u->plane_info->stereo_format != u->surface->stereo_format) {
                update_flags->bits.stereo_format_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_FULL);
 +      }
  
 -      if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
 +      if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
                update_flags->bits.per_pixel_alpha_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
  
 -      if (u->plane_info->global_alpha_value != u->surface->global_alpha_value)
 +      if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
                update_flags->bits.global_alpha_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
 +
 +      if (u->plane_info->sdr_white_level != u->surface->sdr_white_level) {
 +              update_flags->bits.sdr_white_level = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
  
        if (u->plane_info->dcc.enable != u->surface->dcc.enable
                        || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
 -                      || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
 +                      || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) {
                update_flags->bits.dcc_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
  
        if (resource_pixel_format_to_bpp(u->plane_info->format) !=
 -                      resource_pixel_format_to_bpp(u->surface->format))
 +                      resource_pixel_format_to_bpp(u->surface->format)) {
                /* different bytes per element will require full bandwidth
                 * and DML calculation
                 */
                update_flags->bits.bpp_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_FULL);
 +      }
  
        if (u->plane_info->plane_size.grph.surface_pitch != u->surface->plane_size.grph.surface_pitch
                        || u->plane_info->plane_size.video.luma_pitch != u->surface->plane_size.video.luma_pitch
 -                      || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch)
 +                      || u->plane_info->plane_size.video.chroma_pitch != u->surface->plane_size.video.chroma_pitch) {
                update_flags->bits.plane_size_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +      }
  
  
        if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
                        sizeof(union dc_tiling_info)) != 0) {
                update_flags->bits.swizzle_change = 1;
 +              elevate_update_type(&update_type, UPDATE_TYPE_MED);
 +
                /* todo: below are HW dependent, we should add a hook to
                 * DCE/N resource and validate there.
                 */
 -              if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
 +              if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
                        /* swizzled mode requires RQ to be set up properly,
                         * thus need to run DML to calculate RQ settings
                         */
                        update_flags->bits.bandwidth_change = 1;
 +                      elevate_update_type(&update_type, UPDATE_TYPE_FULL);
 +              }
        }
  
 -      if (update_flags->bits.rotation_change
 -                      || update_flags->bits.stereo_format_change
 -                      || update_flags->bits.pixel_format_change
 -                      || update_flags->bits.bpp_change
 -                      || update_flags->bits.bandwidth_change
 -                      || update_flags->bits.output_tf_change)
 -              return UPDATE_TYPE_FULL;
 -
 -      return update_flags->raw ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST;
 +      /* This should be UPDATE_TYPE_FAST if nothing has changed. */
 +      return update_type;
  }
  
  static enum surface_update_type get_scaling_info_update_type(
@@@ -1384,9 -1477,6 +1386,9 @@@ static enum surface_update_type det_sur
        type = get_scaling_info_update_type(u);
        elevate_update_type(&overall_type, type);
  
 +      if (u->flip_addr)
 +              update_flags->bits.addr_update = 1;
 +
        if (u->in_transfer_func)
                update_flags->bits.in_transfer_func_change = 1;
  
@@@ -1623,6 -1713,13 +1625,6 @@@ static void commit_planes_do_stream_upd
                        pipe_ctx->stream &&
                        pipe_ctx->stream == stream) {
  
 -                      /* Fast update*/
 -                      // VRR program can be done as part of FAST UPDATE
 -                      if (stream_update->adjust)
 -                              dc->hwss.set_drr(&pipe_ctx, 1,
 -                                      stream_update->adjust->v_total_min,
 -                                      stream_update->adjust->v_total_max);
 -
                        if (stream_update->periodic_interrupt0 &&
                                        dc->hwss.setup_periodic_interrupt)
                                dc->hwss.setup_periodic_interrupt(pipe_ctx, VLINE0);
@@@ -1697,15 -1794,10 +1699,15 @@@ static void commit_planes_for_stream(st
        if (dc->optimize_seamless_boot && surface_count > 0) {
                /* Optimize seamless boot flag keeps clocks and watermarks high until
                 * first flip. After first flip, optimization is required to lower
 -               * bandwidth.
 +               * bandwidth. Note that UEFI is expected to light up only a
 +               * single display during POST, so we expect only one stream
 +               * with the seamless boot flag set.
                 */
 -              dc->optimize_seamless_boot = false;
 -              dc->optimized_required = true;
 +              if (stream->apply_seamless_boot_optimization) {
 +                      stream->apply_seamless_boot_optimization = false;
 +                      dc->optimize_seamless_boot = false;
 +                      dc->optimized_required = true;
 +              }
        }
  
        if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
  
                dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
        }
 +
 +      // Fire manual trigger only when bottom plane is flipped
 +      for (j = 0; j < dc->res_pool->pipe_count; j++) {
 +              struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 +
 +              if (pipe_ctx->bottom_pipe ||
 +                              !pipe_ctx->stream ||
 +                              pipe_ctx->stream != stream ||
 +                              !pipe_ctx->plane_state->update_flags.bits.addr_update)
 +                      continue;
 +
 +              if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
 +                      pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
 +      }
  }
  
  void dc_commit_updates_for_stream(struct dc *dc,
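
The rewritten get_plane_info_update_type() above accumulates the strongest update class seen so far instead of re-deriving it from flags at the end. A minimal sketch of that elevate pattern, assuming the FAST < MED < FULL enum ordering that dc uses:

#include <stdio.h>

enum surface_update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FULL };

/* Raise *type to at least new_type; never lower it. */
static void elevate_update_type(enum surface_update_type *type,
				enum surface_update_type new_type)
{
	if (new_type > *type)
		*type = new_type;
}

int main(void)
{
	enum surface_update_type t = UPDATE_TYPE_FAST;

	elevate_update_type(&t, UPDATE_TYPE_MED);  /* e.g. color space change */
	elevate_update_type(&t, UPDATE_TYPE_FULL); /* e.g. pixel format change */
	elevate_update_type(&t, UPDATE_TYPE_MED);  /* a later MED cannot lower it */

	printf("final update type: %d (2 == FULL)\n", t);
	return 0;
}
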
index ca50ede3718320a966c21e2decb81ce0d5f599a3,9b01078d6ec5cf1d9cff1047ff58ef1812a028b9..c026b393f3c5f0ef24cef708aa0d983b7ed6394d
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/delay.h>
  #include "dm_services.h"
  #include "core_types.h"
  #include "timing_generator.h"
@@@ -45,10 -47,8 +47,10 @@@ enum dc_color_space_type 
        COLOR_SPACE_RGB_LIMITED_TYPE,
        COLOR_SPACE_YCBCR601_TYPE,
        COLOR_SPACE_YCBCR709_TYPE,
 +      COLOR_SPACE_YCBCR2020_TYPE,
        COLOR_SPACE_YCBCR601_LIMITED_TYPE,
 -      COLOR_SPACE_YCBCR709_LIMITED_TYPE
 +      COLOR_SPACE_YCBCR709_LIMITED_TYPE,
 +      COLOR_SPACE_YCBCR709_BLACK_TYPE,
  };
  
  static const struct tg_color black_color_format[] = {
@@@ -82,6 -82,7 +84,6 @@@ static const struct out_csc_color_matri
        { COLOR_SPACE_YCBCR709_TYPE,
                { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
                                0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
 -
        /* TODO: correct values below */
        { COLOR_SPACE_YCBCR601_LIMITED_TYPE,
                { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
        { COLOR_SPACE_YCBCR709_LIMITED_TYPE,
                { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
                                0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
 +      { COLOR_SPACE_YCBCR2020_TYPE,
 +              { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
 +                              0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
 +      { COLOR_SPACE_YCBCR709_BLACK_TYPE,
 +              { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
 +                              0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
  };
  
  static bool is_rgb_type(
@@@ -156,16 -151,6 +158,16 @@@ static bool is_ycbcr709_type
        return ret;
  }
  
 +static bool is_ycbcr2020_type(
 +      enum dc_color_space color_space)
 +{
 +      bool ret = false;
 +
 +      if (color_space == COLOR_SPACE_2020_YCBCR)
 +              ret = true;
 +      return ret;
 +}
 +
  static bool is_ycbcr709_limited_type(
                enum dc_color_space color_space)
  {
@@@ -191,12 -176,7 +193,12 @@@ enum dc_color_space_type get_color_spac
                type = COLOR_SPACE_YCBCR601_LIMITED_TYPE;
        else if (is_ycbcr709_limited_type(color_space))
                type = COLOR_SPACE_YCBCR709_LIMITED_TYPE;
 -
 +      else if (is_ycbcr2020_type(color_space))
 +              type = COLOR_SPACE_YCBCR2020_TYPE;
 +      else if (color_space == COLOR_SPACE_YCBCR709)
 +              type = COLOR_SPACE_YCBCR709_BLACK_TYPE;
 +      else if (color_space == COLOR_SPACE_YCBCR709_BLACK)
 +              type = COLOR_SPACE_YCBCR709_BLACK_TYPE;
        return type;
  }
  
@@@ -228,7 -208,6 +230,7 @@@ void color_space_to_black_color
        switch (colorspace) {
        case COLOR_SPACE_YCBCR601:
        case COLOR_SPACE_YCBCR709:
 +      case COLOR_SPACE_YCBCR709_BLACK:
        case COLOR_SPACE_YCBCR601_LIMITED:
        case COLOR_SPACE_YCBCR709_LIMITED:
        case COLOR_SPACE_2020_YCBCR:
index c9e0b126777b914ac4f8d0c1c88b18b8136f341c,bcb20e71b920539593704b2abdc1232edcb1cc23..f48863cf796b6aa16aa418ae4dec087bc65421e3
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "atom.h"
  #include "dm_helpers.h"
@@@ -42,7 -44,6 +44,7 @@@
  #include "fixed31_32.h"
  #include "dpcd_defs.h"
  #include "dmcu.h"
 +#include "hw/clk_mgr.h"
  
  #define DC_LOGGER_INIT(logger)
  
@@@ -705,7 -706,6 +707,7 @@@ bool dc_link_detect(struct dc_link *lin
  
        if (new_connection_type != dc_connection_none) {
                link->type = new_connection_type;
 +              link->link_state_valid = false;
  
                /* From Disconnected-to-Connected. */
                switch (link->connector_signal) {
                        sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
  
                /* Connectivity log: detection */
 -              for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
 +              for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
                        CONN_DATA_DETECT(link,
 -                                      &sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
 -                                      EDID_BLOCK_SIZE,
 +                                      &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
 +                                      DC_EDID_BLOCK_SIZE,
                                        "%s: [Block %d] ", sink->edid_caps.display_name, i);
                }
  
@@@ -2339,8 -2339,7 +2341,8 @@@ void core_link_resume(struct dc_link *l
  static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
  {
        struct fixed31_32 mbytes_per_sec;
 -      uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
 +      uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
 +                      &stream->link->cur_link_settings);
        link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
  
        mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
@@@ -2634,8 -2633,6 +2636,8 @@@ void core_link_enable_stream
                        stream->phy_pix_clk,
                        pipe_ctx->stream_res.audio != NULL);
  
 +      pipe_ctx->stream->link->link_state_valid = true;
 +
        if (dc_is_dvi_signal(pipe_ctx->stream->signal))
                pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
                        pipe_ctx->stream_res.stream_enc,
@@@ -2718,37 -2715,17 +2720,37 @@@ void core_link_disable_stream(struct pi
  {
        struct dc  *core_dc = pipe_ctx->stream->ctx->dc;
        struct dc_stream_state *stream = pipe_ctx->stream;
 +      struct dc_link *link = stream->sink->link;
  
        core_dc->hwss.blank_stream(pipe_ctx);
  
        if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                deallocate_mst_payload(pipe_ctx);
  
 -      if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
 -              dal_ddc_service_write_scdc_data(
 -                      stream->link->ddc, 0,
 -                      stream->timing.flags.LTE_340MCSC_SCRAMBLE);
 +      if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
 +              struct ext_hdmi_settings settings = {0};
 +              enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
  
 +              unsigned short masked_chip_caps = link->chip_caps &
 +                              EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
 +              // Need to inform the sink that it is going to use legacy HDMI mode.
 +              dal_ddc_service_write_scdc_data(
 +                      link->ddc,
 +                      165000, // vbios only handles 165 MHz.
 +                      false);
 +              if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
 +                      /* DP159, Retimer settings */
 +                      if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
 +                              write_i2c_retimer_setting(pipe_ctx,
 +                                              false, false, &settings);
 +                      else
 +                              write_i2c_default_retimer_setting(pipe_ctx,
 +                                              false, false);
 +              } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
 +                      /* PI3EQX1204, Redriver settings */
 +                      write_i2c_redriver_setting(pipe_ctx, false);
 +              }
 +      }
        core_dc->hwss.disable_stream(pipe_ctx, option);
  
        disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
@@@ -2859,127 -2836,3 +2861,127 @@@ uint32_t dc_bandwidth_in_kbps_from_timi
        return kbps;
  
  }
 +
 +void dc_link_set_drive_settings(struct dc *dc,
 +                              struct link_training_settings *lt_settings,
 +                              const struct dc_link *link)
 +{
 +
 +      int i;
 +
 +      for (i = 0; i < dc->link_count; i++) {
 +              if (dc->links[i] == link)
 +                      break;
 +      }
 +
 +      if (i >= dc->link_count)
 +              ASSERT_CRITICAL(false);
 +
 +      dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
 +}
 +
 +void dc_link_perform_link_training(struct dc *dc,
 +                                 struct dc_link_settings *link_setting,
 +                                 bool skip_video_pattern)
 +{
 +      int i;
 +
 +      for (i = 0; i < dc->link_count; i++)
 +              dc_link_dp_perform_link_training(
 +                      dc->links[i],
 +                      link_setting,
 +                      skip_video_pattern);
 +}
 +
 +void dc_link_set_preferred_link_settings(struct dc *dc,
 +                                       struct dc_link_settings *link_setting,
 +                                       struct dc_link *link)
 +{
 +      int i;
 +      struct pipe_ctx *pipe;
 +      struct dc_stream_state *link_stream;
 +      struct dc_link_settings store_settings = *link_setting;
 +
 +      link->preferred_link_setting = store_settings;
 +
 +      /* Retraining with preferred link settings is only relevant
 +       * for the DP signal type.
 +       */
 +      if (!dc_is_dp_signal(link->connector_signal))
 +              return;
 +
 +      for (i = 0; i < MAX_PIPES; i++) {
 +              pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 +              if (pipe->stream && pipe->stream->link) {
 +                      if (pipe->stream->link == link)
 +                              break;
 +              }
 +      }
 +
 +      /* Stream not found */
 +      if (i == MAX_PIPES)
 +              return;
 +
 +      link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
 +
 +      /* Cannot retrain link if backend is off */
 +      if (link_stream->dpms_off)
 +              return;
 +
 +      if (link_stream)
 +              decide_link_settings(link_stream, &store_settings);
 +
 +      if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
 +              (store_settings.link_rate != LINK_RATE_UNKNOWN))
 +              dp_retrain_link_dp_test(link, &store_settings, false);
 +}
 +
 +void dc_link_enable_hpd(const struct dc_link *link)
 +{
 +      dc_link_dp_enable_hpd(link);
 +}
 +
 +void dc_link_disable_hpd(const struct dc_link *link)
 +{
 +      dc_link_dp_disable_hpd(link);
 +}
 +
 +
 +void dc_link_set_test_pattern(struct dc_link *link,
 +                            enum dp_test_pattern test_pattern,
 +                            const struct link_training_settings *p_link_settings,
 +                            const unsigned char *p_custom_pattern,
 +                            unsigned int cust_pattern_size)
 +{
 +      if (link != NULL)
 +              dc_link_dp_set_test_pattern(
 +                      link,
 +                      test_pattern,
 +                      p_link_settings,
 +                      p_custom_pattern,
 +                      cust_pattern_size);
 +}
 +
 +uint32_t dc_link_bandwidth_kbps(
 +      const struct dc_link *link,
 +      const struct dc_link_settings *link_setting)
 +{
 +      uint32_t link_bw_kbps =
 +              link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes per sec */
 +
 +      link_bw_kbps *= 8;   /* 8 bits per byte */
 +      link_bw_kbps *= link_setting->lane_count;
 +
 +      return link_bw_kbps;
 +
 +}
 +
 +const struct dc_link_settings *dc_link_get_link_cap(
 +              const struct dc_link *link)
 +{
 +      if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
 +                      link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
 +              return &link->preferred_link_setting;
 +      return &link->verified_link_cap;
 +}
 +
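
dc_link_bandwidth_kbps() above is rate x 8 x lanes. A worked check, assuming the dc conventions that LINK_RATE_REF_FREQ_IN_KHZ is 27000 (27 MHz) and that link_rate is encoded in 0.27 Gbps units (so 0x14 is HBR2):

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000 /* assumed dc value */
#define LINK_RATE_HIGH2 0x14            /* HBR2, 5.4 Gbps per lane */

static uint32_t link_bandwidth_kbps(uint32_t link_rate, uint32_t lane_count)
{
	uint32_t kbps = link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes/sec */

	kbps *= 8;          /* one data byte per 8b/10b symbol -> bits */
	kbps *= lane_count;
	return kbps;
}

int main(void)
{
	/* 4-lane HBR2: 20 * 27000 * 8 * 4 = 17,280,000 kbps of payload. */
	printf("%u kbps\n", link_bandwidth_kbps(LINK_RATE_HIGH2, 4));
	return 0;
}
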
index 0a2e0fe75405dec32ad9e5674b55a0e340502d15,5ad1c62e9e4d77b93e008035cb1335aa71efc202..1b5756590a6a07683842ef6ea08320ad2a092ce8
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "resource.h"
@@@ -93,8 -96,10 +96,8 @@@ enum dce_version resource_parse_asic_id
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case FAMILY_RV:
                dc_version = DCN_VERSION_1_0;
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
                if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
                        dc_version = DCN_VERSION_1_01;
 -#endif
                break;
  #endif
        default:
@@@ -145,7 -150,9 +148,7 @@@ struct resource_pool *dc_create_resourc
  
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case DCN_VERSION_1_0:
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        case DCN_VERSION_1_01:
 -#endif
                res_pool = dcn10_create_resource_pool(init_data, dc);
                break;
  #endif
@@@ -1180,27 -1187,24 +1183,27 @@@ static int acquire_first_split_pipe
        int i;
  
        for (i = 0; i < pool->pipe_count; i++) {
 -              struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
 -
 -              if (pipe_ctx->top_pipe &&
 -                              pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
 -                      pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
 -                      if (pipe_ctx->bottom_pipe)
 -                              pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
 -
 -                      memset(pipe_ctx, 0, sizeof(*pipe_ctx));
 -                      pipe_ctx->stream_res.tg = pool->timing_generators[i];
 -                      pipe_ctx->plane_res.hubp = pool->hubps[i];
 -                      pipe_ctx->plane_res.ipp = pool->ipps[i];
 -                      pipe_ctx->plane_res.dpp = pool->dpps[i];
 -                      pipe_ctx->stream_res.opp = pool->opps[i];
 -                      pipe_ctx->plane_res.mpcc_inst = pool->dpps[i]->inst;
 -                      pipe_ctx->pipe_idx = i;
 -
 -                      pipe_ctx->stream = stream;
 +              struct pipe_ctx *split_pipe = &res_ctx->pipe_ctx[i];
 +
 +              if (split_pipe->top_pipe && !dc_res_is_odm_head_pipe(split_pipe) &&
 +                              split_pipe->top_pipe->plane_state == split_pipe->plane_state) {
 +                      split_pipe->top_pipe->bottom_pipe = split_pipe->bottom_pipe;
 +                      if (split_pipe->bottom_pipe)
 +                              split_pipe->bottom_pipe->top_pipe = split_pipe->top_pipe;
 +
 +                      if (split_pipe->top_pipe->plane_state)
 +                              resource_build_scaling_params(split_pipe->top_pipe);
 +
 +                      memset(split_pipe, 0, sizeof(*split_pipe));
 +                      split_pipe->stream_res.tg = pool->timing_generators[i];
 +                      split_pipe->plane_res.hubp = pool->hubps[i];
 +                      split_pipe->plane_res.ipp = pool->ipps[i];
 +                      split_pipe->plane_res.dpp = pool->dpps[i];
 +                      split_pipe->stream_res.opp = pool->opps[i];
 +                      split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
 +                      split_pipe->pipe_idx = i;
 +
 +                      split_pipe->stream = stream;
                        return i;
                }
        }
@@@ -1646,6 -1650,46 +1649,6 @@@ static int acquire_first_free_pipe
        return -1;
  }
  
 -static struct stream_encoder *find_first_free_match_stream_enc_for_link(
 -              struct resource_context *res_ctx,
 -              const struct resource_pool *pool,
 -              struct dc_stream_state *stream)
 -{
 -      int i;
 -      int j = -1;
 -      struct dc_link *link = stream->link;
 -
 -      for (i = 0; i < pool->stream_enc_count; i++) {
 -              if (!res_ctx->is_stream_enc_acquired[i] &&
 -                              pool->stream_enc[i]) {
 -                      /* Store first available for MST second display
 -                       * in daisy chain use case */
 -                      j = i;
 -                      if (pool->stream_enc[i]->id ==
 -                                      link->link_enc->preferred_engine)
 -                              return pool->stream_enc[i];
 -              }
 -      }
 -
 -      /*
 -       * below can happen in cases when stream encoder is acquired:
 -       * 1) for second MST display in chain, so preferred engine already
 -       * acquired;
 -       * 2) for another link, which preferred engine already acquired by any
 -       * MST configuration.
 -       *
 -       * If signal is of DP type and preferred engine not found, return last available
 -       *
 -       * TODO - This is just a patch up and a generic solution is
 -       * required for non DP connectors.
 -       */
 -
 -      if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
 -              return pool->stream_enc[j];
 -
 -      return NULL;
 -}
 -
  static struct audio *find_first_free_audio(
                struct resource_context *res_ctx,
                const struct resource_pool *pool,
@@@ -1957,7 -2001,7 +1960,7 @@@ enum dc_status resource_map_pool_resour
        pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
  
        pipe_ctx->stream_res.stream_enc =
 -              find_first_free_match_stream_enc_for_link(
 +              dc->res_pool->funcs->find_first_free_match_stream_enc_for_link(
                        &context->res_ctx, pool, stream);
  
        if (!pipe_ctx->stream_res.stream_enc)
@@@ -2018,7 -2062,7 +2021,7 @@@ void dc_resource_state_construct
                const struct dc *dc,
                struct dc_state *dst_ctx)
  {
 -      dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
 +      dst_ctx->clk_mgr = dc->clk_mgr;
  }
  
  /**
@@@ -2313,18 -2357,7 +2316,18 @@@ static void set_avi_info_frame
                        break;
                }
        }
 +      /* If VIC >= 128, the Source shall use AVI InfoFrame Version 3 */
        hdmi_info.bits.VIC0_VIC7 = vic;
 +      if (vic >= 128)
 +              hdmi_info.bits.header.version = 3;
 +      /* If (C1, C0)=(1, 1) and (EC2, EC1, EC0)=(1, 1, 1),
 +       * the Source shall use AVI InfoFrame Version 4
 +       */
 +      if (hdmi_info.bits.C0_C1 == COLORIMETRY_EXTENDED &&
 +                      hdmi_info.bits.EC0_EC2 == COLORIMETRYEX_RESERVED) {
 +              hdmi_info.bits.header.version = 4;
 +              hdmi_info.bits.header.length = 14;
 +      }
  
        /* pixel repetition
         * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
        hdmi_info.bits.bar_right = (stream->timing.h_total
                        - stream->timing.h_border_right + 1);
  
 +      /* Additional Colorimetry Extension
 +       * Used in conjunction with C0-C1 and EC0-EC2
 +       * 0 = DCI-P3 RGB (D65)
 +       * 1 = DCI-P3 RGB (theater)
 +       */
 +      hdmi_info.bits.ACE0_ACE3 = 0;
 +
        /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
        check_sum = &hdmi_info.packet_raw_data.sb[0];
  
 -      *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
 +      *check_sum = HDMI_INFOFRAME_TYPE_AVI + hdmi_info.bits.header.length + hdmi_info.bits.header.version;
  
 -      for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
 +      for (byte_index = 1; byte_index <= hdmi_info.bits.header.length; byte_index++)
                *check_sum += hdmi_info.packet_raw_data.sb[byte_index];
  
        /* one byte complement */
@@@ -2402,6 -2428,21 +2405,6 @@@ static void set_spd_info_packet
        *info_packet = stream->vrr_infopacket;
  }
  
 -static void set_dp_sdp_info_packet(
 -              struct dc_info_packet *info_packet,
 -              struct dc_stream_state *stream)
 -{
 -      /* SPD info packet for custom sdp message */
 -
 -      /* Return if false. If true,
 -       * set the corresponding bit in the info packet
 -       */
 -      if (!stream->dpsdp_infopacket.valid)
 -              return;
 -
 -      *info_packet = stream->dpsdp_infopacket;
 -}
 -
  static void set_hdr_static_info_packet(
                struct dc_info_packet *info_packet,
                struct dc_stream_state *stream)
@@@ -2457,6 -2498,7 +2460,6 @@@ void dc_resource_state_copy_construct
  
                if (cur_pipe->bottom_pipe)
                        cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
 -
        }
  
        for (i = 0; i < dst_ctx->stream_count; i++) {
@@@ -2497,6 -2539,7 +2500,6 @@@ void resource_build_info_frame(struct p
        info->spd.valid = false;
        info->hdrsmd.valid = false;
        info->vsc.valid = false;
 -      info->dpsdp.valid = false;
  
        signal = pipe_ctx->stream->signal;
  
                set_spd_info_packet(&info->spd, pipe_ctx->stream);
  
                set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
 -
 -              set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
        }
  
        patch_gamut_packet_checksum(&info->gamut);
@@@ -2602,10 -2647,6 +2605,10 @@@ bool pipe_need_reprogram
        if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
                return true;
  
 +      if (!pipe_ctx_old->stream->link->link_state_valid &&
 +                      !pipe_ctx_old->stream->dpms_off)
 +              return true;
 +
        return false;
  }
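
The set_avi_info_frame() changes above pick the InfoFrame version from the payload (version 3 once VIC >= 128, version 4 with length 14 for the extended colorimetry case) and fold type, length, and version into the checksum. A compact sketch of that checksum rule (payload bytes are illustrative):

#include <stdint.h>
#include <stdio.h>

#define HDMI_INFOFRAME_TYPE_AVI 0x82

/* Checksum over type, version, length and payload, so all bytes sum to 0. */
static uint8_t avi_checksum(uint8_t version, uint8_t length,
			    const uint8_t *payload)
{
	uint8_t sum = HDMI_INFOFRAME_TYPE_AVI + length + version;
	int i;

	for (i = 0; i < length; i++)
		sum += payload[i];
	return (uint8_t)(0x100 - sum); /* one-byte complement */
}

int main(void)
{
	uint8_t payload[14] = { 0x11, 0x2c };  /* illustrative bytes only */
	uint8_t vic = 130;
	uint8_t version = vic >= 128 ? 3 : 2;  /* VIC >= 128 forces version 3 */

	printf("AVI v%u checksum: 0x%02x\n", version,
	       avi_checksum(version, 13, payload));
	return 0;
}
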
  
index a002e690814f492ce9cc5e19ffb3c7770e198518,7a46cc4414d26d55f1829eb29468a8e1488d9943..7fe0dbe306666a529d2e668a68a86d0ef1761ffb
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dc.h"
  #include "core_types.h"
@@@ -47,8 -50,8 +50,8 @@@ void update_stream_signal(struct dc_str
  
        if (dc_is_dvi_signal(stream->signal)) {
                if (stream->ctx->dc->caps.dual_link_dvi &&
 -                  (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
 -                  sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
 +                      (stream->timing.pix_clk_100hz / 10) > TMDS_MAX_PIXEL_CLOCK &&
 +                      sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
                        stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
                else
                        stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
@@@ -179,9 -182,6 +182,9 @@@ struct dc_stream_state *dc_copy_stream(
        if (new_stream->out_transfer_func)
                dc_transfer_func_retain(new_stream->out_transfer_func);
  
 +      new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
 +      new_stream->ctx->dc_stream_id_count++;
 +
        kref_init(&new_stream->refcount);
  
        return new_stream;
@@@ -232,7 -232,7 +235,7 @@@ static void delay_cursor_until_vupdate(
        unsigned int us_per_line;
  
        if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
 -                      ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
 +                      ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
  
                vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
                if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
@@@ -374,12 -374,42 +377,12 @@@ uint32_t dc_stream_get_vblank_counter(c
        return 0;
  }
  
 -static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
 -              const uint8_t  *custom_sdp_message,
 -              unsigned int sdp_message_size)
 -{
 -      uint8_t i;
 -      struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
 -
 -      /* set valid info */
 -      info->dpsdp.valid = true;
 -
 -      /* set sdp message header */
 -      info->dpsdp.hb0 = custom_sdp_message[0]; /* package id */
 -      info->dpsdp.hb1 = custom_sdp_message[1]; /* package type */
 -      info->dpsdp.hb2 = custom_sdp_message[2]; /* package specific byte 0 any data */
 -      info->dpsdp.hb3 = custom_sdp_message[3]; /* package specific byte 0 any data */
 -
 -      /* set sdp message data */
 -      for (i = 0; i < 32; i++)
 -              info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
 -
 -}
 -
 -static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
 -{
 -      struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
 -
 -      /* in-valid info */
 -      info->dpsdp.valid = false;
 -}
 -
  bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
                const uint8_t *custom_sdp_message,
                unsigned int sdp_message_size)
  {
        int i;
 -      struct dc  *core_dc;
 +      struct dc  *dc;
        struct resource_context *res_ctx;
  
        if (stream == NULL) {
                return false;
        }
  
 -      core_dc = stream->ctx->dc;
 -      res_ctx = &core_dc->current_state->res_ctx;
 +      dc = stream->ctx->dc;
 +      res_ctx = &dc->current_state->res_ctx;
  
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
                if (pipe_ctx->stream != stream)
                        continue;
  
 -              build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
 -
 -              core_dc->hwss.update_info_frame(pipe_ctx);
 +              if (dc->hwss.send_immediate_sdp_message != NULL)
 +                      dc->hwss.send_immediate_sdp_message(pipe_ctx,
 +                                                              custom_sdp_message,
 +                                                              sdp_message_size);
 +              else
 +                      DC_LOG_WARNING("%s: send_immediate_sdp_message not implemented on this ASIC\n",
 +                                      __func__);
  
 -              invalid_dp_sdp_info_frame(pipe_ctx);
        }
  
        return true;
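
dc_stream_send_dp_sdp() now delegates to an optional per-ASIC hook instead of building the SDP itself. A hedged sketch of the guard-then-warn pattern for optional function pointers (all names below are hypothetical stand-ins):

#include <stdio.h>

struct hw_funcs {
	/* optional per-ASIC hook; NULL when the hardware lacks support */
	void (*send_sdp)(const unsigned char *msg, unsigned int size);
};

static void asic_send_sdp(const unsigned char *msg, unsigned int size)
{
	printf("sending %u-byte SDP, type 0x%02x\n", size, msg[1]);
}

static void send_sdp(const struct hw_funcs *hw,
		     const unsigned char *msg, unsigned int size)
{
	if (hw->send_sdp != NULL)
		hw->send_sdp(msg, size);
	else
		fprintf(stderr, "%s: send_sdp not implemented on this ASIC\n",
			__func__);
}

int main(void)
{
	const unsigned char msg[4] = { 0x00, 0x07, 0x00, 0x00 };
	struct hw_funcs with = { .send_sdp = asic_send_sdp };
	struct hw_funcs without = { .send_sdp = NULL };

	send_sdp(&with, msg, sizeof(msg));
	send_sdp(&without, msg, sizeof(msg));
	return 0;
}
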
index 2d0acf10936089f9ba5342bfb1d28977662c5e23,eb2d01d1a678d5301767c4b577d5bfba76ed1374..30b2f9edd42f5d988c3077bf9ef6bee4b3667c4b
@@@ -26,6 -26,9 +26,9 @@@
   *  Created on: Aug 30, 2016
   *      Author: agrodzov
   */
+ #include <linux/delay.h>
  #include "dm_services.h"
  #include <stdarg.h>
  
@@@ -297,7 -300,7 +300,7 @@@ void generic_reg_wait(const struct dc_c
        int i;
  
         /* something is terribly wrong if time out is > 3000ms */
 -      ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
 +      ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);
  
        for (i = 0; i <= time_out_num_tries; i++) {
                if (i) {
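
generic_reg_wait() polls with a fixed delay and asserts that the configured worst case stays sane; the hunk above raises that bound from 200 ms to 3 s. A user-space sketch of the same poll loop, with a fake register read standing in for the real MMIO access:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Fake MMIO read: reports the expected value on the third poll. */
static uint32_t fake_read_reg(void)
{
	static int calls;

	return ++calls >= 3 ? 1 : 0;
}

/* Poll until (reg & mask) == expected or the retry budget runs out. */
static int reg_wait(uint32_t mask, uint32_t expected,
		    unsigned int delay_us, unsigned int tries)
{
	unsigned int i;

	/* mirror of the driver's sanity bound on total poll time (3 s) */
	assert((uint64_t)delay_us * tries <= 3000000);

	for (i = 0; i <= tries; i++) {
		if (i)
			usleep(delay_us);
		if ((fake_read_reg() & mask) == expected)
			return 0;
	}
	return -1; /* timed out */
}

int main(void)
{
	printf("wait result: %d\n", reg_wait(0x1, 0x1, 10, 1000));
	return 0;
}
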
index 3d87a8800300f6c4e13e73dbf1c39da3645530e5,dbd8cc6001279c4dea8a8ef64d30301c38901415..f8903bcabe491f3faa0961e3f3bb5da0b5539942
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dce_abm.h"
  #include "dm_services.h"
  #include "reg_helper.h"
@@@ -58,9 -60,6 +60,9 @@@ static bool dce_abm_set_pipe(struct ab
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
        uint32_t rampingBoundary = 0xFFFF;
  
 +      if (abm->dmcu_is_running == false)
 +              return true;
 +
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
                        1, 80000);
  
@@@ -305,9 -304,6 +307,9 @@@ static bool dce_abm_set_level(struct ab
  {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
  
 +      if (abm->dmcu_is_running == false)
 +              return true;
 +
        REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
                        1, 80000);
  
@@@ -326,9 -322,6 +328,9 @@@ static bool dce_abm_immediate_disable(s
  {
        struct dce_abm *abm_dce = TO_DCE_ABM(abm);
  
 +      if (abm->dmcu_is_running == false)
 +              return true;
 +
        dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
  
        abm->stored_backlight_registers.BL_PWM_CNTL =
@@@ -452,7 -445,6 +454,7 @@@ static void dce_abm_construct
        base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
        base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
        base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
 +      base->dmcu_is_running = false;
  
        abm_dce->regs = regs;
        abm_dce->abm_shift = abm_shift;
@@@ -483,9 -475,6 +485,9 @@@ void dce_abm_destroy(struct abm **abm
  {
        struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
  
 +      if (abm_dce->base.dmcu_is_running == true)
 +              abm_dce->base.funcs->set_abm_immediate_disable(*abm);
 +
        kfree(abm_dce);
        *abm = NULL;
  }
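
These ABM hunks follow one pattern: every path that would talk to the DMCU firmware now bails out early (reporting success) when the microcontroller never came up, and destroy disables ABM before freeing. A reduced sketch of that guard, with an illustrative struct layout:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct abm {
        bool dmcu_is_running;   /* set at init from the DMCU state */
};

static bool abm_immediate_disable(struct abm *abm)
{
        if (!abm->dmcu_is_running)
                return true;    /* no firmware to program; report success */
        /* ... the real code waits on the mailbox and issues an MCP command ... */
        return true;
}

static void abm_destroy(struct abm **abm)
{
        if ((*abm)->dmcu_is_running)
                abm_immediate_disable(*abm);    /* quiesce before freeing */
        free(*abm);
        *abm = NULL;
}

int main(void)
{
        struct abm *abm = calloc(1, sizeof(*abm));

        abm_destroy(&abm);
        printf("%p\n", (void *)abm);    /* NULL after destroy */
        return 0;
}
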
index 01efcddea359cbf90864eb1833194e4c672dae79,58864fca3da77b1c698b2770b3ea52eea35a32c0..8347be76c60a4836d5fa41cf8642842a8b949caa
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  
@@@ -33,7 -35,6 +35,7 @@@
  #include "include/logger_interface.h"
  
  #include "dce_clock_source.h"
 +#include "clk_mgr.h"
  
  #include "reg_helper.h"
  
@@@ -184,8 -185,8 +186,8 @@@ static bool calculate_fb_and_fractional
  *RETURNS:
  * It fills the PLLSettings structure with PLL Dividers values
  * if calculated values are within required tolerance
 -* It returns  - true if eror is within tolerance
 -*             - false if eror is not within tolerance
 +* It returns  - true if error is within tolerance
 +*             - false if error is not within tolerance
  */
  static bool calc_fb_divider_checking_tolerance(
                struct calc_pll_clock_source *calc_pll_cs,
index c2bc36f9f6c78ee5c63998696d7f6c9780166dbb,9c701ab2dbfda263c38c4a78bf1de2dec6cb42a7..ddd30fc0d76b7852f106110268089ed43462b290
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "core_types.h"
  #include "link_encoder.h"
  #include "dce_dmcu.h"
@@@ -388,9 -391,6 +391,9 @@@ static bool dcn10_dmcu_init(struct dmc
                /* Set initialized ramping boundary value */
                REG_WRITE(MASTER_COMM_DATA_REG1, 0xFFFF);
  
 +              /* Set backlight ramping stepsize */
 +              REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
 +
                /* Set command to initialize microcontroller */
                REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
                        MCP_INIT_DMCU);
@@@ -816,9 -816,6 +819,9 @@@ void dce_dmcu_destroy(struct dmcu **dmc
  {
        struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
  
 +      if (dmcu_dce->base.dmcu_state == DMCU_RUNNING)
 +              dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true);
 +
        kfree(dmcu_dce);
        *dmcu = NULL;
  }
index 526aab438374697fc9d9ac8c2edbd8efecf32d4f,8a236d40d8d004534d03e169723153e19f59e37f..5ca558766d2e2672079a240c144a4385f923faba
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/delay.h>
  #include "dce_i2c.h"
  #include "dce_i2c_hw.h"
  #include "reg_helper.h"
@@@ -268,8 -271,6 +271,8 @@@ static bool setup_engine
        struct dce_i2c_hw *dce_i2c_hw)
  {
        uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE;
 +      /* We have checked that the I2C engine is not in use by DMCU; set DC_I2C_SW_USE_I2C_REG_REQ to 1 to indicate SW is using it */
 +      REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1);
  
        if (dce_i2c_hw->setup_limit != 0)
                i2c_setup_limit = dce_i2c_hw->setup_limit;
@@@ -324,6 -325,8 +327,6 @@@ static void release_engine
  
        set_speed(dce_i2c_hw, dce_i2c_hw->original_speed);
  
 -      /* Release I2C */
 -      REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
  
        /* Reset HW engine */
        {
        /* HW I2c engine - clock gating feature */
        if (!dce_i2c_hw->engine_keep_power_up_count)
                REG_UPDATE_N(SETUP, 1, FN(SETUP, DC_I2C_DDC1_ENABLE), 0);
 +      /* Release I2C after reset, so HW or DMCU could use it */
 +      REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1,
 +              DC_I2C_SW_USE_I2C_REG_REQ, 0);
  
  }
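
setup_engine() now raises DC_I2C_SW_USE_I2C_REG_REQ before touching the engine, and release_engine() drops it together with DC_I2C_SW_DONE_USING_I2C_REG only after the engine reset, so HW or DMCU can arbitrate for the bus. A sketch of the acquire/release bracket, with the register bits modeled as plain flags (illustrative names):

#include <stdbool.h>
#include <stdio.h>

struct i2c_arb {
        bool sw_use_req;        /* models DC_I2C_SW_USE_I2C_REG_REQ */
        bool sw_done_using;     /* models DC_I2C_SW_DONE_USING_I2C_REG */
};

static void i2c_acquire(struct i2c_arb *arb)
{
        arb->sw_use_req = true;         /* tell other agents SW owns the bus */
}

static void i2c_release(struct i2c_arb *arb)
{
        /* release only after the HW engine reset, mirroring release_engine() */
        arb->sw_done_using = true;
        arb->sw_use_req = false;
}

int main(void)
{
        struct i2c_arb arb = { 0 };

        i2c_acquire(&arb);
        /* ... the transfer would run here ... */
        i2c_release(&arb);
        printf("req=%d done=%d\n", arb.sw_use_req, arb.sw_done_using);
        return 0;
}
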
  
index 3690ca9572822a95696453a49dad99e3e7e44b6a,0c0b41d70f655f3602ecee177e6c98bb4ef9a8dd..5e2b4d47c5482014f4da41f30ef54fb281f7bfce
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/delay.h>
  #include "dc_bios_types.h"
  #include "dce_stream_encoder.h"
  #include "reg_helper.h"
@@@ -418,7 -420,6 +420,7 @@@ static void dce110_stream_encoder_dp_se
                        break;
                case COLOR_SPACE_YCBCR709:
                case COLOR_SPACE_YCBCR709_LIMITED:
 +              case COLOR_SPACE_YCBCR709_BLACK:
                        misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
                        misc1 = misc1 & ~0x80; /* bit7 = 0*/
                        dynamic_range_ycbcr = 1; /*bt709*/
@@@ -1124,6 -1125,19 +1126,6 @@@ union audio_cea_channels 
        } channels;
  };
  
 -struct audio_clock_info {
 -      /* pixel clock frequency*/
 -      uint32_t pixel_clock_in_10khz;
 -      /* N - 32KHz audio */
 -      uint32_t n_32khz;
 -      /* CTS - 32KHz audio*/
 -      uint32_t cts_32khz;
 -      uint32_t n_44khz;
 -      uint32_t cts_44khz;
 -      uint32_t n_48khz;
 -      uint32_t cts_48khz;
 -};
 -
  /* 25.2MHz/1.001*/
  /* 25.2MHz/1.001*/
  /* 25.2MHz*/
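
The removed audio_clock_info struct (moved to a shared header) pairs each pixel clock with N and CTS values per audio rate. These follow the HDMI audio clock regeneration relation 128 * fs = f_tmds * N / CTS, i.e. CTS = f_tmds * N / (128 * fs); a quick check with assumed values (25.2 MHz TMDS clock, 32 kHz audio, N = 4096) yields CTS = 25200, matching the n_32khz/cts_32khz pairing the table encodes:

#include <stdint.h>
#include <stdio.h>

/* CTS = f_tmds * N / (128 * fs), per the HDMI clock regeneration scheme */
static uint32_t hdmi_cts(uint64_t f_tmds_hz, uint32_t n, uint32_t fs_hz)
{
        return (uint32_t)(f_tmds_hz * n / (128ULL * fs_hz));
}

int main(void)
{
        printf("CTS = %u\n", hdmi_cts(25200000ULL, 4096, 32000)); /* 25200 */
        return 0;
}
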
index ae87c501775666dae865d2e5cbd80d596f914d24,870bda57dc29b3fcc4a08455126912cb93260c32..6248c845531408988acb192d86ea05bc8c7e884b
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "link_encoder.h"
@@@ -35,6 -38,8 +38,6 @@@
  #include "irq/dce110/irq_service_dce110.h"
  #include "dce/dce_link_encoder.h"
  #include "dce/dce_stream_encoder.h"
 -
 -#include "dce/dce_clk_mgr.h"
  #include "dce/dce_mem_input.h"
  #include "dce/dce_ipp.h"
  #include "dce/dce_transform.h"
@@@ -135,6 -140,19 +138,6 @@@ static const struct dce110_timing_gener
  #define SRI(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name
  
 -
 -static const struct clk_mgr_registers disp_clk_regs = {
 -              CLK_COMMON_REG_LIST_DCE_BASE()
 -};
 -
 -static const struct clk_mgr_shift disp_clk_shift = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 -};
 -
 -static const struct clk_mgr_mask disp_clk_mask = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 -};
 -
  #define ipp_regs(id)\
  [id] = {\
                IPP_DCE100_REG_LIST_DCE_BASE(id)\
@@@ -731,6 -749,9 +734,6 @@@ static void destruct(struct dce110_reso
                        dce_aud_destroy(&pool->base.audios[i]);
        }
  
 -      if (pool->base.clk_mgr != NULL)
 -              dce_clk_mgr_destroy(&pool->base.clk_mgr);
 -
        if (pool->base.abm != NULL)
                                dce_abm_destroy(&pool->base.abm);
  
@@@ -849,55 -870,13 +852,55 @@@ enum dc_status dce100_validate_plane(co
        return DC_FAIL_SURFACE_VALIDATE;
  }
  
 +struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
 +              struct resource_context *res_ctx,
 +              const struct resource_pool *pool,
 +              struct dc_stream_state *stream)
 +{
 +      int i;
 +      int j = -1;
 +      struct dc_link *link = stream->link;
 +
 +      for (i = 0; i < pool->stream_enc_count; i++) {
 +              if (!res_ctx->is_stream_enc_acquired[i] &&
 +                              pool->stream_enc[i]) {
 +                      /* Remember a free encoder as fallback for the
 +                       * second MST display in a daisy chain use case
 +                       */
 +                      j = i;
 +                      if (pool->stream_enc[i]->id ==
 +                                      link->link_enc->preferred_engine)
 +                              return pool->stream_enc[i];
 +              }
 +      }
 +
 +      /*
 +       * The below can happen when the stream encoder is already acquired:
 +       * 1) for the second MST display in a chain, so the preferred engine
 +       * is already acquired;
 +       * 2) for another link whose preferred engine is already acquired by
 +       * some MST configuration.
 +       *
 +       * If the signal is of DP type and no preferred engine was found,
 +       * return the last available one.
 +       *
 +       * TODO - This is just a patch-up; a generic solution is required
 +       * for non-DP connectors.
 +       */
 +
 +      if (j >= 0 && link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT)
 +              return pool->stream_enc[j];
 +
 +      return NULL;
 +}
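
The helper prefers the encoder matching the link's preferred DIG engine and, failing that, falls back to the last free encoder for DP signals. A simplified standalone model of the search, with the types reduced to ints:

#include <stdbool.h>
#include <stdio.h>

#define NUM_ENC 4

static int find_stream_enc(const bool acquired[NUM_ENC],
                           const int enc_id[NUM_ENC],
                           int preferred_id, bool is_dp)
{
        int i, j = -1;

        for (i = 0; i < NUM_ENC; i++) {
                if (acquired[i])
                        continue;
                j = i;                  /* remember a free fallback */
                if (enc_id[i] == preferred_id)
                        return i;       /* preferred engine is free */
        }
        /* preferred engine taken (e.g. MST daisy chain): DP may use any */
        return is_dp ? j : -1;
}

int main(void)
{
        bool acquired[NUM_ENC] = { true, false, false, false };
        int enc_id[NUM_ENC] = { 0, 1, 2, 3 };

        /* encoder 0 (the preferred engine) is taken, so DP gets index 3 */
        printf("%d\n", find_stream_enc(acquired, enc_id, 0, true));
        return 0;
}
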
 +
  static const struct resource_funcs dce100_res_pool_funcs = {
        .destroy = dce100_destroy_resource_pool,
        .link_enc_create = dce100_link_encoder_create,
        .validate_bandwidth = dce100_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce100_add_stream_to_ctx,
 -      .validate_global = dce100_validate_global
 +      .validate_global = dce100_validate_global,
 +      .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
  };
  
  static bool construct(
                }
        }
  
 -      pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 -                      &disp_clk_regs,
 -                      &disp_clk_shift,
 -                      &disp_clk_mask);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto res_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
index 3042741b165a157556bf9787e939adb8ed681280,54c266e8441672b23eaad7e548f9973db8afb658..753c96f74af0e14c5cf57f876e677cbf31d057fc
@@@ -22,6 -22,9 +22,9 @@@
   * Authors: AMD
   *
   */
+ #include <linux/delay.h>
  #include "dm_services.h"
  #include "dc.h"
  #include "dc_bios_types.h"
@@@ -46,7 -49,6 +49,7 @@@
  #include "link_encoder.h"
  #include "link_hwss.h"
  #include "clock_source.h"
 +#include "clk_mgr.h"
  #include "abm.h"
  #include "audio.h"
  #include "reg_helper.h"
@@@ -243,9 -245,6 +246,9 @@@ static void build_prescale_params(struc
        prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
  
        switch (plane_state->format) {
 +      case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
 +              prescale_params->scale = 0x2082;
 +              break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
                prescale_params->scale = 0x2020;
@@@ -961,9 -960,6 +964,9 @@@ void dce110_enable_audio_stream(struct 
        struct pp_smu_funcs *pp_smu = NULL;
        unsigned int i, num_audio = 1;
  
 +      if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
 +              return;
 +
        if (core_dc->res_pool->pp_smu)
                pp_smu = core_dc->res_pool->pp_smu;
  
                /* TODO: audio should be per stream rather than per link */
                pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                                        pipe_ctx->stream_res.stream_enc, false);
 +              if (pipe_ctx->stream_res.audio)
 +                      pipe_ctx->stream_res.audio->enabled = true;
        }
  }
  
@@@ -993,9 -987,6 +996,9 @@@ void dce110_disable_audio_stream(struc
        struct dc *dc = pipe_ctx->stream->ctx->dc;
        struct pp_smu_funcs *pp_smu = NULL;
  
 +      if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
 +              return;
 +
        pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
                        pipe_ctx->stream_res.stream_enc, true);
        if (pipe_ctx->stream_res.audio) {
                /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
                 * stream->stream_engine_id);
                 */
 +              if (pipe_ctx->stream_res.audio)
 +                      pipe_ctx->stream_res.audio->enabled = false;
        }
  }
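
Both audio paths are now idempotent: they return early when the endpoint is already in the requested state and record the new state once the transition completes. A reduced sketch of that guard:

#include <stdbool.h>
#include <stddef.h>

struct audio_res {
        bool enabled;
};

static void audio_set_enabled(struct audio_res *audio, bool enable)
{
        if (audio == NULL || audio->enabled == enable)
                return;                 /* nothing to do */
        /* ... mute/unmute and endpoint programming would run here ... */
        audio->enabled = enable;        /* remember for the next call */
}

int main(void)
{
        struct audio_res audio = { 0 };

        audio_set_enabled(&audio, true);
        audio_set_enabled(&audio, true);        /* second call is a no-op */
        return 0;
}
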
  
@@@ -1310,11 -1299,6 +1313,11 @@@ static enum dc_status dce110_enable_str
                pipe_ctx->stream_res.tg->funcs->program_timing(
                                pipe_ctx->stream_res.tg,
                                &stream->timing,
 +                              0,
 +                              0,
 +                              0,
 +                              0,
 +                              pipe_ctx->stream->signal,
                                true);
        }
  
@@@ -1507,11 -1491,10 +1510,11 @@@ static void disable_vga_and_power_gate_
        }
  }
  
 -static struct dc_link *get_link_for_edp(struct dc *dc)
 +static struct dc_link *get_edp_link(struct dc *dc)
  {
        int i;
  
 +      // report any eDP link, even an unconnected DDI
        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP)
                        return dc->links[i];
        return NULL;
  }
  
 -static struct dc_link *get_link_for_edp_to_turn_off(
 +static struct dc_link *get_edp_link_with_sink(
                struct dc *dc,
                struct dc_state *context)
  {
        int i;
        struct dc_link *link = NULL;
  
 -      /* check if eDP panel is suppose to be set mode, if yes, no need to disable */
 -      for (i = 0; i < context->stream_count; i++) {
 -              if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 -                      if (context->streams[i]->dpms_off == true)
 -                              return context->streams[i]->sink->link;
 -                      else
 -                              return NULL;
 -              }
 -      }
 -
        /* check if there is an eDP panel not in use */
        for (i = 0; i < dc->link_count; i++) {
                if (dc->links[i]->local_sink &&
  void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
  {
        int i;
 -      struct dc_link *edp_link_to_turnoff = NULL;
 -      struct dc_link *edp_link = get_link_for_edp(dc);
 -      bool can_edp_fast_boot_optimize = false;
 -      bool apply_edp_fast_boot_optimization = false;
 +      struct dc_link *edp_link_with_sink = get_edp_link_with_sink(dc, context);
 +      struct dc_link *edp_link = get_edp_link(dc);
 +      bool can_apply_edp_fast_boot = false;
        bool can_apply_seamless_boot = false;
  
 -      for (i = 0; i < context->stream_count; i++) {
 -              if (context->streams[i]->apply_seamless_boot_optimization) {
 -                      can_apply_seamless_boot = true;
 -                      break;
 -              }
 -      }
 -
        if (dc->hwss.init_pipes)
                dc->hwss.init_pipes(dc, context);
  
 -      if (edp_link) {
 -              /* this seems to cause blank screens on DCE8 */
 -              if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
 -                  (dc->ctx->dce_version == DCE_VERSION_8_1) ||
 -                  (dc->ctx->dce_version == DCE_VERSION_8_3))
 -                      can_edp_fast_boot_optimize = false;
 -              else
 -                      can_edp_fast_boot_optimize =
 -                              edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
 +      // Check fastboot support, disable on DCE8 because of blank screens
 +      if (edp_link && dc->ctx->dce_version != DCE_VERSION_8_0 &&
 +                  dc->ctx->dce_version != DCE_VERSION_8_1 &&
 +                  dc->ctx->dce_version != DCE_VERSION_8_3) {
 +
 +              // enable fastboot if backend is enabled on eDP
 +              if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
 +                      /* Find eDP stream and set optimization flag */
 +                      for (i = 0; i < context->stream_count; i++) {
 +                              if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 +                                      context->streams[i]->apply_edp_fast_boot_optimization = true;
 +                                      can_apply_edp_fast_boot = true;
 +                                      break;
 +                              }
 +                      }
 +              }
        }
  
 -      if (can_edp_fast_boot_optimize)
 -              edp_link_to_turnoff = get_link_for_edp_to_turn_off(dc, context);
 -
 -      /* if OS doesn't light up eDP and eDP link is available, we want to disable
 -       * If resume from S4/S5, should optimization.
 -       */
 -      if (can_edp_fast_boot_optimize && !edp_link_to_turnoff) {
 -              /* Find eDP stream and set optimization flag */
 -              for (i = 0; i < context->stream_count; i++) {
 -                      if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
 -                              context->streams[i]->apply_edp_fast_boot_optimization = true;
 -                              apply_edp_fast_boot_optimization = true;
 -                      }
 +      // Check seamless boot support
 +      for (i = 0; i < context->stream_count; i++) {
 +              if (context->streams[i]->apply_seamless_boot_optimization) {
 +                      can_apply_seamless_boot = true;
 +                      break;
                }
        }
  
 -      if (!apply_edp_fast_boot_optimization && !can_apply_seamless_boot) {
 -              if (edp_link_to_turnoff) {
 +      /* eDP should not have a stream when resuming from S4, so even with
 +       * a VBIOS post it should be turned off
 +       */
 +      if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) {
 +              if (edp_link_with_sink) {
                        /*turn off backlight before DP_blank and encoder powered down*/
 -                      dc->hwss.edp_backlight_control(edp_link_to_turnoff, false);
 +                      dc->hwss.edp_backlight_control(edp_link_with_sink, false);
                }
                /*resume from S3, no vbios posting, no need to power down again*/
                power_down_all_hw_blocks(dc);
                disable_vga_and_power_gate_all_controllers(dc);
 -              if (edp_link_to_turnoff)
 -                      dc->hwss.edp_power_control(edp_link_to_turnoff, false);
 +              if (edp_link_with_sink)
 +                      dc->hwss.edp_power_control(edp_link_with_sink, false);
        }
        bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
  }
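
The reworked dce110_enable_accelerated_mode() boils down to one decision: power everything down unless either eDP fast boot applies (an eDP link exists, the ASIC is not DCE8, the DIG backend is already lit and an eDP stream is present) or seamless boot was requested. A boolean sketch of that predicate, with the inputs reduced to flags:

#include <stdbool.h>
#include <stdio.h>

static bool must_power_down(bool has_edp, bool is_dce8,
                            bool edp_dig_enabled, bool edp_stream_present,
                            bool seamless_boot_requested)
{
        bool fast_boot = has_edp && !is_dce8 &&
                         edp_dig_enabled && edp_stream_present;

        return !fast_boot && !seamless_boot_requested;
}

int main(void)
{
        /* eDP lit by firmware and a stream carried over: keep it running */
        printf("%d\n", must_power_down(true, false, true, true, false));
        return 0;
}
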
@@@ -2034,10 -2033,8 +2037,10 @@@ enum dc_status dce110_apply_ctx_to_hw
                if (pipe_ctx->stream == NULL)
                        continue;
  
 -              if (pipe_ctx->stream == pipe_ctx_old->stream)
 +              if (pipe_ctx->stream == pipe_ctx_old->stream &&
 +                      pipe_ctx->stream->link->link_state_valid) {
                        continue;
 +              }
  
                if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
                        continue;
@@@ -2324,7 -2321,6 +2327,7 @@@ static void init_hw(struct dc *dc
        struct dc_bios *bp;
        struct transform *xfm;
        struct abm *abm;
 +      struct dmcu *dmcu;
  
        bp = dc->ctx->dc_bios;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                 * default signal on connector). */
                struct dc_link *link = dc->links[i];
  
 -              if (link->link_enc->connector.id == CONNECTOR_ID_EDP)
 -                      dc->hwss.edp_power_control(link, true);
 -
                link->link_enc->funcs->hw_init(link->link_enc);
        }
  
                abm->funcs->abm_init(abm);
        }
  
 +      dmcu = dc->res_pool->dmcu;
 +      if (dmcu != NULL && abm != NULL)
 +              abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
 +
        if (dc->fbc_compressor)
                dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
  
@@@ -2391,7 -2386,7 +2394,7 @@@ void dce110_prepare_bandwidth
                struct dc *dc,
                struct dc_state *context)
  {
 -      struct clk_mgr *dccg = dc->res_pool->clk_mgr;
 +      struct clk_mgr *dccg = dc->clk_mgr;
  
        dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
  
@@@ -2405,7 -2400,7 +2408,7 @@@ void dce110_optimize_bandwidth
                struct dc *dc,
                struct dc_state *context)
  {
 -      struct clk_mgr *dccg = dc->res_pool->clk_mgr;
 +      struct clk_mgr *dccg = dc->clk_mgr;
  
        dce110_set_displaymarks(dc, context);
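
This hunk, and the matching clk_mgr removals in the dce*_resource.c files above, reflect the series' ownership move: the clock manager no longer lives in (or is destroyed with) the resource pool but hangs off struct dc directly. A reduced before/after sketch, with hypothetical trimmed-down types:

struct clk_mgr;                         /* opaque here */

struct resource_pool {
        /* struct clk_mgr *clk_mgr;        removed by this series */
        int controller_count;
};

struct dc {
        struct resource_pool *res_pool;
        struct clk_mgr *clk_mgr;        /* new home; see the dccg lookups above */
};
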
  
index 113cfb3d972c435bd1dea3fe6ac98c5fae035eca,4d9f219738bba26df2df1bc143d8047f258cb550..764329264c3b4546f264f1ec9fb4986c7eff2c69
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "link_encoder.h"
@@@ -30,6 -32,8 +32,6 @@@
  
  #include "resource.h"
  #include "dce110/dce110_resource.h"
 -
 -#include "dce/dce_clk_mgr.h"
  #include "include/irq_service_interface.h"
  #include "dce/dce_audio.h"
  #include "dce110/dce110_timing_generator.h"
@@@ -147,6 -151,18 +149,6 @@@ static const struct dce110_timing_gener
  #define SRI(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name
  
 -static const struct clk_mgr_registers disp_clk_regs = {
 -              CLK_COMMON_REG_LIST_DCE_BASE()
 -};
 -
 -static const struct clk_mgr_shift disp_clk_shift = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 -};
 -
 -static const struct clk_mgr_mask disp_clk_mask = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 -};
 -
  static const struct dce_dmcu_registers dmcu_regs = {
                DMCU_DCE110_COMMON_REG_LIST()
  };
@@@ -797,6 -813,9 +799,6 @@@ static void destruct(struct dce110_reso
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
  
 -      if (pool->base.clk_mgr != NULL)
 -              dce_clk_mgr_destroy(&pool->base.clk_mgr);
 -
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
        }
@@@ -1080,11 -1099,6 +1082,11 @@@ static struct pipe_ctx *dce110_acquire_
  
                pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg,
                                &stream->timing,
 +                              0,
 +                              0,
 +                              0,
 +                              0,
 +                              pipe_ctx->stream->signal,
                                false);
  
                pipe_ctx->stream_res.tg->funcs->enable_advanced_request(
@@@ -1117,38 -1131,6 +1119,38 @@@ static void dce110_destroy_resource_poo
        *pool = NULL;
  }
  
 +struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
 +              struct resource_context *res_ctx,
 +              const struct resource_pool *pool,
 +              struct dc_stream_state *stream)
 +{
 +      int i;
 +      int j = -1;
 +      struct dc_link *link = stream->link;
 +
 +      for (i = 0; i < pool->stream_enc_count; i++) {
 +              if (!res_ctx->is_stream_enc_acquired[i] &&
 +                              pool->stream_enc[i]) {
 +                      /* Remember a free encoder as fallback for the
 +                       * second MST display in a daisy chain use case
 +                       */
 +                      j = i;
 +                      if (pool->stream_enc[i]->id ==
 +                                      link->link_enc->preferred_engine)
 +                              return pool->stream_enc[i];
 +              }
 +      }
 +
 +      /*
 +       * For CZ and later, we can allow DIG FE and BE to differ for all display types
 +       */
 +
 +      if (j >= 0)
 +              return pool->stream_enc[j];
 +
 +      return NULL;
 +}
 +
  
  static const struct resource_funcs dce110_res_pool_funcs = {
        .destroy = dce110_destroy_resource_pool,
        .validate_plane = dce110_validate_plane,
        .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
        .add_stream_to_ctx = dce110_add_stream_to_ctx,
 -      .validate_global = dce110_validate_global
 +      .validate_global = dce110_validate_global,
 +      .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
  };
  
  static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
@@@ -1329,6 -1310,16 +1331,6 @@@ static bool construct
                }
        }
  
 -      pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
 -                      &disp_clk_regs,
 -                      &disp_clk_shift,
 -                      &disp_clk_mask);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto res_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
index 1c3e8939696a0fb1be26a4eae45284b3c7f509f8,1bfff440e807e0f10444ccd9317364c81b24ed3a..c6136e0ed1a4d88c269d2a9b40a5e295ee5ce96c
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  #include "link_encoder.h"
@@@ -34,6 -36,8 +36,6 @@@
  #include "dce110/dce110_timing_generator.h"
  
  #include "irq/dce110/irq_service_dce110.h"
 -
 -#include "dce/dce_clk_mgr.h"
  #include "dce/dce_mem_input.h"
  #include "dce/dce_transform.h"
  #include "dce/dce_link_encoder.h"
@@@ -146,6 -150,19 +148,6 @@@ static const struct dce110_timing_gener
  #define SRI(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name
  
 -
 -static const struct clk_mgr_registers disp_clk_regs = {
 -              CLK_COMMON_REG_LIST_DCE_BASE()
 -};
 -
 -static const struct clk_mgr_shift disp_clk_shift = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 -};
 -
 -static const struct clk_mgr_mask disp_clk_mask = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 -};
 -
  static const struct dce_dmcu_registers dmcu_regs = {
                DMCU_DCE110_COMMON_REG_LIST()
  };
@@@ -759,6 -776,9 +761,6 @@@ static void destruct(struct dce110_reso
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
  
 -      if (pool->base.clk_mgr != NULL)
 -              dce_clk_mgr_destroy(&pool->base.clk_mgr);
 -
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
        }
@@@ -975,8 -995,7 +977,8 @@@ static const struct resource_funcs dce1
        .validate_bandwidth = dce112_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce112_add_stream_to_ctx,
 -      .validate_global = dce112_validate_global
 +      .validate_global = dce112_validate_global,
 +      .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
  };
  
  static void bw_calcs_data_update_from_pplib(struct dc *dc)
@@@ -1207,6 -1226,16 +1209,6 @@@ static bool construct
                }
        }
  
 -      pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
 -                      &disp_clk_regs,
 -                      &disp_clk_shift,
 -                      &disp_clk_mask);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto res_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
index afa1c6029835d1483dcb4d2796c5c22f1df44647,c2fae5150bccea12375168f154707dddc9104861..54be7ab370df055707ac7877d62000faeb6d78fd
@@@ -24,6 -24,8 +24,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  
@@@ -46,7 -48,8 +48,7 @@@
  #include "dce110/dce110_hw_sequencer.h"
  #include "dce120/dce120_hw_sequencer.h"
  #include "dce/dce_transform.h"
 -
 -#include "dce/dce_clk_mgr.h"
 +#include "clk_mgr.h"
  #include "dce/dce_audio.h"
  #include "dce/dce_link_encoder.h"
  #include "dce/dce_stream_encoder.h"
@@@ -479,7 -482,7 +481,7 @@@ static const struct dc_debug_options de
                .disable_clock_gate = true,
  };
  
 -struct clock_source *dce120_clock_source_create(
 +static struct clock_source *dce120_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
        enum clock_source_id id,
        return NULL;
  }
  
 -void dce120_clock_source_destroy(struct clock_source **clk_src)
 +static void dce120_clock_source_destroy(struct clock_source **clk_src)
  {
        kfree(TO_DCE110_CLK_SRC(*clk_src));
        *clk_src = NULL;
  }
  
  
 -bool dce120_hw_sequencer_create(struct dc *dc)
 +static bool dce120_hw_sequencer_create(struct dc *dc)
  {
        /* All registers used by dce11.2 match those in dce11 in offset and
         * structure
@@@ -608,6 -611,9 +610,6 @@@ static void destruct(struct dce110_reso
  
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
 -
 -      if (pool->base.clk_mgr != NULL)
 -              dce_clk_mgr_destroy(&pool->base.clk_mgr);
  }
  
  static void read_dce_straps(
@@@ -833,8 -839,7 +835,8 @@@ static const struct resource_funcs dce1
        .link_enc_create = dce120_link_encoder_create,
        .validate_bandwidth = dce112_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
 -      .add_stream_to_ctx = dce112_add_stream_to_ctx
 +      .add_stream_to_ctx = dce112_add_stream_to_ctx,
 +      .find_first_free_match_stream_enc_for_link = dce110_find_first_free_match_stream_enc_for_link
  };
  
  static void bw_calcs_data_update_from_pplib(struct dc *dc)
@@@ -1044,6 -1049,17 +1046,6 @@@ static bool construct
                }
        }
  
 -      if (is_vg20)
 -              pool->base.clk_mgr = dce121_clk_mgr_create(ctx);
 -      else
 -              pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
 -
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto dccg_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
         * here.
         */
        if (is_vg20 && dce121_xgmi_enabled(dc->hwseq))
 -              dce121_clock_patch_xgmi_ss_info(pool->base.clk_mgr);
 +              dce121_clock_patch_xgmi_ss_info(dc->clk_mgr);
  
        /* Create hardware sequencer */
        if (!dce120_hw_sequencer_create(dc))
  
  irqs_create_fail:
  controller_create_fail:
 -dccg_create_fail:
  clk_src_create_fail:
  res_create_fail:
  
index 1dccd59c59c5e48edcadbc94d498e8c0b9814f20,83c4cc8516c1afb88e4c2f694b95a739a5b0f354..860a524ebcfab7b7c6e4f5501349efd27d6f75d3
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dce/dce_8_0_d.h"
  #include "dce/dce_8_0_sh_mask.h"
  
@@@ -37,6 -39,7 +39,6 @@@
  #include "dce110/dce110_timing_generator.h"
  #include "dce110/dce110_resource.h"
  #include "dce80/dce80_timing_generator.h"
 -#include "dce/dce_clk_mgr.h"
  #include "dce/dce_mem_input.h"
  #include "dce/dce_link_encoder.h"
  #include "dce/dce_stream_encoder.h"
@@@ -153,6 -156,19 +155,6 @@@ static const struct dce110_timing_gener
  #define SRI(reg_name, block, id)\
        .reg_name = mm ## block ## id ## _ ## reg_name
  
 -
 -static const struct clk_mgr_registers disp_clk_regs = {
 -              CLK_COMMON_REG_LIST_DCE_BASE()
 -};
 -
 -static const struct clk_mgr_shift disp_clk_shift = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
 -};
 -
 -static const struct clk_mgr_mask disp_clk_mask = {
 -              CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 -};
 -
  #define ipp_regs(id)\
  [id] = {\
                IPP_COMMON_REG_LIST_DCE_BASE(id)\
@@@ -788,6 -804,9 +790,6 @@@ static void destruct(struct dce110_reso
                }
        }
  
 -      if (pool->base.clk_mgr != NULL)
 -              dce_clk_mgr_destroy(&pool->base.clk_mgr);
 -
        if (pool->base.irqs != NULL) {
                dal_irq_service_destroy(&pool->base.irqs);
        }
@@@ -863,8 -882,7 +865,8 @@@ static const struct resource_funcs dce8
        .validate_bandwidth = dce80_validate_bandwidth,
        .validate_plane = dce100_validate_plane,
        .add_stream_to_ctx = dce100_add_stream_to_ctx,
 -      .validate_global = dce80_validate_global
 +      .validate_global = dce80_validate_global,
 +      .find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
  };
  
  static bool dce80_construct(
                }
        }
  
 -      pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 -                      &disp_clk_regs,
 -                      &disp_clk_shift,
 -                      &disp_clk_mask);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto res_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
@@@ -1137,6 -1165,16 +1139,6 @@@ static bool dce81_construct
                }
        }
  
 -      pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 -                      &disp_clk_regs,
 -                      &disp_clk_shift,
 -                      &disp_clk_mask);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto res_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
@@@ -1332,6 -1370,16 +1334,6 @@@ static bool dce83_construct
                }
        }
  
 -      pool->base.clk_mgr = dce_clk_mgr_create(ctx,
 -                      &disp_clk_regs,
 -                      &disp_clk_shift,
 -                      &disp_clk_mask);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto res_create_fail;
 -      }
 -
        pool->base.dmcu = dce_dmcu_create(ctx,
                        &dmcu_regs,
                        &dmcu_shift,
index bf978831bb0ea8af98598611ab2d8fbd99f08b1a,3d31dadfa012174da13eb900d0f5449e89a8619c..a1c824efa6864ada4037bed4abcb00bfb1fd4f00
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/delay.h>
  #include "dm_services.h"
  #include "dcn10_hubp.h"
  #include "dcn10_hubbub.h"
@@@ -263,15 -265,20 +265,15 @@@ void hubbub1_wm_change_req_wa(struct hu
                        DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
  }
  
 -void hubbub1_program_watermarks(
 +void hubbub1_program_urgent_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
                unsigned int refclk_mhz,
                bool safe_to_lower)
  {
        struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 -      /*
 -       * Need to clamp to max of the register values (i.e. no wrap)
 -       * for dcn1, all wm registers are 21-bit wide
 -       */
        uint32_t prog_wm_value;
  
 -
        /* Repeat for water mark set A, B, C and D. */
        /* clock state A */
        if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
                        watermarks->a.urgent_ns, prog_wm_value);
        }
  
 -      if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) {
 -              if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
 -                      hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
 -                      prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->a.pte_meta_urgent_ns, prog_wm_value);
 -              }
 -      }
 -
 -      if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
 -              if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
 -                              > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
 -                      hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
 -                                      watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -              }
 -
 -              if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
 -                              > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
 -                      hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
 -                                      watermarks->a.cstate_pstate.cstate_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->a.cstate_pstate.cstate_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -              }
 -      }
 -
 -      if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
 -                      > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
 -              hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
 -                              watermarks->a.cstate_pstate.pstate_change_ns;
 -              prog_wm_value = convert_and_clamp(
 -                              watermarks->a.cstate_pstate.pstate_change_ns,
 +      if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
 +              hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
 +              prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
 -              REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
 -                              DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
 -              DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
 -                      "HW register value = 0x%x\n\n",
 -                      watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
 +              REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->a.pte_meta_urgent_ns, prog_wm_value);
        }
  
        /* clock state B */
                        watermarks->b.urgent_ns, prog_wm_value);
        }
  
 -      if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) {
 -              if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
 -                      hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
 -                      prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->b.pte_meta_urgent_ns, prog_wm_value);
 -              }
 -      }
 -
 -      if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
 -              if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
 -                              > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
 -                      hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
 -                                      watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -              }
 -
 -              if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
 -                              > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
 -                      hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
 -                                      watermarks->b.cstate_pstate.cstate_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->b.cstate_pstate.cstate_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -              }
 -      }
 -
 -      if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
 -                      > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
 -              hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
 -                              watermarks->b.cstate_pstate.pstate_change_ns;
 -              prog_wm_value = convert_and_clamp(
 -                              watermarks->b.cstate_pstate.pstate_change_ns,
 +      if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
 +              hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
 +              prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
 -              REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
 -                              DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
 -              DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
 -                      "HW register value = 0x%x\n\n",
 -                      watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
 +              REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->b.pte_meta_urgent_ns, prog_wm_value);
        }
  
        /* clock state C */
                        watermarks->c.urgent_ns, prog_wm_value);
        }
  
 -      if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) {
 -              if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
 -                      hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
 -                      prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->c.pte_meta_urgent_ns, prog_wm_value);
 -              }
 -      }
 -
 -      if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
 -              if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
 -                              > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
 -                      hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
 -                                      watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -              }
 -
 -              if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
 -                              > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
 -                      hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
 -                                      watermarks->c.cstate_pstate.cstate_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->c.cstate_pstate.cstate_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -              }
 -      }
 -
 -      if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
 -                      > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
 -              hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
 -                              watermarks->c.cstate_pstate.pstate_change_ns;
 -              prog_wm_value = convert_and_clamp(
 -                              watermarks->c.cstate_pstate.pstate_change_ns,
 +      if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
 +              hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
 +              prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns,
                                refclk_mhz, 0x1fffff);
 -              REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
 -                              DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
 -              DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
 -                      "HW register value = 0x%x\n\n",
 -                      watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
 +              REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->c.pte_meta_urgent_ns, prog_wm_value);
        }
  
        /* clock state D */
                        watermarks->d.urgent_ns, prog_wm_value);
        }
  
 -      if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) {
 -              if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
 -                      hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
 -                      prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->d.pte_meta_urgent_ns, prog_wm_value);
 -              }
 +      if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
 +              hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
 +              prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->d.pte_meta_urgent_ns, prog_wm_value);
        }
 +}
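
Every branch above funnels through convert_and_clamp(): a watermark in nanoseconds becomes refclk cycles (refclk is given in MHz, so cycles = ns * mhz / 1000) clamped to the register width, 0x1fffff here since DCN1 watermark fields are 21 bits wide (per the comment removed earlier in this hunk). A sketch consistent with how the call sites use it:

#include <stdint.h>
#include <stdio.h>

static uint32_t convert_and_clamp(uint32_t wm_ns, uint32_t refclk_mhz,
                                  uint32_t clamp_value)
{
        /* widen before multiplying so large watermarks do not overflow */
        uint64_t cycles = (uint64_t)wm_ns * refclk_mhz / 1000;

        return cycles > clamp_value ? clamp_value : (uint32_t)cycles;
}

int main(void)
{
        /* 4000 ns at a 400 MHz refclk -> 1600 cycles, well under 21 bits */
        printf("%u\n", convert_and_clamp(4000, 400, 0x1fffff));
        return 0;
}
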
  
 -      if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
 -              if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
 -                              > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
 -                      hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
 -                                      watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -              }
 +void hubbub1_program_stutter_watermarks(
 +              struct hubbub *hubbub,
 +              struct dcn_watermark_set *watermarks,
 +              unsigned int refclk_mhz,
 +              bool safe_to_lower)
 +{
 +      struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 +      uint32_t prog_wm_value;
  
 -              if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
 -                              > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
 -                      hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
 -                                      watermarks->d.cstate_pstate.cstate_exit_ns;
 -                      prog_wm_value = convert_and_clamp(
 -                                      watermarks->d.cstate_pstate.cstate_exit_ns,
 -                                      refclk_mhz, 0x1fffff);
 -                      REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
 -                                      DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
 -                      DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
 -                              "HW register value = 0x%x\n",
 -                              watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -              }
 +      /* clock state A */
 +      if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
 +                      > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
 +              hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
 +                              watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 +      }
 +
 +      if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
 +                      > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
 +              hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
 +                              watermarks->a.cstate_pstate.cstate_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->a.cstate_pstate.cstate_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
 +      }
 +
 +      /* clock state B */
 +      if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
 +                      > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
 +              hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
 +                              watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
        }
  
 +      if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
 +                      > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
 +              hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
 +                              watermarks->b.cstate_pstate.cstate_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->b.cstate_pstate.cstate_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
 +      }
 +
 +      /* clock state C */
 +      if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
 +                      > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
 +              hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
 +                              watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 +      }
 +
 +      if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
 +                      > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
 +              hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
 +                              watermarks->c.cstate_pstate.cstate_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->c.cstate_pstate.cstate_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
 +      }
 +
 +      /* clock state D */
 +      if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
 +                      > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
 +              hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
 +                              watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 +      }
 +
 +      if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
 +                      > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
 +              hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
 +                              watermarks->d.cstate_pstate.cstate_exit_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->d.cstate_pstate.cstate_exit_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
 +                              DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
 +                      "HW register value = 0x%x\n",
 +                      watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
 +      }
 +
 +}
 +
 +void hubbub1_program_pstate_watermarks(
 +              struct hubbub *hubbub,
 +              struct dcn_watermark_set *watermarks,
 +              unsigned int refclk_mhz,
 +              bool safe_to_lower)
 +{
 +      struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 +      uint32_t prog_wm_value;
 +
 +      /* clock state A */
 +      if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
 +                      > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
 +              hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
 +                              watermarks->a.cstate_pstate.pstate_change_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->a.cstate_pstate.pstate_change_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
 +                              DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
 +                      "HW register value = 0x%x\n\n",
 +                      watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
 +      }
 +
 +      /* clock state B */
 +      if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
 +                      > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
 +              hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
 +                              watermarks->b.cstate_pstate.pstate_change_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->b.cstate_pstate.pstate_change_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
 +                              DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
 +                      "HW register value = 0x%x\n\n",
 +                      watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
 +      }
 +
 +      /* clock state C */
 +      if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
 +                      > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
 +              hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
 +                              watermarks->c.cstate_pstate.pstate_change_ns;
 +              prog_wm_value = convert_and_clamp(
 +                              watermarks->c.cstate_pstate.pstate_change_ns,
 +                              refclk_mhz, 0x1fffff);
 +              REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
 +                              DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
 +              DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
 +                      "HW register value = 0x%x\n\n",
 +                      watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
 +      }
 +
 +      /* clock state D */
        if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
                        > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
                hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
                                watermarks->d.cstate_pstate.pstate_change_ns;
                prog_wm_value = convert_and_clamp(
                                watermarks->d.cstate_pstate.pstate_change_ns,
                                refclk_mhz, 0x1fffff);
                REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
                                DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
        }
 +}
 +
 +void hubbub1_program_watermarks(
 +              struct hubbub *hubbub,
 +              struct dcn_watermark_set *watermarks,
 +              unsigned int refclk_mhz,
 +              bool safe_to_lower)
 +{
 +      struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 +      /*
 +       * Need to clamp to the max register value (i.e. no wrap);
 +       * for dcn1, all wm registers are 21 bits wide
 +       */
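 +      /* e.g. a 10000 ns watermark at refclk_mhz = 27 programs
 +       * 10000 * 27 / 1000 = 270 refclk cycles; results above the
 +       * 21-bit limit 0x1fffff saturate (sketch assuming the
 +       * ns-to-refclk-cycle conversion done by convert_and_clamp)
 +       */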
 +      hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
 +      hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
 +      hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
  
        REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
                        DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@@ -927,7 -905,9 +929,7 @@@ void hubbub1_construct(struct hubbub *h
        hubbub1->masks = hubbub_mask;
  
        hubbub1->debug_test_index_pstate = 0x7;
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        if (ctx->dce_version == DCN_VERSION_1_01)
                hubbub1->debug_test_index_pstate = 0xB;
 -#endif
  }
  
index 3396e499090d9452d06df81cf4016a26da5ce37e,0df729492db306124dbeac374adc301703529026..0a520591fd3a998d0b2f9d706e29ce8d6bbcc853
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/delay.h>
+ #include <linux/slab.h>
  #include "reg_helper.h"
  
  #include "core_types.h"
@@@ -726,8 -729,6 +729,8 @@@ void dcn10_link_encoder_construct
                enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
                                bp_cap_info.DP_HBR3_EN;
                enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
 +              enc10->base.features.flags.bits.DP_IS_USB_C =
 +                              bp_cap_info.DP_IS_USB_C;
        } else {
                DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
                                __func__,
@@@ -1359,5 -1360,5 +1362,5 @@@ void dcn10_aux_initialize(struct dcn10_
  
        /* 1/4 window (the maximum allowed) */
        AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0,
 -                      AUX_RX_RECEIVE_WINDOW, 1);
 +                      AUX_RX_RECEIVE_WINDOW, 0);
  }
index f6004bc53dce9c95a34edde27b6dd2ef9d08a937,6b97d45c6ebf50a3402830969fdb3d582783de30..3272030c82c5f3e9ba1dc4dce59a89685c79dabb
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  #include "dc.h"
  
  #include "resource.h"
  #include "include/irq_service_interface.h"
  #include "dcn10_resource.h"
 -
  #include "dcn10_ipp.h"
  #include "dcn10_mpc.h"
  #include "irq/dcn10/irq_service_dcn10.h"
@@@ -39,6 -42,7 +41,6 @@@
  #include "dcn10_opp.h"
  #include "dcn10_link_encoder.h"
  #include "dcn10_stream_encoder.h"
 -#include "dcn10_clk_mgr.h"
  #include "dce/dce_clock_source.h"
  #include "dce/dce_audio.h"
  #include "dce/dce_hwseq.h"
@@@ -151,7 -155,9 +153,7 @@@ enum dcn10_clk_src_array_id 
        DCN10_CLK_SRC_PLL2,
        DCN10_CLK_SRC_PLL3,
        DCN10_CLK_SRC_TOTAL,
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        DCN101_CLK_SRC_TOTAL = DCN10_CLK_SRC_PLL3
 -#endif
  };
  
  /* begin *********************
  #define MMHUB_SR(reg_name)\
                .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) +  \
                                        mm ## reg_name
 +
  /* macros to expand the register list macros defined in HW object header file
   * end *********************/
  
@@@ -442,6 -447,7 +444,6 @@@ static const struct bios_registers bios
        HUBP_REG_LIST_DCN10(id)\
  }
  
 -
  static const struct dcn_mi_registers hubp_regs[] = {
        hubp_regs(0),
        hubp_regs(1),
@@@ -457,6 -463,7 +459,6 @@@ static const struct dcn_mi_mask hubp_ma
                HUBP_MASK_SH_LIST_DCN10(_MASK)
  };
  
 -
  static const struct dcn_hubbub_registers hubbub_reg = {
                HUBBUB_REG_LIST_DCN10(0)
  };
@@@ -499,6 -506,7 +501,6 @@@ static const struct resource_caps res_c
                .num_ddc = 4,
  };
  
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
  static const struct resource_caps rv2_res_cap = {
                .num_timing_generator = 3,
                .num_opp = 3,
                .num_pll = 3,
                .num_ddc = 3,
  };
 -#endif
  
  static const struct dc_plane_cap plane_cap = {
        .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
@@@ -959,6 -968,9 +961,6 @@@ static void destruct(struct dcn10_resou
        if (pool->base.dmcu != NULL)
                dce_dmcu_destroy(&pool->base.dmcu);
  
 -      if (pool->base.clk_mgr != NULL)
 -              dce_clk_mgr_destroy(&pool->base.clk_mgr);
 -
        kfree(pool->base.pp_smu);
  }
  
@@@ -1207,38 -1219,6 +1209,38 @@@ static enum dc_status dcn10_get_default
        return result;
  }
  
 +struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
 +              struct resource_context *res_ctx,
 +              const struct resource_pool *pool,
 +              struct dc_stream_state *stream)
 +{
 +      int i;
 +      int j = -1;
 +      struct dc_link *link = stream->link;
 +
 +      for (i = 0; i < pool->stream_enc_count; i++) {
 +              if (!res_ctx->is_stream_enc_acquired[i] &&
 +                              pool->stream_enc[i]) {
 +                      /* Store the first available encoder for the
 +                       * second display in an MST daisy-chain use case
 +                       */
 +                      j = i;
 +                      if (pool->stream_enc[i]->id ==
 +                                      link->link_enc->preferred_engine)
 +                              return pool->stream_enc[i];
 +              }
 +      }
 +
 +      /*
 +       * For CZ and later, we can allow DIG FE and BE to differ for all display types
 +       */
 +
 +      if (j >= 0)
 +              return pool->stream_enc[j];
 +
 +      return NULL;
 +}
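 +
 +/* Note: the helper above returns the encoder that matches the link's
 + * preferred DIG engine when it is free, otherwise the first free
 + * encoder, or NULL when none is available.
 + */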
 +
  static const struct dc_cap_funcs cap_funcs = {
        .get_dcc_compression_cap = dcn10_get_dcc_compression_cap
  };
@@@ -1251,8 -1231,7 +1253,8 @@@ static const struct resource_funcs dcn1
        .validate_plane = dcn10_validate_plane,
        .validate_global = dcn10_validate_global,
        .add_stream_to_ctx = dcn10_add_stream_to_ctx,
 -      .get_default_swizzle_mode = dcn10_get_default_swizzle_mode
 +      .get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
 +      .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
  };
  
  static uint32_t read_pipe_fuses(struct dc_context *ctx)
@@@ -1275,9 -1254,11 +1277,9 @@@ static bool construct
  
        ctx->dc_bios->regs = &bios_regs;
  
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        if (ctx->dce_version == DCN_VERSION_1_01)
                pool->base.res_cap = &rv2_res_cap;
        else
 -#endif
                pool->base.res_cap = &res_cap;
        pool->base.funcs = &dcn10_res_pool_funcs;
  
        /* max pipe num for ASIC before check pipe fuses */
        pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
  
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        if (dc->ctx->dce_version == DCN_VERSION_1_01)
                pool->base.pipe_count = 3;
 -#endif
        dc->caps.max_video_width = 3840;
        dc->caps.max_downscale_ratio = 200;
        dc->caps.i2c_speed_in_khz = 100;
                                CLOCK_SOURCE_COMBO_PHY_PLL2,
                                &clk_src_regs[2], false);
  
 -#ifdef CONFIG_DRM_AMD_DC_DCN1_01
        if (dc->ctx->dce_version == DCN_VERSION_1_0) {
                pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
                                dcn10_clock_source_create(ctx, ctx->dc_bios,
                                        CLOCK_SOURCE_COMBO_PHY_PLL3,
                                        &clk_src_regs[3], false);
        }
 -#else
 -      pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
 -                      dcn10_clock_source_create(ctx, ctx->dc_bios,
 -                              CLOCK_SOURCE_COMBO_PHY_PLL3,
 -                              &clk_src_regs[3], false);
 -#endif
  
        pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
  
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        if (dc->ctx->dce_version == DCN_VERSION_1_01)
                pool->base.clk_src_count = DCN101_CLK_SRC_TOTAL;
 -#endif
  
        pool->base.dp_clock_source =
                        dcn10_clock_source_create(ctx, ctx->dc_bios,
                        goto fail;
                }
        }
 -      pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
 -      if (pool->base.clk_mgr == NULL) {
 -              dm_error("DC: failed to create display clock!\n");
 -              BREAK_TO_DEBUGGER();
 -              goto fail;
 -      }
  
        pool->base.dmcu = dcn10_dmcu_create(ctx,
                        &dmcu_regs,
        memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
        memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
  
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        if (dc->ctx->dce_version == DCN_VERSION_1_01) {
                struct dcn_soc_bounding_box *dcn_soc = dc->dcn_soc;
                struct dcn_ip_params *dcn_ip = dc->dcn_ip;
                dcn_soc->dram_clock_change_latency = 23;
                dcn_ip->max_num_dpp = 3;
        }
 -#endif
        if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
                dc->dcn_soc->urgent_latency = 3;
                dc->debug.disable_dmcu = true;
index 2d15ae6642261ba719a64f6b86784907927e2a73,c33153d696ac28e56b16671647f25932d63cc047..b9ffbf6b58ff23ed4e63bfe50a3f8aae86c21cd1
@@@ -23,6 -23,7 +23,7 @@@
   *
   */
  
+ #include <linux/delay.h>
  
  #include "dc_bios_types.h"
  #include "dcn10_stream_encoder.h"
@@@ -415,7 -416,6 +416,7 @@@ void enc1_stream_encoder_dp_set_stream_
        case COLOR_SPACE_APPCTRL:
        case COLOR_SPACE_CUSTOMPOINTS:
        case COLOR_SPACE_UNKNOWN:
 +      case COLOR_SPACE_YCBCR709_BLACK:
                /* do nothing */
                break;
        }
                hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom);
  }
  
 -static void enc1_stream_encoder_set_stream_attribute_helper(
 +void enc1_stream_encoder_set_stream_attribute_helper(
                struct dcn10_stream_encoder *enc1,
                struct dc_crtc_timing *crtc_timing)
  {
@@@ -727,9 -727,11 +728,9 @@@ void enc1_stream_encoder_update_dp_info
                                3,  /* packetIndex */
                                &info_frame->hdrsmd);
  
 -      if (info_frame->dpsdp.valid)
 -              enc1_update_generic_info_packet(
 -                              enc1,
 -                              4,/* packetIndex */
 -                              &info_frame->dpsdp);
 +      /* packetIndex 4 is reserved for the send-immediate SDP message;
 +       * use another packetIndex (such as 5 or 6) for other info packets
 +       */
  
        /* enable/disable transmission of packet(s).
         * If enabled, packet transmission begins on the next frame
        REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
        REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
        REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
 -      REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, info_frame->dpsdp.valid);
 +
 +
 +      /* This bit is the master enable bit.
 +       * When enabling the secondary stream engine,
 +       * this master bit must also be set.
 +       * This register is shared with the audio info frame.
 +       * Therefore we need to enable the master bit
 +       * if at least one of the fields is not 0
 +       */
 +      value = REG_READ(DP_SEC_CNTL);
 +      if (value)
 +              REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
 +}
 +
 +void enc1_stream_encoder_send_immediate_sdp_message(
 +      struct stream_encoder *enc,
 +      const uint8_t *custom_sdp_message,
 +      unsigned int sdp_message_size)
 +{
 +      struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
 +      uint32_t value = 0;
 +
 +      /* TODO (FPGA): figure out a proper value for max_retries when
 +       * polling for lock; use 50 for now.
 +       */
 +      uint32_t max_retries = 50;
 +
 +      /* wait for any pending GSP4 transmission to complete */
 +      REG_WAIT(DP_SEC_CNTL2, DP_SEC_GSP4_SEND_PENDING,
 +              0, 10, max_retries);
 +
 +      /* disable GSP4 transmitting */
 +      REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND, 0);
 +
 +      /* transmit GSP4 at the earliest time in a frame */
 +      REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND_ANY_LINE, 1);
 +
 +      /* we need to turn on the clock before programming the AFMT block */
 +      REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
 +
 +      /* check if HW is still reading GSP memory */
 +      REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
 +                      0, 10, max_retries);
 +
 +      /* if HW is still reading GSP memory after the wait above,
 +       * something is wrong; clear the GSP memory access conflict,
 +       * since SW is about to write to GSP memory
 +       */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
 +
 +      /* use generic packet 4 for immediate sdp message */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
 +                      AFMT_GENERIC_INDEX, 4);
 +
 +      /* write generic packet header
 +       * (4th byte is for GENERIC0 only)
 +       */
 +      REG_SET_4(AFMT_GENERIC_HDR, 0,
 +                      AFMT_GENERIC_HB0, custom_sdp_message[0],
 +                      AFMT_GENERIC_HB1, custom_sdp_message[1],
 +                      AFMT_GENERIC_HB2, custom_sdp_message[2],
 +                      AFMT_GENERIC_HB3, custom_sdp_message[3]);
 +
 +      /* write generic packet contents
 +       * (the last 4 bytes are never used);
 +       * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers
 +       */
 +      {
 +              const uint32_t *content =
 +                      (const uint32_t *) &custom_sdp_message[4];
 +
 +              REG_WRITE(AFMT_GENERIC_0, *content++);
 +              REG_WRITE(AFMT_GENERIC_1, *content++);
 +              REG_WRITE(AFMT_GENERIC_2, *content++);
 +              REG_WRITE(AFMT_GENERIC_3, *content++);
 +              REG_WRITE(AFMT_GENERIC_4, *content++);
 +              REG_WRITE(AFMT_GENERIC_5, *content++);
 +              REG_WRITE(AFMT_GENERIC_6, *content++);
 +              REG_WRITE(AFMT_GENERIC_7, *content);
 +      }
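 +      /* note: the writes above consume 4 header bytes plus 8 dwords of
 +       * payload, so custom_sdp_message is assumed to supply at least
 +       * 36 bytes (inferred from the register accesses above)
 +       */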
 +
 +      /* check whether a double-buffered update of the GENERIC4
 +       * registers in immediate mode is pending
 +       */
 +      REG_WAIT(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING,
 +                      0, 10, max_retries);
 +
 +      /* atomically update the double-buffered GENERIC4 registers
 +       * in immediate mode
 +       */
 +      REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
 +                      AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
 +
 +      /* enable GSP4 transmitting */
 +      REG_UPDATE(DP_SEC_CNTL2, DP_SEC_GSP4_SEND, 1);
  
        /* This bit is the master enable bit.
         * When enabling secondary stream engine,
@@@ -1091,6 -999,19 +1092,6 @@@ union audio_cea_channels 
        } channels;
  };
  
 -struct audio_clock_info {
 -      /* pixel clock frequency*/
 -      uint32_t pixel_clock_in_10khz;
 -      /* N - 32KHz audio */
 -      uint32_t n_32khz;
 -      /* CTS - 32KHz audio*/
 -      uint32_t cts_32khz;
 -      uint32_t n_44khz;
 -      uint32_t cts_44khz;
 -      uint32_t n_48khz;
 -      uint32_t cts_48khz;
 -};
 -
  /* 25.2MHz/1.001*/
  /* 25.2MHz/1.001*/
  /* 25.2MHz*/
@@@ -1193,7 -1114,7 +1194,7 @@@ static union audio_cea_channels speaker
        return cea_channels;
  }
  
 -static void get_audio_clock_info(
 +void get_audio_clock_info(
        enum dc_color_depth color_depth,
        uint32_t crtc_pixel_clock_in_khz,
        uint32_t actual_pixel_clock_in_khz,
@@@ -1397,7 -1318,7 +1398,7 @@@ static void enc1_se_setup_dp_audio
        REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
  }
  
 -static void enc1_se_enable_audio_clock(
 +void enc1_se_enable_audio_clock(
        struct stream_encoder *enc,
        bool enable)
  {
         */
  }
  
 -static void enc1_se_enable_dp_audio(
 +void enc1_se_enable_dp_audio(
        struct stream_encoder *enc)
  {
        struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@@ -1542,8 -1463,6 +1543,8 @@@ static const struct stream_encoder_func
                enc1_stream_encoder_stop_hdmi_info_packets,
        .update_dp_info_packets =
                enc1_stream_encoder_update_dp_info_packets,
 +      .send_immediate_sdp_message =
 +              enc1_stream_encoder_send_immediate_sdp_message,
        .stop_dp_info_packets =
                enc1_stream_encoder_stop_dp_info_packets,
        .dp_blank =
index a610fae162807d2c56efb9e0e7ca1f71ca7cdbce,a97972ebd4b7c2313548a7fa0680fec708b75529..a15aca47342cbfe271238cecc103b486f2e39b70
@@@ -23,6 -23,8 +23,8 @@@
   *
   */
  
+ #include <linux/slab.h>
  #include "dm_services.h"
  
  /*
@@@ -84,6 -86,10 +86,6 @@@ bool dal_hw_factory_init
                return true;
  #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
        case DCN_VERSION_1_0:
 -              dal_hw_factory_dcn10_init(factory);
 -              return true;
 -#endif
 -#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
        case DCN_VERSION_1_01:
                dal_hw_factory_dcn10_init(factory);
                return true;
index 89a65e1d83176b4b254879a26f42eb41d0571bdc,67001fca688b87465fdca7a8dcf6a7265d6e55be..b31af9be41eb89049167fc259c951322b50c875b
@@@ -23,6 -23,9 +23,9 @@@
   *
   */
  
+ #include <linux/mm.h>
+ #include <linux/slab.h>
  #include "dc.h"
  #include "opp.h"
  #include "color_gamma.h"
@@@ -240,27 -243,16 +243,27 @@@ struct dividers 
        struct fixed31_32 divider3;
  };
  
 -static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4)
 +enum gamma_type_index {
 +      gamma_type_index_2_4,
 +      gamma_type_index_2_2,
 +      gamma_type_index_2_2_flat
 +};
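 +
 +/* Callers map transfer functions onto these indices: TRANSFER_FUNCTION_SRGB
 + * selects gamma_type_index_2_4, TRANSFER_FUNCTION_GAMMA22 selects
 + * gamma_type_index_2_2_flat, and anything else falls back to
 + * gamma_type_index_2_2 (see the ternary chains in the callers below).
 + */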
 +
 +static void build_coefficients(struct gamma_coefficients *coefficients, enum gamma_type_index type)
  {
 -      static const int32_t numerator01[] = { 31308, 180000};
 -      static const int32_t numerator02[] = { 12920, 4500};
 -      static const int32_t numerator03[] = { 55, 99};
 -      static const int32_t numerator04[] = { 55, 99};
 -      static const int32_t numerator05[] = { 2400, 2200};
 +      static const int32_t numerator01[] = { 31308,   180000, 0};
 +      static const int32_t numerator02[] = { 12920,   4500,   0};
 +      static const int32_t numerator03[] = { 55,              99,             0};
 +      static const int32_t numerator04[] = { 55,              99,             0};
 +      static const int32_t numerator05[] = { 2400,    2200, 2200};
  
        uint32_t i = 0;
 -      uint32_t index = is_2_4 == true ? 0:1;
 +      uint32_t index = 0;
 +
 +      if (type == gamma_type_index_2_2)
 +              index = 1;
 +      else if (type == gamma_type_index_2_2_flat)
 +              index = 2;
  
        do {
                coefficients->a0[i] = dc_fixpt_from_fraction(
@@@ -708,7 -700,7 +711,7 @@@ static void build_de_pq(struct pwl_floa
  
  static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
                uint32_t hw_points_num,
 -              const struct hw_x_point *coordinate_x, bool is_2_4)
 +              const struct hw_x_point *coordinate_x, enum gamma_type_index type)
  {
        uint32_t i;
  
        struct pwl_float_data_ex *rgb = rgb_regamma;
        const struct hw_x_point *coord_x = coordinate_x;
  
 -      build_coefficients(&coeff, is_2_4);
 +      build_coefficients(&coeff, type);
  
        i = 0;
  
@@@ -903,13 -895,13 +906,13 @@@ static bool build_freesync_hdr(struct p
  
  static void build_degamma(struct pwl_float_data_ex *curve,
                uint32_t hw_points_num,
 -              const struct hw_x_point *coordinate_x, bool is_2_4)
 +              const struct hw_x_point *coordinate_x, enum gamma_type_index type)
  {
        uint32_t i;
        struct gamma_coefficients coeff;
        uint32_t begin_index, end_index;
  
 -      build_coefficients(&coeff, is_2_4);
 +      build_coefficients(&coeff, type);
        i = 0;
  
        /* X points is 2^-25 to 2^7
@@@ -1569,15 -1561,13 +1572,15 @@@ bool mod_color_calculate_regamma_params
                        output_tf->tf == TRANSFER_FUNCTION_SRGB) {
                if (ramp == NULL)
                        return true;
 -              if (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))
 +              if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
 +                              (!mapUserRamp && ramp->type == GAMMA_RGB_256))
                        return true;
        }
  
        output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
  
 -      if (ramp && (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
 +      if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
 +                      (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
                rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
                            sizeof(*rgb_user),
                            GFP_KERNEL);
                                coordinates_x,
                                output_tf->sdr_ref_white_level);
        } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
 -                      fs_params != NULL) {
 +                      fs_params != NULL && fs_params->skip_tm == 0) {
                build_freesync_hdr(rgb_regamma,
                                MAX_HW_POINTS,
                                coordinates_x,
  
                build_regamma(rgb_regamma,
                                MAX_HW_POINTS,
 -                              coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? true:false);
 +                              coordinates_x, tf == TRANSFER_FUNCTION_SRGB ? gamma_type_index_2_4 :
 +                                      tf == TRANSFER_FUNCTION_GAMMA22 ?
 +                                      gamma_type_index_2_2_flat : gamma_type_index_2_2);
        }
        map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
                        coordinates_x, axis_x, rgb_regamma,
@@@ -1847,9 -1835,7 +1850,9 @@@ bool mod_color_calculate_degamma_params
                build_degamma(curve,
                                MAX_HW_POINTS,
                                coordinates_x,
 -                              tf == TRANSFER_FUNCTION_SRGB ? true : false);
 +                              tf == TRANSFER_FUNCTION_SRGB ?
 +                              gamma_type_index_2_4 : tf == TRANSFER_FUNCTION_GAMMA22 ?
 +                              gamma_type_index_2_2_flat : gamma_type_index_2_2);
        else if (tf == TRANSFER_FUNCTION_LINEAR) {
                // just copy coordinates_x into curve
                i = 0;
@@@ -1949,10 -1935,7 +1952,10 @@@ bool  mod_color_calculate_curve(enum dc
  
                build_regamma(rgb_regamma,
                                MAX_HW_POINTS,
 -                              coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
 +                              coordinates_x,
 +                              trans == TRANSFER_FUNCTION_SRGB ?
 +                              gamma_type_index_2_4 : trans == TRANSFER_FUNCTION_GAMMA22 ?
 +                              gamma_type_index_2_2_flat : gamma_type_index_2_2);
                for (i = 0; i <= MAX_HW_POINTS ; i++) {
                        points->red[i]    = rgb_regamma[i].r;
                        points->green[i]  = rgb_regamma[i].g;
@@@ -2022,8 -2005,7 +2025,8 @@@ bool  mod_color_calculate_degamma_curve
  
                kvfree(rgb_degamma);
        } else if (trans == TRANSFER_FUNCTION_SRGB ||
 -                        trans == TRANSFER_FUNCTION_BT709) {
 +                        trans == TRANSFER_FUNCTION_BT709 ||
 +                        trans == TRANSFER_FUNCTION_GAMMA22) {
                rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
                                       sizeof(*rgb_degamma),
                                       GFP_KERNEL);
  
                build_degamma(rgb_degamma,
                                MAX_HW_POINTS,
 -                              coordinates_x, trans == TRANSFER_FUNCTION_SRGB ? true:false);
 +                              coordinates_x,
 +                              trans == TRANSFER_FUNCTION_SRGB ?
 +                              gamma_type_index_2_4 : trans == TRANSFER_FUNCTION_GAMMA22 ?
 +                              gamma_type_index_2_2_flat : gamma_type_index_2_2);
                for (i = 0; i <= MAX_HW_POINTS ; i++) {
                        points->red[i]    = rgb_degamma[i].r;
                        points->green[i]  = rgb_degamma[i].g;
index 3026c7e2d3ea6c38263af10a854ce35a7e00873b,a4c9d9267f8e3f3915c21ce823c93e33028aff29..9c67adee2c9e7af172eaa36cf5963362583b1fa7
@@@ -20,9 -20,9 +20,9 @@@
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include "pp_debug.h"
  #include <linux/firmware.h>
- #include <drm/drmP.h>
+ #include "pp_debug.h"
  #include "amdgpu.h"
  #include "amdgpu_smu.h"
  #include "soc15_common.h"
  #include "atom.h"
  #include "amd_pcie.h"
  
 +int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
 +{
 +      int ret = 0;
 +
 +      if (!if_version && !smu_version)
 +              return -EINVAL;
 +
 +      if (if_version) {
 +              ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
 +              if (ret)
 +                      return ret;
 +
 +              ret = smu_read_smc_arg(smu, if_version);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      if (smu_version) {
 +              ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
 +              if (ret)
 +                      return ret;
 +
 +              ret = smu_read_smc_arg(smu, smu_version);
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      return ret;
 +}
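 +
 +/* Either out-parameter may be NULL; e.g. a caller interested only in
 + * the SMU firmware version can do (illustrative call, not part of this
 + * patch):
 + *
 + *     uint32_t fw_ver;
 + *     int err = smu_get_smc_version(smu, NULL, &fw_ver);
 + */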
 +
  int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
                           bool gate)
  {
@@@ -198,8 -168,6 +198,8 @@@ int smu_sys_set_pp_table(struct smu_con
        ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
        int ret = 0;
  
 +      if (!smu->pm_enabled)
 +              return -EINVAL;
        if (header->usStructureSize != size) {
                pr_err("pp table size not matched !\n");
                return -EIO;
@@@ -235,8 -203,6 +235,8 @@@ int smu_feature_init_dpm(struct smu_con
        int ret = 0;
        uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
  
 +      if (!smu->pm_enabled)
 +              return ret;
        mutex_lock(&feature->mutex);
        bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
        mutex_unlock(&feature->mutex);
@@@ -348,7 -314,6 +348,7 @@@ static int smu_early_init(void *handle
        struct smu_context *smu = &adev->smu;
  
        smu->adev = adev;
 +      smu->pm_enabled = !!amdgpu_dpm;
        mutex_init(&smu->mutex);
  
        return smu_set_funcs(adev);
@@@ -358,9 -323,6 +358,9 @@@ static int smu_late_init(void *handle
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 +
 +      if (!smu->pm_enabled)
 +              return 0;
        mutex_lock(&smu->mutex);
        smu_handle_task(&adev->smu,
                        smu->smu_dpm.dpm_level,
@@@ -444,6 -406,9 +444,6 @@@ static int smu_sw_init(void *handle
        struct smu_context *smu = &adev->smu;
        int ret;
  
 -      if (!is_support_sw_smu(adev))
 -              return -EINVAL;
 -
        smu->pool_size = adev->pm.smu_prv_buffer_size;
        smu->smu_feature.feature_num = SMU_FEATURE_MAX;
        mutex_init(&smu->smu_feature.mutex);
@@@ -495,6 -460,9 +495,6 @@@ static int smu_sw_fini(void *handle
        struct smu_context *smu = &adev->smu;
        int ret;
  
 -      if (!is_support_sw_smu(adev))
 -              return -EINVAL;
 -
        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
@@@ -644,6 -612,10 +644,6 @@@ static int smu_smc_table_hw_init(struc
                 * check if the format_revision in vbios is up to pptable header
                 * version, and the structure size is not 0.
                 */
 -              ret = smu_get_clk_info_from_vbios(smu);
 -              if (ret)
 -                      return ret;
 -
                ret = smu_check_pptable(smu);
                if (ret)
                        return ret;
         */
        ret = smu_set_tool_table_location(smu);
  
 +      if (!smu_is_dpm_running(smu))
 +              pr_info("dpm has been disabled\n");
 +
        return ret;
  }
  
@@@ -819,6 -788,9 +819,6 @@@ static int smu_hw_init(void *handle
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
  
 -      if (!is_support_sw_smu(adev))
 -              return -EINVAL;
 -
        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                ret = smu_load_microcode(smu);
                if (ret)
  
        mutex_unlock(&smu->mutex);
  
 -      adev->pm.dpm_enabled = true;
 +      if (!smu->pm_enabled)
 +              adev->pm.dpm_enabled = false;
 +      else
 +              adev->pm.dpm_enabled = true;
  
        pr_info("SMU is initialized successfully!\n");
  
@@@ -880,6 -849,9 +880,6 @@@ static int smu_hw_fini(void *handle
        struct smu_table_context *table_context = &smu->smu_table;
        int ret = 0;
  
 -      if (!is_support_sw_smu(adev))
 -              return -EINVAL;
 -
        kfree(table_context->driver_pptable);
        table_context->driver_pptable = NULL;
  
@@@ -934,6 -906,9 +934,6 @@@ static int smu_suspend(void *handle
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
  
 -      if (!is_support_sw_smu(adev))
 -              return -EINVAL;
 -
        ret = smu_system_features_control(smu, false);
        if (ret)
                return ret;
@@@ -949,6 -924,9 +949,6 @@@ static int smu_resume(void *handle
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
  
 -      if (!is_support_sw_smu(adev))
 -              return -EINVAL;
 -
        pr_info("SMU is resuming...\n");
  
        mutex_lock(&smu->mutex);
@@@ -977,7 -955,7 +977,7 @@@ int smu_display_configuration_change(st
        int index = 0;
        int num_of_active_display = 0;
  
 -      if (!is_support_sw_smu(smu->adev))
 +      if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
                return -EINVAL;
  
        if (!display_config)
@@@ -1105,7 -1083,7 +1105,7 @@@ static int smu_enable_umd_pstate(void *
  
        struct smu_context *smu = (struct smu_context*)(handle);
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
 -      if (!smu_dpm_ctx->dpm_context)
 +      if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)
                return -EINVAL;
  
        if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@@ -1148,8 -1126,6 +1148,8 @@@ int smu_adjust_power_state_dynamic(stru
        long workload;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
  
 +      if (!smu->pm_enabled)
 +              return -EINVAL;
        if (!skip_display_settings) {
                ret = smu_display_config_changed(smu);
                if (ret) {
                }
        }
  
 +      if (!smu->pm_enabled)
 +              return -EINVAL;
        ret = smu_apply_clocks_adjust_rules(smu);
        if (ret) {
                pr_err("Failed to apply clocks adjust rules!");
index 16591be8b0ca3259389347de78066c02eb06f747,8f81c25c523d03726bc9699b0ddcc3275575f551..c5986d28fbf12b8260f3f94f0c64de6deaa21d61
@@@ -24,6 -24,7 +24,7 @@@
  #include <linux/delay.h>
  #include <linux/fb.h>
  #include <linux/module.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  #include <asm/div64.h>
  #include <drm/amdgpu_drm.h>
@@@ -3532,12 -3533,9 +3533,12 @@@ static int smu7_read_sensor(struct pp_h
                *size = 4;
                return 0;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
 +      case AMDGPU_PP_SENSOR_MEM_LOAD:
                offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
                                                                SMU_SoftRegisters,
 -                                                              AverageGraphicsActivity);
 +                                                              (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
 +                                                              AverageGraphicsActivity:
 +                                                              AverageMemoryActivity);
  
                activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
                activity_percent += 0x80;
index 1d9bb29adaef5ffc24bbba4fb81371689aba1c9e,8e05c14d2d0db2c35c5f431fca3a93240632134a..3be8eb21fd6e6ae66c36aaf32d7a0781015e284e
@@@ -24,6 -24,7 +24,7 @@@
  #include <linux/delay.h>
  #include <linux/fb.h>
  #include <linux/module.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  
  #include "hwmgr.h"
@@@ -356,7 -357,6 +357,7 @@@ static void vega10_init_dpm_defaults(st
        struct vega10_hwmgr *data = hwmgr->backend;
        int i;
        uint32_t sub_vendor_id, hw_revision;
 +      uint32_t top32, bottom32;
        struct amdgpu_device *adev = hwmgr->adev;
  
        vega10_initialize_power_tune_defaults(hwmgr);
                (hw_revision == 0) &&
                (sub_vendor_id != 0x1002))
                data->smu_features[GNLD_PCC_LIMIT].supported = true;
 +
 +      /* Get the serial number to turn into a unique ID */
 +      smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
 +      top32 = smum_get_argument(hwmgr);
 +      smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
 +      bottom32 = smum_get_argument(hwmgr);
 +
 +      adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
  }
  
  #ifdef PPLIB_VEGA10_EVV_SUPPORT
@@@ -2276,8 -2268,8 +2277,8 @@@ static int vega10_populate_avfs_paramet
                        pp_table->AcgAvfsGb.m1                   = avfs_params.ulAcgGbFuseTableM1;
                        pp_table->AcgAvfsGb.m2                   = avfs_params.ulAcgGbFuseTableM2;
                        pp_table->AcgAvfsGb.b                    = avfs_params.ulAcgGbFuseTableB;
 -                      pp_table->AcgAvfsGb.m1_shift             = 0;
 -                      pp_table->AcgAvfsGb.m2_shift             = 0;
 +                      pp_table->AcgAvfsGb.m1_shift             = 24;
 +                      pp_table->AcgAvfsGb.m2_shift             = 12;
                        pp_table->AcgAvfsGb.b_shift              = 0;
  
                } else {
@@@ -2373,10 -2365,6 +2374,10 @@@ static int vega10_avfs_enable(struct pp
        struct vega10_hwmgr *data = hwmgr->backend;
  
        if (data->smu_features[GNLD_AVFS].supported) {
 +              /* Already enabled or disabled */
 +              if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
 +                      return 0;
 +
                if (enable) {
                        PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
                                        true,
@@@ -2479,6 -2467,11 +2480,6 @@@ static void vega10_check_dpm_table_upda
                        return;
                }
        }
 -
 -      if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
 -              data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
 -              data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
 -      }
  }
  
  /**
@@@ -3691,10 -3684,6 +3692,10 @@@ static int vega10_set_power_state_tasks
  
        vega10_update_avfs(hwmgr);
  
 +      /*
 +       * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
 +       * That will help to keep AVFS disabled.
 +       */
        data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
  
        return 0;
@@@ -3797,18 -3786,6 +3798,18 @@@ static int vega10_read_sensor(struct pp
                *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
                *size = 4;
                break;
 +      case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
 +              smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
 +              *((uint32_t *)value) = smum_get_argument(hwmgr) *
 +                      PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +              *size = 4;
 +              break;
 +      case AMDGPU_PP_SENSOR_MEM_TEMP:
 +              smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
 +              *((uint32_t *)value) = smum_get_argument(hwmgr) *
 +                      PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +              *size = 4;
 +              break;
        case AMDGPU_PP_SENSOR_UVD_POWER:
                *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
                *size = 4;
@@@ -4876,22 -4853,12 +4877,22 @@@ static int vega10_notify_cac_buffer_inf
  static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
                struct PP_TemperatureRange *thermal_data)
  {
 -      struct phm_ppt_v2_information *table_info =
 -                      (struct phm_ppt_v2_information *)hwmgr->pptable;
 +      struct vega10_hwmgr *data = hwmgr->backend;
 +      PPTable_t *pp_table = &(data->smc_state_table.pp_table);
  
        memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
  
 -      thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
 +      thermal_data->max = pp_table->TedgeLimit *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      thermal_data->mem_crit_max = pp_table->ThbmLimit *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
                PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
  
        return 0;
@@@ -5022,70 -4989,13 +5023,70 @@@ static bool vega10_check_clk_voltage_va
        return true;
  }
  
 +static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
 +{
 +      struct vega10_hwmgr *data = hwmgr->backend;
 +      struct pp_power_state *ps = hwmgr->request_ps;
 +      struct vega10_power_state *vega10_ps;
 +      struct vega10_single_dpm_table *gfx_dpm_table =
 +              &data->dpm_table.gfx_table;
 +      struct vega10_single_dpm_table *soc_dpm_table =
 +              &data->dpm_table.soc_table;
 +      struct vega10_single_dpm_table *mem_dpm_table =
 +              &data->dpm_table.mem_table;
 +      int max_level;
 +
 +      if (!ps)
 +              return;
 +
 +      vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
 +      max_level = vega10_ps->performance_level_count - 1;
 +
 +      if (vega10_ps->performance_levels[max_level].gfx_clock !=
 +          gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
 +              vega10_ps->performance_levels[max_level].gfx_clock =
 +                      gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
 +
 +      if (vega10_ps->performance_levels[max_level].soc_clock !=
 +          soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
 +              vega10_ps->performance_levels[max_level].soc_clock =
 +                      soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
 +
 +      if (vega10_ps->performance_levels[max_level].mem_clock !=
 +          mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
 +              vega10_ps->performance_levels[max_level].mem_clock =
 +                      mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
 +
 +      if (!hwmgr->ps)
 +              return;
 +
 +      ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
 +      vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
 +      max_level = vega10_ps->performance_level_count - 1;
 +
 +      if (vega10_ps->performance_levels[max_level].gfx_clock !=
 +          gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
 +              vega10_ps->performance_levels[max_level].gfx_clock =
 +                      gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
 +
 +      if (vega10_ps->performance_levels[max_level].soc_clock !=
 +          soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
 +              vega10_ps->performance_levels[max_level].soc_clock =
 +                      soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
 +
 +      if (vega10_ps->performance_levels[max_level].mem_clock !=
 +          mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
 +              vega10_ps->performance_levels[max_level].mem_clock =
 +                      mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
 +}
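 +
 +/* The helper above keeps the top performance level of both the requested
 + * power state and the last power state in hwmgr->ps in sync with the
 + * highest gfx/soc/mem DPM table entries.
 + */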
 +
  static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
                                                enum PP_OD_DPM_TABLE_COMMAND type)
  {
        struct vega10_hwmgr *data = hwmgr->backend;
        struct phm_ppt_v2_information *table_info = hwmgr->pptable;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
 -      struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
 +      struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;
  
        struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
                                                        &data->odn_dpm_table.vdd_dep_on_socclk;
                                        break;
                        }
                        if (j == od_vddc_lookup_table->count) {
 -                              od_vddc_lookup_table->entries[j-1].us_vdd =
 +                              j = od_vddc_lookup_table->count - 1;
 +                              od_vddc_lookup_table->entries[j].us_vdd =
                                        podn_vdd_dep->entries[i].vddc;
                                data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
                        }
                }
                dpm_table = &data->dpm_table.soc_table;
                for (i = 0; i < dep_table->count; i++) {
 -                      if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
 -                                      dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
 +                      if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
 +                                      dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
                                data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
 -                              podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
 -                              dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
 +                              for (; (i < dep_table->count) &&
 +                                     (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
 +                                      podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
 +                                      dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
 +                              }
 +                              break;
 +                      } else {
 +                              dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
 +                              podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
 +                              podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
 +                              podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
                        }
                }
                if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
 -                                      podn_vdd_dep->entries[dep_table->count-1].clk) {
 +                                      podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
                        data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
 -                      podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
 -                      dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
 +                      podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
 +                              podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
 +                      dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
 +                              podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
                }
                if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
 -                                      podn_vdd_dep->entries[dep_table->count-1].vddInd) {
 +                                      podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
                        data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
 -                      podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
 +                      podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
 +                              podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
                }
        }
 +      vega10_odn_update_power_state(hwmgr);
  }
  
  static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
        } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
                memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
                vega10_odn_initial_default_setting(hwmgr);
 +              vega10_odn_update_power_state(hwmgr);
 +              /* force to update all clock tables */
 +              data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
 +                                            DPMTABLE_UPDATE_MCLK |
 +                                            DPMTABLE_UPDATE_SOCCLK;
                return 0;
        } else if (PP_OD_COMMIT_DPM_TABLE == type) {
                vega10_check_dpm_table_updated(hwmgr);
@@@ -5311,12 -5202,8 +5312,12 @@@ static const struct pp_hwmgr_func vega1
  
  int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
  {
 +      struct amdgpu_device *adev = hwmgr->adev;
 +
        hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
        hwmgr->pptable_func = &vega10_pptable_funcs;
 +      if (amdgpu_passthrough(adev))
 +              return vega10_baco_set_cap(hwmgr);
  
        return 0;
  }
index 83d22cdeaa29bb2cfbebd8a0c12bf956ad18ec89,b12100de308483745b42dee12b0f076c01ca6e70..f29af5ca0aa02a449713f447acde7debfa1d8371
@@@ -21,6 -21,7 +21,7 @@@
   *
   */
  #include <linux/module.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  #include <linux/fb.h>
  
@@@ -1371,27 -1372,3 +1372,27 @@@ int vega10_get_powerplay_table_entry(st
  
        return result;
  }
 +
 +int vega10_baco_set_cap(struct pp_hwmgr *hwmgr)
 +{
 +      int result = 0;
 +
 +      const ATOM_Vega10_POWERPLAYTABLE *powerplay_table;
 +
 +      powerplay_table = get_powerplay_table(hwmgr);
 +
 +      PP_ASSERT_WITH_CODE((powerplay_table != NULL),
 +              "Missing PowerPlay Table!", return -1);
 +
 +      result = check_powerplay_tables(hwmgr, powerplay_table);
 +
 +      PP_ASSERT_WITH_CODE((result == 0),
 +                          "check_powerplay_tables failed", return result);
 +
 +      set_hw_cap(
 +                      hwmgr,
 +                      0 != (le32_to_cpu(powerplay_table->ulPlatformCaps) & ATOM_VEGA10_PP_PLATFORM_CAP_BACO),
 +                      PHM_PlatformCaps_BACO);
 +      return result;
 +}
 +
index d2eeb624048478b24433295562d63907a7e96b51,d5d317f17c2894e0df6b2e96d1bf3975bf7704d8..463275f88e89e443540969c74a45dd7979671f0b
   * OTHER DEALINGS IN THE SOFTWARE.
   */
  
- #include "pp_debug.h"
  #include <linux/firmware.h>
+ #include <linux/module.h>
+ #include "pp_debug.h"
  #include "amdgpu.h"
  #include "amdgpu_smu.h"
  #include "atomfirmware.h"
@@@ -223,27 -225,20 +225,27 @@@ static int smu_v11_0_check_fw_status(st
  
  static int smu_v11_0_check_fw_version(struct smu_context *smu)
  {
 -      uint32_t smu_version = 0xff;
 +      uint32_t if_version = 0xff, smu_version = 0xff;
 +      uint16_t smu_major;
 +      uint8_t smu_minor, smu_debug;
        int ret = 0;
  
 -      ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
 +      ret = smu_get_smc_version(smu, &if_version, &smu_version);
        if (ret)
 -              goto err;
 +              return ret;
  
 -      ret = smu_read_smc_arg(smu, &smu_version);
 -      if (ret)
 -              goto err;
 +      smu_major = (smu_version >> 16) & 0xffff;
 +      smu_minor = (smu_version >> 8) & 0xff;
 +      smu_debug = (smu_version >> 0) & 0xff;
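 +      /* e.g. smu_version 0x002A3200 decodes as major 42, minor 50, debug 0 */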
 +
 +      pr_info("SMU Driver IF Version = 0x%08x, SMU FW Version = 0x%08x (%d.%d.%d)\n",
 +              if_version, smu_version, smu_major, smu_minor, smu_debug);
  
 -      if (smu_version != smu->smc_if_version)
 +      if (if_version != smu->smc_if_version) {
 +              pr_err("SMU driver interface version mismatch\n");
                ret = -EINVAL;
 -err:
 +      }
 +
        return ret;
  }
  
@@@ -360,8 -355,6 +362,8 @@@ static int smu_v11_0_init_power(struct 
  {
        struct smu_power_context *smu_power = &smu->smu_power;
  
 +      if (!smu->pm_enabled)
 +              return 0;
        if (smu_power->power_context || smu_power->power_context_size != 0)
                return -EINVAL;
  
                return -ENOMEM;
        smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
  
 +      smu->metrics_time = 0;
 +      smu->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
 +      if (!smu->metrics_table) {
 +              kfree(smu_power->power_context);
 +              return -ENOMEM;
 +      }
 +
        return 0;
  }
  
@@@ -385,14 -371,10 +387,14 @@@ static int smu_v11_0_fini_power(struct 
  {
        struct smu_power_context *smu_power = &smu->smu_power;
  
 +      if (!smu->pm_enabled)
 +              return 0;
        if (!smu_power->power_context || smu_power->power_context_size == 0)
                return -EINVAL;
  
 +      kfree(smu->metrics_table);
        kfree(smu_power->power_context);
 +      smu->metrics_table = NULL;
        smu_power->power_context = NULL;
        smu_power->power_context_size = 0;
  
@@@ -654,8 -636,6 +656,8 @@@ static int smu_v11_0_set_min_dcef_deep_
  {
        struct smu_table_context *table_context = &smu->smu_table;
  
 +      if (!smu->pm_enabled)
 +              return 0;
        if (!table_context)
                return -EINVAL;
  
@@@ -684,9 -664,6 +686,9 @@@ static int smu_v11_0_set_tool_table_loc
  static int smu_v11_0_init_display(struct smu_context *smu)
  {
        int ret = 0;
 +
 +      if (!smu->pm_enabled)
 +              return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
        return ret;
  }
@@@ -696,8 -673,6 +698,8 @@@ static int smu_v11_0_update_feature_ena
        uint32_t feature_low = 0, feature_high = 0;
        int ret = 0;
  
 +      if (!smu->pm_enabled)
 +              return ret;
        if (feature_id >= 0 && feature_id < 31)
                feature_low = (1 << feature_id);
        else if (feature_id > 31 && feature_id < 63)
@@@ -804,13 -779,10 +806,13 @@@ static int smu_v11_0_system_features_co
        uint32_t feature_mask[2];
        int ret = 0;
  
 -      ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
 -                                   SMU_MSG_DisableAllSmuFeatures));
 -      if (ret)
 -              return ret;
 +      if (smu->pm_enabled) {
 +              ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
 +                                           SMU_MSG_DisableAllSmuFeatures));
 +              if (ret)
 +                      return ret;
 +      }
 +
        ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
        if (ret)
                return ret;
@@@ -827,8 -799,6 +829,8 @@@ static int smu_v11_0_notify_display_cha
  {
        int ret = 0;
  
 +      if (!smu->pm_enabled)
 +              return ret;
        if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
            ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
  
@@@ -841,8 -811,6 +843,8 @@@ smu_v11_0_get_max_sustainable_clock(str
  {
        int ret = 0;
  
 +      if (!smu->pm_enabled)
 +              return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
                                          clock_select << 16);
        if (ret) {
@@@ -1029,20 -997,9 +1031,20 @@@ static int smu_v11_0_get_current_clk_fr
  static int smu_v11_0_get_thermal_range(struct smu_context *smu,
                                struct PP_TemperatureRange *range)
  {
 +      PPTable_t *pptable = smu->smu_table.driver_pptable;
        memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
  
 -      range->max = smu->smu_table.software_shutdown_temp *
 +      range->max = pptable->TedgeLimit *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      range->hotspot_crit_max = pptable->ThotspotLimit *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      range->mem_crit_max = pptable->ThbmLimit *
 +              PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
                PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
  
        return 0;
@@@ -1107,20 -1064,9 +1109,20 @@@ static int smu_v11_0_set_thermal_fan_ta
  static int smu_v11_0_start_thermal_control(struct smu_context *smu)
  {
        int ret = 0;
 -      struct PP_TemperatureRange range;
 +      struct PP_TemperatureRange range = {
 +              .min = TEMP_RANGE_MIN,
 +              .max = TEMP_RANGE_MAX,
 +              .edge_emergency_max = TEMP_RANGE_MAX,
 +              .hotspot_min = TEMP_RANGE_MIN,
 +              .hotspot_crit_max = TEMP_RANGE_MAX,
 +              .hotspot_emergency_max = TEMP_RANGE_MAX,
 +              .mem_min = TEMP_RANGE_MIN,
 +              .mem_crit_max = TEMP_RANGE_MAX,
 +              .mem_emergency_max = TEMP_RANGE_MAX,
 +      };
        struct amdgpu_device *adev = smu->adev;
  
 +      if (!smu->pm_enabled)
 +              return ret;
        smu_v11_0_get_thermal_range(smu, &range);
  
        if (smu->smu_table.thermal_controller_type) {
  
        adev->pm.dpm.thermal.min_temp = range.min;
        adev->pm.dpm.thermal.max_temp = range.max;
 +      adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
 +      adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
 +      adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
 +      adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
 +      adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
 +      adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
 +      adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
 +
 +      return ret;
 +}
 +
 +static int smu_v11_0_get_metrics_table(struct smu_context *smu,
 +              SmuMetrics_t *metrics_table)
 +{
 +      int ret = 0;
 +
 +      if (!smu->metrics_time || time_after(jiffies, smu->metrics_time + HZ / 1000)) {
 +              ret = smu_update_table(smu, TABLE_SMU_METRICS,
 +                              (void *)metrics_table, false);
 +              if (ret) {
 +                      pr_err("Failed to export SMU metrics table!\n");
 +                      return ret;
 +              }
 +              memcpy(smu->metrics_table, metrics_table, sizeof(SmuMetrics_t));
 +              smu->metrics_time = jiffies;
 +      } else {
 +              memcpy(metrics_table, smu->metrics_table, sizeof(SmuMetrics_t));
 +      }
  
        return ret;
  }
  
  static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
 +                                                enum amd_pp_sensors sensor,
                                                  uint32_t *value)
  {
        int ret = 0;
        if (!value)
                return -EINVAL;
  
 -      ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
 +      ret = smu_v11_0_get_metrics_table(smu, &metrics);
        if (ret)
                return ret;
  
 -      *value = metrics.AverageGfxActivity;
 +      switch (sensor) {
 +      case AMDGPU_PP_SENSOR_GPU_LOAD:
 +              *value = metrics.AverageGfxActivity;
 +              break;
 +      case AMDGPU_PP_SENSOR_MEM_LOAD:
 +              *value = metrics.AverageUclkActivity;
 +              break;
 +      default:
 +              pr_err("Invalid sensor for retrieving clock activity\n");
 +              return -EINVAL;
 +      }
  
        return 0;
  }
  
 -static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
 +static int smu_v11_0_thermal_get_temperature(struct smu_context *smu,
 +                                           enum amd_pp_sensors sensor,
 +                                           uint32_t *value)
  {
        struct amdgpu_device *adev = smu->adev;
 +      SmuMetrics_t metrics;
        uint32_t temp = 0;
 +      int ret = 0;
  
        if (!value)
                return -EINVAL;
  
 -      temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
 -      temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
 -                      CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
 +      ret = smu_v11_0_get_metrics_table(smu, &metrics);
 +      if (ret)
 +              return ret;
  
 -      temp = temp & 0x1ff;
 -      temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +      switch (sensor) {
 +      case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
 +              temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
 +              temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
 +                              CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
  
 -      *value = temp;
 +              temp = temp & 0x1ff;
 +              temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +
 +              *value = temp;
 +              break;
 +      case AMDGPU_PP_SENSOR_EDGE_TEMP:
 +              *value = metrics.TemperatureEdge *
 +                      PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +              break;
 +      case AMDGPU_PP_SENSOR_MEM_TEMP:
 +              *value = metrics.TemperatureHBM *
 +                      PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
 +              break;
 +      default:
 +              pr_err("Invalid sensor for retrieving temp\n");
 +              return -EINVAL;
 +      }
  
        return 0;
  }
@@@ -1249,7 -1134,7 +1251,7 @@@ static int smu_v11_0_get_gpu_power(stru
        if (!value)
                return -EINVAL;
  
 -      ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
 +      ret = smu_v11_0_get_metrics_table(smu, &metrics);
        if (ret)
                return ret;
  
@@@ -1291,9 -1176,7 +1293,9 @@@ static int smu_v11_0_read_sensor(struc
        int ret = 0;
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
 +      case AMDGPU_PP_SENSOR_MEM_LOAD:
                ret = smu_v11_0_get_current_activity_percent(smu,
 +                                                           sensor,
                                                             (uint32_t *)data);
                *size = 4;
                break;
                ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
                *size = 4;
                break;
 -      case AMDGPU_PP_SENSOR_GPU_TEMP:
 -              ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
 +      case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
 +      case AMDGPU_PP_SENSOR_EDGE_TEMP:
 +      case AMDGPU_PP_SENSOR_MEM_TEMP:
 +              ret = smu_v11_0_thermal_get_temperature(smu, sensor, (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GPU_POWER:
@@@ -1356,8 -1237,6 +1358,8 @@@ smu_v11_0_display_clock_voltage_request
        PPCLK_e clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
  
 +      if (!smu->pm_enabled)
 +              return -EINVAL;
        if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
@@@ -1641,7 -1520,7 +1643,7 @@@ static int smu_v11_0_get_power_profile_
                        "PD_Data_error_rate_coeff"};
        int result = 0;
  
 -      if (!buf)
 +      if (!smu->pm_enabled || !buf)
                return -EINVAL;
  
        size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@@ -1728,8 -1607,6 +1730,8 @@@ static int smu_v11_0_set_power_profile_
  
        smu->power_profile_mode = input[size];
  
 +      if (!smu->pm_enabled)
 +              return ret;
        if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
                pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
                return -EINVAL;
@@@ -1835,24 -1712,24 +1837,24 @@@ static int smu_v11_0_update_od8_setting
  
  static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
  {
 -      if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
 +      if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
                return 0;
  
 -      if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
 +      if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
                return 0;
  
 -      return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
 +      return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
  }
  
  static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
  {
 -      if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
 +      if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
                return 0;
  
 -      if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
 +      if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
                return 0;
  
 -      return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
 +      return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
  }
  
  static int smu_v11_0_get_current_rpm(struct smu_context *smu,
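
One pattern worth calling out from the smu_v11_0 changes above: smu_v11_0_get_metrics_table() caches the last metrics read and only goes back to the SMU when the copy is older than HZ / 1000 jiffies (about 1 ms; note that with HZ below 1000 the expression evaluates to 0, so every call refetches). A condensed, self-contained sketch of that staleness check, with the SMU read abstracted behind a hypothetical fetch callback:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/string.h>

struct metrics_cache {
	unsigned long last_update;	/* jiffies of last refresh, 0 = never */
	u8 data[256];			/* cached table contents */
};

/*
 * Serve from the cache unless the copy is stale (older than HZ / 1000).
 * Caller guarantees len <= sizeof(c->data).
 */
static int metrics_cache_get(struct metrics_cache *c, void *out, size_t len,
			     int (*fetch)(void *dst, size_t len))
{
	if (!c->last_update ||
	    time_after(jiffies, c->last_update + HZ / 1000)) {
		int ret = fetch(c->data, len);

		if (ret)
			return ret;
		c->last_update = jiffies;
	}
	memcpy(out, c->data, len);
	return 0;
}
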
index 9ef57fcf7e786eb39382174daadd58c88ca4de6a,46858b901d058931de9bbf2751637eadd3c4b361..7184d39dcbee9f3333064c7c89af5252c2b39603
@@@ -25,6 -25,7 +25,7 @@@
  #include <linux/fb.h>
  #include "linux/delay.h"
  #include <linux/types.h>
+ #include <linux/pci.h>
  
  #include "smumgr.h"
  #include "pp_debug.h"
@@@ -2254,8 -2255,6 +2255,8 @@@ static uint32_t ci_get_offsetof(uint32_
                        return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
                case AverageGraphicsActivity:
                        return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
 +              case AverageMemoryActivity:
 +                      return offsetof(SMU7_SoftRegisters, AverageMemoryA);
                case PreVBlankGap:
                        return offsetof(SMU7_SoftRegisters, PreVBlankGap);
                case VBlankTimeout:
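
This hunk and the three smumgr hunks that follow all extend the same lookup idiom: a symbolic register name is translated into a byte offset inside the firmware's SoftRegisters layout with offsetof(). A self-contained sketch of the idiom, using made-up names:

#include <linux/types.h>
#include <linux/stddef.h>

struct soft_registers {
	u32 average_graphics_activity;
	u32 average_memory_activity;
	u32 pre_vblank_gap;
};

enum soft_reg_id {
	REG_AVG_GFX_ACTIVITY,
	REG_AVG_MEM_ACTIVITY,
	REG_PRE_VBLANK_GAP,
};

/* Map a register id to its byte offset; ~0u marks an unknown id. */
static u32 soft_reg_offset(enum soft_reg_id id)
{
	switch (id) {
	case REG_AVG_GFX_ACTIVITY:
		return offsetof(struct soft_registers, average_graphics_activity);
	case REG_AVG_MEM_ACTIVITY:
		return offsetof(struct soft_registers, average_memory_activity);
	case REG_PRE_VBLANK_GAP:
		return offsetof(struct soft_registers, pre_vblank_gap);
	default:
		return ~0u;
	}
}
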
index f24f13d77808568d8fb99b9cffb241405eae669f,0eee7b2f8b9a9c9eac9b5265acc0b224e51600a9..73091ac0b6476a6c70672922f96d3437e7b6d01e
@@@ -25,6 -25,7 +25,7 @@@
  #include "pp_debug.h"
  #include <linux/types.h>
  #include <linux/kernel.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  #include <linux/gfp.h>
  
@@@ -2219,8 -2220,6 +2220,8 @@@ static uint32_t iceland_get_offsetof(ui
                        return offsetof(SMU71_SoftRegisters, VoltageChangeTimeout);
                case AverageGraphicsActivity:
                        return offsetof(SMU71_SoftRegisters, AverageGraphicsActivity);
 +              case AverageMemoryActivity:
 +                      return offsetof(SMU71_SoftRegisters, AverageMemoryActivity);
                case PreVBlankGap:
                        return offsetof(SMU71_SoftRegisters, PreVBlankGap);
                case VBlankTimeout:
index 0d8958e71b94f8ef52afee87c7833f6136d3c0ce,477c1c870591c214a05d283ef995b0409bef3084..d6052e6daef2ac15738a35c20f2f4497682579f4
@@@ -21,6 -21,8 +21,8 @@@
   *
   */
  
+ #include <linux/pci.h>
  #include "pp_debug.h"
  #include "smumgr.h"
  #include "smu74.h"
@@@ -2313,8 -2315,6 +2315,8 @@@ static uint32_t polaris10_get_offsetof(
                        return offsetof(SMU74_SoftRegisters, VoltageChangeTimeout);
                case AverageGraphicsActivity:
                        return offsetof(SMU74_SoftRegisters, AverageGraphicsActivity);
 +              case AverageMemoryActivity:
 +                      return offsetof(SMU74_SoftRegisters, AverageMemoryActivity);
                case PreVBlankGap:
                        return offsetof(SMU74_SoftRegisters, PreVBlankGap);
                case VBlankTimeout:
index 060c0f7f523852991ea809b1d9a3a447c62db706,8f6f2808094aef5bc916d2d86cf1ba67cb7de7fa..e4e976b9d64ead26b4b13d42f8486b044f69e962
@@@ -23,6 -23,7 +23,7 @@@
  #include "pp_debug.h"
  #include <linux/types.h>
  #include <linux/kernel.h>
+ #include <linux/pci.h>
  #include <linux/slab.h>
  #include <linux/gfp.h>
  
@@@ -2611,8 -2612,6 +2612,8 @@@ static uint32_t tonga_get_offsetof(uint
                        return offsetof(SMU72_SoftRegisters, VoltageChangeTimeout);
                case AverageGraphicsActivity:
                        return offsetof(SMU72_SoftRegisters, AverageGraphicsActivity);
 +              case AverageMemoryActivity:
 +                      return offsetof(SMU72_SoftRegisters, AverageMemoryActivity);
                case PreVBlankGap:
                        return offsetof(SMU72_SoftRegisters, PreVBlankGap);
                case VBlankTimeout:
index 7411e69e2712c9a670f4f2bb2f58f9979d850137,d061e638f12ce477657a5fcc17975f393eb47719..d8bc5d2dfd618690ad34629bbf46c8f1bb293498
   *          Alex Deucher
   *          Jerome Glisse
   */
- #include <drm/drmP.h>
+ #include <drm/drm_debugfs.h>
+ #include <drm/drm_device.h>
+ #include <drm/drm_file.h>
+ #include <drm/drm_pci.h>
  #include <drm/radeon_drm.h>
  #include "radeon.h"
  
  void radeon_gem_object_free(struct drm_gem_object *gobj)
@@@ -559,7 -564,7 +564,7 @@@ static void radeon_gem_va_update_vm(str
        if (!vm_bos)
                return;
  
 -      r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 +      r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
        if (r)
                goto error_free;
  
index 36683de0300baf39d5104f8f27d7385b0316d0b7,1741eb7abb8eed1e78d3b37cb3075cfbb8d1cec7..21f73fc86f3888ac632964f540693d29612fd5b3
   *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
   *    Dave Airlie
   */
+ #include <linux/io.h>
  #include <linux/list.h>
  #include <linux/slab.h>
- #include <drm/drmP.h>
- #include <drm/radeon_drm.h>
  #include <drm/drm_cache.h>
+ #include <drm/drm_prime.h>
+ #include <drm/radeon_drm.h>
  #include "radeon.h"
  #include "radeon_trace.h"
  
  int radeon_ttm_init(struct radeon_device *rdev);
  void radeon_ttm_fini(struct radeon_device *rdev);
  static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
@@@ -539,7 -542,7 +542,7 @@@ int radeon_bo_list_validate(struct rade
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
  
        INIT_LIST_HEAD(&duplicates);
 -      r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
 +      r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
        if (unlikely(r != 0)) {
                return r;
        }
index fd9e0d36154f6f54d1f277c3a641a9064e545ddf,5b81ba2a7f272b1368a23534813a929f11386a28..f2a5d4d997073ac46f0978645f65ae5e864bfbb4
@@@ -63,12 -63,10 +63,12 @@@ struct rcar_lvds 
                struct clk *extal;              /* External clock */
                struct clk *dotclkin[2];        /* External DU clocks */
        } clocks;
 -      bool enabled;
  
        struct drm_display_mode display_mode;
        enum rcar_lvds_mode mode;
 +
 +      struct drm_bridge *companion;
 +      bool dual_link;
  };
  
  #define bridge_to_rcar_lvds(bridge) \
@@@ -94,13 -92,15 +94,15 @@@ static int rcar_lvds_connector_get_mode
  }
  
  static int rcar_lvds_connector_atomic_check(struct drm_connector *connector,
-                                           struct drm_connector_state *state)
+                                           struct drm_atomic_state *state)
  {
        struct rcar_lvds *lvds = connector_to_rcar_lvds(connector);
        const struct drm_display_mode *panel_mode;
+       struct drm_connector_state *conn_state;
        struct drm_crtc_state *crtc_state;
  
-       if (!state->crtc)
+       conn_state = drm_atomic_get_new_connector_state(state, connector);
+       if (!conn_state->crtc)
                return 0;
  
        if (list_empty(&connector->modes)) {
                                      struct drm_display_mode, head);
  
        /* We're not allowed to modify the resolution. */
-       crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
-       if (IS_ERR(crtc_state))
-               return PTR_ERR(crtc_state);
+       crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+       if (!crtc_state)
+               return -EINVAL;
  
        if (crtc_state->mode.hdisplay != panel_mode->hdisplay ||
            crtc_state->mode.vdisplay != panel_mode->vdisplay)
@@@ -370,12 -370,15 +372,12 @@@ int rcar_lvds_clk_enable(struct drm_bri
  
        dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
  
 -      WARN_ON(lvds->enabled);
 -
        ret = clk_prepare_enable(lvds->clocks.mod);
        if (ret < 0)
                return ret;
  
        __rcar_lvds_pll_setup_d3_e3(lvds, freq, true);
  
 -      lvds->enabled = true;
        return 0;
  }
  EXPORT_SYMBOL_GPL(rcar_lvds_clk_enable);
@@@ -389,9 -392,13 +391,9 @@@ void rcar_lvds_clk_disable(struct drm_b
  
        dev_dbg(lvds->dev, "disabling LVDS PLL\n");
  
 -      WARN_ON(!lvds->enabled);
 -
        rcar_lvds_write(lvds, LVDPLLCR, 0);
  
        clk_disable_unprepare(lvds->clocks.mod);
 -
 -      lvds->enabled = false;
  }
  EXPORT_SYMBOL_GPL(rcar_lvds_clk_disable);
  
@@@ -403,18 -410,21 +405,18 @@@ static void rcar_lvds_enable(struct drm
  {
        struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
        const struct drm_display_mode *mode = &lvds->display_mode;
 -      /*
 -       * FIXME: We should really retrieve the CRTC through the state, but how
 -       * do we get a state pointer?
 -       */
 -      struct drm_crtc *crtc = lvds->bridge.encoder->crtc;
        u32 lvdhcr;
        u32 lvdcr0;
        int ret;
  
 -      WARN_ON(lvds->enabled);
 -
        ret = clk_prepare_enable(lvds->clocks.mod);
        if (ret < 0)
                return;
  
 +      /* Enable the companion LVDS encoder in dual-link mode. */
 +      if (lvds->dual_link && lvds->companion)
 +              lvds->companion->funcs->enable(lvds->companion);
 +
        /*
         * Hardcode the channels and control signals routing for now.
         *
        rcar_lvds_write(lvds, LVDCHCR, lvdhcr);
  
        if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK) {
 -              /* Disable dual-link mode. */
 -              rcar_lvds_write(lvds, LVDSTRIPE, 0);
 +              /*
 +               * Configure vertical stripe based on the mode of operation of
 +               * the connected device.
 +               */
 +              rcar_lvds_write(lvds, LVDSTRIPE,
 +                              lvds->dual_link ? LVDSTRIPE_ST_ON : 0);
        }
  
 -      /* PLL clock configuration. */
 -      lvds->info->pll_setup(lvds, mode->clock * 1000);
 +      /*
 +       * PLL clock configuration on all instances but the companion in
 +       * dual-link mode.
 +       */
 +      if (!lvds->dual_link || lvds->companion)
 +              lvds->info->pll_setup(lvds, mode->clock * 1000);
  
        /* Set the LVDS mode and select the input. */
        lvdcr0 = lvds->mode << LVDCR0_LVMD_SHIFT;
 -      if (drm_crtc_index(crtc) == 2)
 -              lvdcr0 |= LVDCR0_DUSEL;
 +
 +      if (lvds->bridge.encoder) {
 +              /*
 +               * FIXME: We should really retrieve the CRTC through the state,
 +               * but how do we get a state pointer?
 +               */
 +              if (drm_crtc_index(lvds->bridge.encoder->crtc) == 2)
 +                      lvdcr0 |= LVDCR0_DUSEL;
 +      }
 +
        rcar_lvds_write(lvds, LVDCR0, lvdcr0);
  
        /* Turn all the channels on. */
                drm_panel_prepare(lvds->panel);
                drm_panel_enable(lvds->panel);
        }
 -
 -      lvds->enabled = true;
  }
  
  static void rcar_lvds_disable(struct drm_bridge *bridge)
  {
        struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
  
 -      WARN_ON(!lvds->enabled);
 -
        if (lvds->panel) {
                drm_panel_disable(lvds->panel);
                drm_panel_unprepare(lvds->panel);
        rcar_lvds_write(lvds, LVDCR1, 0);
        rcar_lvds_write(lvds, LVDPLLCR, 0);
  
 -      clk_disable_unprepare(lvds->clocks.mod);
 +      /* Disable the companion LVDS encoder in dual-link mode. */
 +      if (lvds->dual_link && lvds->companion)
 +              lvds->companion->funcs->disable(lvds->companion);
  
 -      lvds->enabled = false;
 +      clk_disable_unprepare(lvds->clocks.mod);
  }
  
  static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
@@@ -598,6 -594,8 +600,6 @@@ static void rcar_lvds_mode_set(struct d
  {
        struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
  
 -      WARN_ON(lvds->enabled);
 -
        lvds->display_mode = *adjusted_mode;
  
        rcar_lvds_get_lvds_mode(lvds);
@@@ -650,57 -648,10 +652,57 @@@ static const struct drm_bridge_funcs rc
        .mode_set = rcar_lvds_mode_set,
  };
  
 +bool rcar_lvds_dual_link(struct drm_bridge *bridge)
 +{
 +      struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
 +
 +      return lvds->dual_link;
 +}
 +EXPORT_SYMBOL_GPL(rcar_lvds_dual_link);
 +
  /* -----------------------------------------------------------------------------
   * Probe & Remove
   */
  
 +static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)
 +{
 +      const struct of_device_id *match;
 +      struct device_node *companion;
 +      struct device *dev = lvds->dev;
 +      int ret = 0;
 +
 +      /* Locate the companion LVDS encoder; dual-link operation requires one. */
 +      companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
 +      if (!companion) {
 +              dev_err(dev, "Companion LVDS encoder not found\n");
 +              return -ENXIO;
 +      }
 +
 +      /*
 +       * Sanity check: the companion encoder must have the same compatible
 +       * string.
 +       */
 +      match = of_match_device(dev->driver->of_match_table, dev);
 +      if (!of_device_is_compatible(companion, match->compatible)) {
 +              dev_err(dev, "Companion LVDS encoder is invalid\n");
 +              ret = -ENXIO;
 +              goto done;
 +      }
 +
 +      lvds->companion = of_drm_find_bridge(companion);
 +      if (!lvds->companion) {
 +              ret = -EPROBE_DEFER;
 +              goto done;
 +      }
 +
 +      dev_dbg(dev, "Found companion encoder %pOF\n", companion);
 +
 +done:
 +      of_node_put(companion);
 +
 +      return ret;
 +}
 +
  static int rcar_lvds_parse_dt(struct rcar_lvds *lvds)
  {
        struct device_node *local_output = NULL;
  
        if (is_bridge) {
                lvds->next_bridge = of_drm_find_bridge(remote);
 -              if (!lvds->next_bridge)
 +              if (!lvds->next_bridge) {
                        ret = -EPROBE_DEFER;
 +                      goto done;
 +              }
 +
 +              if (lvds->info->quirks & RCAR_LVDS_QUIRK_DUAL_LINK)
 +                      lvds->dual_link = lvds->next_bridge->timings
 +                                      ? lvds->next_bridge->timings->dual_link
 +                                      : false;
        } else {
                lvds->panel = of_drm_find_panel(remote);
 -              if (IS_ERR(lvds->panel))
 +              if (IS_ERR(lvds->panel)) {
                        ret = PTR_ERR(lvds->panel);
 +                      goto done;
 +              }
        }
  
 +      if (lvds->dual_link)
 +              ret = rcar_lvds_parse_dt_companion(lvds);
 +
  done:
        of_node_put(local_output);
        of_node_put(remote_input);
@@@ -856,6 -795,7 +858,6 @@@ static int rcar_lvds_probe(struct platf
  
        lvds->dev = &pdev->dev;
        lvds->info = of_device_get_match_data(&pdev->dev);
 -      lvds->enabled = false;
  
        ret = rcar_lvds_parse_dt(lvds);
        if (ret < 0)
@@@ -928,7 -868,6 +930,7 @@@ static const struct rcar_lvds_device_in
  static const struct of_device_id rcar_lvds_of_table[] = {
        { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info },
        { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info },
 +      { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info },
        { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info },
        { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info },
        { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info },
index fe0bd6274db612ed6200b351b9d7d632f56c664f,c0ba1ead740f48ebeccffdd4c8179a2534d33835..ac60be9b5c1904a0fe82d3b103803d8e609fcef2
@@@ -63,7 -63,7 +63,7 @@@ int virtio_gpu_object_list_validate(str
        struct virtio_gpu_object *qobj;
        int ret;
  
 -      ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
 +      ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
        if (ret != 0)
                return ret;
  
@@@ -168,7 -168,7 +168,7 @@@ static int virtio_gpu_execbuffer_ioctl(
                        goto out_unused_fd;
                }
  
-               user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
+               user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
        if (ret)
                goto out_free;
  
-       buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
-                         exbuf->size);
+       buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unresv;
@@@ -263,10 -262,9 +262,9 @@@ static int virtio_gpu_getparam_ioctl(st
        default:
                return -EINVAL;
        }
-       if (copy_to_user((void __user *)(unsigned long)param->value,
-                        &value, sizeof(int))) {
+       if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
                return -EFAULT;
-       }
        return 0;
  }
  
@@@ -526,7 -524,6 +524,6 @@@ static int virtio_gpu_get_caps_ioctl(st
        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
-                       ptr = cache_ent->caps_cache;
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                  &cache_ent);
  
+ copy_exit:
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;
  
+       /* is_valid check must precede the copy of the cache entry. */
+       smp_rmb();
        ptr = cache_ent->caps_cache;
  
- copy_exit:
-       if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
+       if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
                return -EFAULT;
  
        return 0;
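
The virtio-gpu cleanups above replace open-coded "(void __user *)(uintptr_t)" casts with u64_to_user_ptr(), which documents the intent and keeps the __user annotation intact for sparse. A minimal sketch of the same conversion in an ioctl-style handler (struct and field names hypothetical):

#include <linux/types.h>
#include <linux/kernel.h>	/* u64_to_user_ptr() */
#include <linux/uaccess.h>	/* copy_from_user() */

struct my_args {
	__u64 data_ptr;		/* userspace address carried as a u64 */
	__u32 data_len;
};

static int copy_in_payload(const struct my_args *args, void *dst)
{
	void __user *src = u64_to_user_ptr(args->data_ptr);

	if (copy_from_user(dst, src, args->data_len))
		return -EFAULT;
	return 0;
}
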
diff --combined include/drm/drm_bridge.h
index aea1fcfd92a7da30cb4a5c941936241cd45c5d91,3228018848141948fb91e109fabf831d9cbad610..7616f6562fe488e47312e0b7e77da5977cffd902
@@@ -237,6 -237,103 +237,103 @@@ struct drm_bridge_funcs 
         * The enable callback is optional.
         */
        void (*enable)(struct drm_bridge *bridge);
+       /**
+        * @atomic_pre_enable:
+        *
+        * This callback should enable the bridge. It is called right before
+        * the preceding element in the display pipe is enabled. If the
+        * preceding element is a bridge this means it's called before that
+        * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
+        * element is a &drm_encoder it's called right before the encoder's
+        * &drm_encoder_helper_funcs.atomic_enable hook.
+        *
+        * The display pipe (i.e. clocks and timing signals) feeding this bridge
+        * will not yet be running when this callback is called. The bridge must
+        * not enable the display link feeding the next bridge in the chain (if
+        * there is one) when this callback is called.
+        *
+        * Note that this function will only be invoked in the context of an
+        * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
+        * would be prudent to also provide an implementation of @pre_enable if
+        * you are expecting driver calls into &drm_bridge_pre_enable.
+        *
+        * The @atomic_pre_enable callback is optional.
+        */
+       void (*atomic_pre_enable)(struct drm_bridge *bridge,
+                                 struct drm_atomic_state *state);
+       /**
+        * @atomic_enable:
+        *
+        * This callback should enable the bridge. It is called right after
+        * the preceding element in the display pipe is enabled. If the
+        * preceding element is a bridge this means it's called after that
+        * bridge's @atomic_enable or @enable function. If the preceding element
+        * is a &drm_encoder it's called right after the encoder's
+        * &drm_encoder_helper_funcs.atomic_enable hook.
+        *
+        * The bridge can assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is running when this callback is called. This
+        * callback must enable the display link feeding the next bridge in the
+        * chain if there is one.
+        *
+        * Note that this function will only be invoked in the context of an
+        * atomic commit. It will not be invoked from &drm_bridge_enable. It
+        * would be prudent to also provide an implementation of @enable if
+        * you are expecting driver calls into &drm_bridge_enable.
+        *
+        * The @atomic_enable callback is optional.
+        */
+       void (*atomic_enable)(struct drm_bridge *bridge,
+                             struct drm_atomic_state *state);
+       /**
+        * @atomic_disable:
+        *
+        * This callback should disable the bridge. It is called right before
+        * the preceding element in the display pipe is disabled. If the
+        * preceding element is a bridge this means it's called before that
+        * bridge's @atomic_disable or @disable vfunc. If the preceding element
+        * is a &drm_encoder it's called right before the
+        * &drm_encoder_helper_funcs.atomic_disable hook.
+        *
+        * The bridge can assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is still running when this callback is called.
+        *
+        * Note that this function will only be invoked in the context of an
+        * atomic commit. It will not be invoked from &drm_bridge_disable. It
+        * would be prudent to also provide an implementation of @disable if
+        * you are expecting driver calls into &drm_bridge_disable.
+        *
+        * The @atomic_disable callback is optional.
+        */
+       void (*atomic_disable)(struct drm_bridge *bridge,
+                              struct drm_atomic_state *state);
+       /**
+        * @atomic_post_disable:
+        *
+        * This callback should disable the bridge. It is called right after the
+        * preceding element in the display pipe is disabled. If the preceding
+        * element is a bridge this means it's called after that bridge's
+        * @atomic_post_disable or @post_disable function. If the preceding
+        * element is a &drm_encoder it's called right after the encoder's
+        * &drm_encoder_helper_funcs.atomic_disable hook.
+        *
+        * The bridge must assume that the display pipe (i.e. clocks and timing
+        * signals) feeding it is no longer running when this callback is
+        * called.
+        *
+        * Note that this function will only be invoked in the context of an
+        * atomic commit. It will not be invoked from &drm_bridge_post_disable.
+        * It would be prudent to also provide an implementation of
+        * @post_disable if you are expecting driver calls into
+        * &drm_bridge_post_disable.
+        *
+        * The @atomic_post_disable callback is optional.
+        */
+       void (*atomic_post_disable)(struct drm_bridge *bridge,
+                                   struct drm_atomic_state *state);
  };
  
  /**
@@@ -265,14 -362,6 +362,14 @@@ struct drm_bridge_timings 
         * input signal after the clock edge.
         */
        u32 hold_time_ps;
 +      /**
 +       * @dual_link:
 +       *
 +       * True if the bus operates in dual-link mode. The exact meaning is
 +       * dependent on the bus type. For LVDS buses, this indicates that even-
 +       * and odd-numbered pixels are received on separate links.
 +       */
 +      bool dual_link;
  };
  
  /**
@@@ -322,6 -411,15 +419,15 @@@ void drm_bridge_mode_set(struct drm_bri
  void drm_bridge_pre_enable(struct drm_bridge *bridge);
  void drm_bridge_enable(struct drm_bridge *bridge);
  
+ void drm_atomic_bridge_disable(struct drm_bridge *bridge,
+                              struct drm_atomic_state *state);
+ void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+                                   struct drm_atomic_state *state);
+ void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
+                                 struct drm_atomic_state *state);
+ void drm_atomic_bridge_enable(struct drm_bridge *bridge,
+                             struct drm_atomic_state *state);
  #ifdef CONFIG_DRM_PANEL_BRIDGE
  struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
                                        u32 connector_type);
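
As the new kernel-doc above stresses, the atomic_* hooks are only invoked from atomic commits, so a bridge that may also be driven through the legacy drm_bridge_enable()/drm_bridge_disable() helpers should wire up both sets. A hedged sketch of such a drm_bridge_funcs table (driver callbacks hypothetical):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void my_bridge_enable(struct drm_bridge *bridge)
{
	/* Power up the display link; shared by legacy and atomic paths. */
}

static void my_bridge_disable(struct drm_bridge *bridge)
{
	/* Power the link back down. */
}

static void my_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	/* 'state' is available here for CRTC/connector state lookups. */
	my_bridge_enable(bridge);
}

static void my_bridge_atomic_disable(struct drm_bridge *bridge,
				     struct drm_atomic_state *state)
{
	my_bridge_disable(bridge);
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.enable         = my_bridge_enable,
	.disable        = my_bridge_disable,
	.atomic_enable  = my_bridge_atomic_enable,
	.atomic_disable = my_bridge_atomic_disable,
};
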