Git Repo - J-linux.git/commitdiff
Merge tag 'amd-drm-next-6.3-2023-01-06' of https://gitlab.freedesktop.org/agd5f/linux...
authorDave Airlie <[email protected]>
Mon, 16 Jan 2023 04:00:12 +0000 (14:00 +1000)
committerDave Airlie <[email protected]>
Mon, 16 Jan 2023 04:00:12 +0000 (14:00 +1000)
amd-drm-next-6.3-2023-01-06:

amdgpu:
- secure display support for multiple displays
- DML optimizations
- DCN 3.2 updates
- PSR updates
- DP 2.1 updates
- SR-IOV RAS updates
- VCN RAS support
- SMU 13.x updates
- Switch 1 element arrays to flexible arrays
- Add RAS support for DF 4.3
- Stack size improvements
- S0ix rework
- Soft reset fix
- Allow 0 as a vram limit on APUs
- Display fixes
- Misc code cleanups
- Documentation fixes
- Handle profiling modes for SMU13.x

amdkfd:
- Error handling fixes
- PASID fixes

radeon:
- Switch 1 element arrays to flexible arrays

drm:
- Add DP adaptive sync DPCD definitions

UAPI:
- Add new INFO queries for peak and min sclk/mclk for profile modes on newer chips
  Proposed mesa patch: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/278

Signed-off-by: Dave Airlie <[email protected]>
From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
1  2 
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

index 2644cd9912109f5ec92fd3aaf5eecdec2e8c53fa,872450a3a164c17b451493f095327abdcf386799..4e4efd10cb895d0e6e6b8723e8d4672db1f754d9
@@@ -52,7 -52,8 +52,7 @@@
  #include <linux/pci.h>
  #include <linux/aer.h>
  
 -#include <drm/ttm/ttm_bo_api.h>
 -#include <drm/ttm/ttm_bo_driver.h>
 +#include <drm/ttm/ttm_bo.h>
  #include <drm/ttm/ttm_placement.h>
  #include <drm/ttm/ttm_execbuf_util.h>
  
@@@ -149,7 -150,7 +149,7 @@@ struct amdgpu_watchdog_time
   * Modules parameters.
   */
  extern int amdgpu_modeset;
- extern int amdgpu_vram_limit;
+ extern unsigned int amdgpu_vram_limit;
  extern int amdgpu_vis_vram_limit;
  extern int amdgpu_gart_size;
  extern int amdgpu_gtt_size;
@@@ -194,6 -195,7 +194,7 @@@ extern int amdgpu_emu_mode
  extern uint amdgpu_smu_memory_pool_size;
  extern int amdgpu_smu_pptable_id;
  extern uint amdgpu_dc_feature_mask;
+ extern uint amdgpu_freesync_vid_mode;
  extern uint amdgpu_dc_debug_mask;
  extern uint amdgpu_dc_visual_confirm;
  extern uint amdgpu_dm_abm_level;
@@@ -607,7 -609,7 +608,7 @@@ int amdgpu_cs_wait_fences_ioctl(struct 
                                struct drm_file *filp);
  
  /* VRAM scratch page for HDP bug, default vram page */
- struct amdgpu_vram_scratch {
+ struct amdgpu_mem_scratch {
        struct amdgpu_bo                *robj;
        volatile uint32_t               *ptr;
        u64                             gpu_addr;
@@@ -754,6 -756,11 +755,11 @@@ struct amdgpu_mqd 
  #define AMDGPU_PRODUCT_NAME_LEN 64
  struct amdgpu_reset_domain;
  
+ /*
+  * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
+  */
+ #define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
  struct amdgpu_device {
        struct device                   *dev;
        struct pci_dev                  *pdev;
  
        /* memory management */
        struct amdgpu_mman              mman;
-       struct amdgpu_vram_scratch      vram_scratch;
+       struct amdgpu_mem_scratch       mem_scratch;
        struct amdgpu_wb                wb;
        atomic64_t                      num_bytes_moved;
        atomic64_t                      num_evictions;
        struct amdgpu_vkms_output       *amdgpu_vkms_output;
        struct amdgpu_mode_info         mode_info;
        /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
-       struct work_struct              hotplug_work;
+       struct delayed_work         hotplug_work;
        struct amdgpu_irq_src           crtc_irq;
        struct amdgpu_irq_src           vline0_irq;
        struct amdgpu_irq_src           vupdate_irq;
index 076ae400d099490d36e2dd5573e7a3315cb8ba13,585e73f2839e2f4abe96f74cdd95094b2ec7bb07..79ae7a2bce288b37b58597774d83566d74e89d7a
@@@ -924,32 -924,33 +924,33 @@@ static int amdgpu_device_asic_init(stru
  }
  
  /**
-  * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
+  * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
   *
   * @adev: amdgpu_device pointer
   *
   * Allocates a scratch page of VRAM for use by various things in the
   * driver.
   */
- static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
+ static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
  {
-       return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
-                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->vram_scratch.robj,
-                                      &adev->vram_scratch.gpu_addr,
-                                      (void **)&adev->vram_scratch.ptr);
+       return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+                                      AMDGPU_GEM_DOMAIN_VRAM |
+                                      AMDGPU_GEM_DOMAIN_GTT,
+                                      &adev->mem_scratch.robj,
+                                      &adev->mem_scratch.gpu_addr,
+                                      (void **)&adev->mem_scratch.ptr);
  }
  
  /**
-  * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
+  * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
   *
   * @adev: amdgpu_device pointer
   *
   * Frees the VRAM scratch page.
   */
- static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
+ static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
  {
-       amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
+       amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
  }
  
  /**
@@@ -2390,9 -2391,9 +2391,9 @@@ static int amdgpu_device_ip_init(struc
                        if (amdgpu_sriov_vf(adev))
                                amdgpu_virt_exchange_data(adev);
  
-                       r = amdgpu_device_vram_scratch_init(adev);
+                       r = amdgpu_device_mem_scratch_init(adev);
                        if (r) {
-                               DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
+                               DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
                                goto init_failed;
                        }
                        r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
                        /* right after GMC hw init, we create CSA */
                        if (amdgpu_mcbp) {
                                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
-                                                               AMDGPU_GEM_DOMAIN_VRAM,
-                                                               AMDGPU_CSA_SIZE);
+                                                              AMDGPU_GEM_DOMAIN_VRAM |
+                                                              AMDGPU_GEM_DOMAIN_GTT,
+                                                              AMDGPU_CSA_SIZE);
                                if (r) {
                                        DRM_ERROR("allocate CSA failed %d\n", r);
                                        goto init_failed;
@@@ -2581,9 -2583,10 +2583,10 @@@ int amdgpu_device_set_cg_state(struct a
                i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
-               /* skip CG for GFX on S0ix */
+               /* skip CG for GFX, SDMA on S0ix */
                if (adev->in_s0ix &&
-                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
                        continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@@ -2617,9 -2620,10 +2620,10 @@@ int amdgpu_device_set_pg_state(struct a
                i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
-               /* skip PG for GFX on S0ix */
+               /* skip PG for GFX, SDMA on S0ix */
                if (adev->in_s0ix &&
-                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
                        continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@@ -2871,7 -2875,7 +2875,7 @@@ static int amdgpu_device_ip_fini(struc
                        amdgpu_ucode_free_bo(adev);
                        amdgpu_free_static_csa(&adev->virt.csa_obj);
                        amdgpu_device_wb_fini(adev);
-                       amdgpu_device_vram_scratch_fini(adev);
+                       amdgpu_device_mem_scratch_fini(adev);
                        amdgpu_ib_pool_fini(adev);
                }
  
@@@ -3027,6 -3031,12 +3031,12 @@@ static int amdgpu_device_ip_suspend_pha
                     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
                        continue;
  
+               /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+               if (adev->in_s0ix &&
+                   (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
                /* XXX handle errors */
                r = adev->ip_blocks[i].version->funcs->suspend(adev);
                /* XXX handle errors */
@@@ -3227,15 -3237,6 +3237,6 @@@ static int amdgpu_device_ip_resume_phas
                        return r;
                }
                adev->ip_blocks[i].status.hw = true;
-               if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-                       /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
-                        * amdgpu_device_resume() after IP resume.
-                        */
-                       amdgpu_gfx_off_ctrl(adev, false);
-                       DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
-               }
        }
  
        return 0;
@@@ -3989,8 -3990,10 +3990,8 @@@ void amdgpu_device_fini_hw(struct amdgp
        }
        amdgpu_fence_driver_hw_fini(adev);
  
 -      if (adev->mman.initialized) {
 -              flush_delayed_work(&adev->mman.bdev.wq);
 -              ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 -      }
 +      if (adev->mman.initialized)
 +              drain_workqueue(adev->mman.bdev.wq);
  
        if (adev->pm_sysfs_en)
                amdgpu_pm_sysfs_fini(adev);
@@@ -4221,13 -4224,6 +4222,6 @@@ exit
        /* Make sure IB tests flushed */
        flush_delayed_work(&adev->delayed_init_work);
  
-       if (adev->in_s0ix) {
-               /* re-enable gfxoff after IP resume. This re-enables gfxoff after
-                * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
-                */
-               amdgpu_gfx_off_ctrl(adev, true);
-               DRM_DEBUG("will enable gfxoff for the mission mode\n");
-       }
        if (fbcon)
                drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
  
@@@ -4608,11 -4604,6 +4602,6 @@@ bool amdgpu_device_should_recover_gpu(s
        if (!amdgpu_ras_is_poison_mode_supported(adev))
                return true;
  
-       if (!amdgpu_device_ip_check_soft_reset(adev)) {
-               dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
-               return false;
-       }
        if (amdgpu_sriov_vf(adev))
                return true;
  
@@@ -4737,7 -4728,8 +4726,8 @@@ int amdgpu_device_pre_asic_reset(struc
                if (!need_full_reset)
                        need_full_reset = amdgpu_device_ip_need_full_reset(adev);
  
-               if (!need_full_reset && amdgpu_gpu_recovery) {
+               if (!need_full_reset && amdgpu_gpu_recovery &&
+                   amdgpu_device_ip_check_soft_reset(adev)) {
                        amdgpu_device_ip_pre_soft_reset(adev);
                        r = amdgpu_device_ip_soft_reset(adev);
                        amdgpu_device_ip_post_soft_reset(adev);
index 12871b71b07b38a8f59178b2cd8b50e72544e12f,adfc7512c61b1823fcd46b76a90f48aa0b836550..94f10ac0eef743007f2b02e164d60bbd686b84a4
@@@ -35,7 -35,6 +35,7 @@@
  #include "amdgpu_xgmi.h"
  
  #include <drm/drm_drv.h>
 +#include <drm/ttm/ttm_tt.h>
  
  /**
   * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
@@@ -202,13 -201,20 +202,20 @@@ uint64_t amdgpu_gmc_agp_addr(struct ttm
  void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
                              u64 base)
  {
+       uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
        uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
  
        mc->vram_start = base;
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       if (limit && limit < mc->real_vram_size)
+       if (limit < mc->real_vram_size)
                mc->real_vram_size = limit;
  
+       if (vis_limit && vis_limit < mc->visible_vram_size)
+               mc->visible_vram_size = vis_limit;
+       if (mc->real_vram_size < mc->visible_vram_size)
+               mc->visible_vram_size = mc->real_vram_size;
        if (mc->xgmi.num_physical_nodes == 0) {
                mc->fb_start = mc->vram_start;
                mc->fb_end = mc->vram_end;
index 28a7d2ea6661722b4a0ed5de15f7617861a14499,ce34b73d05bcf49925b5271dd98db79c9844b6f6..c5ef7f7bdc15cb4085c5889e038620d8ec26f8a4
  #include <linux/module.h>
  
  #include <drm/drm_drv.h>
 -#include <drm/ttm/ttm_bo_api.h>
 -#include <drm/ttm/ttm_bo_driver.h>
 +#include <drm/ttm/ttm_bo.h>
  #include <drm/ttm/ttm_placement.h>
  #include <drm/ttm/ttm_range_manager.h>
 +#include <drm/ttm/ttm_tt.h>
  
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_drv.h>
@@@ -1679,10 -1679,10 +1679,10 @@@ static int amdgpu_ttm_reserve_tmr(struc
                /* reserve vram for mem train according to TMR location */
                amdgpu_ttm_training_data_block_init(adev);
                ret = amdgpu_bo_create_kernel_at(adev,
-                                        ctx->c2p_train_data_offset,
-                                        ctx->train_data_size,
-                                        &ctx->c2p_bo,
-                                        NULL);
+                                                ctx->c2p_train_data_offset,
+                                                ctx->train_data_size,
+                                                &ctx->c2p_bo,
+                                                NULL);
                if (ret) {
                        DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
                        amdgpu_ttm_training_reserve_vram_fini(adev);
        }
  
        ret = amdgpu_bo_create_kernel_at(adev,
-                               adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
-                               adev->mman.discovery_tmr_size,
-                               &adev->mman.discovery_memory,
-                               NULL);
+                                        adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+                                        adev->mman.discovery_tmr_size,
+                                        &adev->mman.discovery_memory,
+                                        NULL);
        if (ret) {
                DRM_ERROR("alloc tmr failed(%d)!\n", ret);
                amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
@@@ -1718,7 -1718,6 +1718,6 @@@ int amdgpu_ttm_init(struct amdgpu_devic
  {
        uint64_t gtt_size;
        int r;
-       u64 vis_vram_limit;
  
        mutex_init(&adev->mman.gtt_window_lock);
  
                return r;
        }
  
-       /* Reduce size of CPU-visible VRAM if requested */
-       vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
-       if (amdgpu_vis_vram_limit > 0 &&
-           vis_vram_limit <= adev->gmc.visible_vram_size)
-               adev->gmc.visible_vram_size = vis_vram_limit;
        /* Change the size here instead of the init above so only lpfn is affected */
        amdgpu_ttm_set_buffer_funcs_status(adev, false);
  #ifdef CONFIG_64BIT
index 86bc23a67d9731da15d163b11b6d5340d99f38f3,f3fbf104cc65913404dbd48636d37b04968eb4c0..cabe02cb307c134aafcfb1584adc3914a41f3d4d
@@@ -210,7 -210,7 +210,7 @@@ static void amdgpu_dm_destroy_drm_devic
  
  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
-                                   uint32_t link_index,
+                                   u32 link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
  static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
@@@ -262,7 -262,7 +262,7 @@@ static u32 dm_vblank_get_counter(struc
  static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
  {
-       uint32_t v_blank_start, v_blank_end, h_position, v_position;
+       u32 v_blank_start, v_blank_end, h_position, v_position;
  
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
@@@ -361,7 -361,7 +361,7 @@@ static void dm_pflip_high_irq(void *int
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
-       uint32_t vpos, hpos, v_blank_start, v_blank_end;
+       u32 vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;
  
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@@ -648,7 -648,7 +648,7 @@@ static void dmub_hpd_callback(struct am
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
-       uint8_t link_index = 0;
+       u8 link_index = 0;
        struct drm_device *dev;
  
        if (adev == NULL)
@@@ -749,7 -749,7 +749,7 @@@ static void dm_dmub_outbox1_low_irq(voi
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
-       uint32_t count = 0;
+       u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;
  
@@@ -1015,7 -1015,7 +1015,7 @@@ static int dm_dmub_hw_init(struct amdgp
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
-       uint32_t i, fw_inst_const_size, fw_bss_data_size;
+       u32 i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
  
        if (!dmub_srv)
@@@ -1176,10 -1176,10 +1176,10 @@@ static void dm_dmub_hw_resume(struct am
  
  static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
  {
-       uint64_t pt_base;
-       uint32_t logical_addr_low;
-       uint32_t logical_addr_high;
-       uint32_t agp_base, agp_bot, agp_top;
+       u64 pt_base;
+       u32 logical_addr_low;
+       u32 logical_addr_high;
+       u32 agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
  
        memset(pa_config, 0, sizeof(*pa_config));
@@@ -1642,7 -1642,10 +1642,10 @@@ static int amdgpu_dm_init(struct amdgpu
        }
  #endif
  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+       adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+       if (!adev->dm.secure_display_ctxs) {
+               DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+       }
  #endif
        if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
@@@ -1737,10 -1740,15 +1740,15 @@@ static void amdgpu_dm_fini(struct amdgp
        amdgpu_dm_destroy_drm_device(&adev->dm);
  
  #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       if (adev->dm.crc_rd_wrk) {
-               flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
-               kfree(adev->dm.crc_rd_wrk);
-               adev->dm.crc_rd_wrk = NULL;
+       if (adev->dm.secure_display_ctxs) {
+               for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+                       if (adev->dm.secure_display_ctxs[i].crtc) {
+                               flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+                               flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+                       }
+               }
+               kfree(adev->dm.secure_display_ctxs);
+               adev->dm.secure_display_ctxs = NULL;
        }
  #endif
  #ifdef CONFIG_DRM_AMD_DC_HDCP
@@@ -2080,7 -2088,9 +2088,9 @@@ static int dm_dmub_sw_init(struct amdgp
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
-                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+                                   AMDGPU_GEM_DOMAIN_VRAM |
+                                   AMDGPU_GEM_DOMAIN_GTT,
+                                   &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
@@@ -2165,6 -2175,8 +2175,8 @@@ static int detect_mst_link_for_all_conn
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
+                               ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+                                                                    aconnector->dc_link);
                                break;
                        }
                }
@@@ -2486,7 -2498,7 +2498,7 @@@ struct amdgpu_dm_connector 
  amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
  {
-       uint32_t i;
+       u32 i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;
@@@ -2734,12 -2746,14 +2746,14 @@@ static int dm_resume(void *handle
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
  
+               if (!aconnector->dc_link)
+                       continue;
                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->dc_link &&
-                   aconnector->dc_link->type == dc_connection_mst_branch)
+               if (aconnector->dc_link->type == dc_connection_mst_branch)
                        continue;
  
                mutex_lock(&aconnector->hpd_lock);
@@@ -3117,8 -3131,8 +3131,8 @@@ static void handle_hpd_irq(void *param
  
  static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
  {
-       uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-       uint8_t dret;
+       u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+       u8 dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;
  
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
-               uint8_t retry;
+               u8 retry;
                dret = 0;
  
                process_count++;
                                dpcd_bytes_to_read - 1;
  
                        for (retry = 0; retry < 3; retry++) {
-                               uint8_t wret;
+                               u8 wret;
  
                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
@@@ -4179,12 -4193,12 +4193,12 @@@ static void amdgpu_set_panel_orientatio
  static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
  {
        struct amdgpu_display_manager *dm = &adev->dm;
-       int32_t i;
+       s32 i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
-       uint32_t link_cnt;
-       int32_t primary_planes;
+       u32 link_cnt;
+       s32 primary_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
        bool psr_feature_enabled = false;
                amdgpu_set_panel_orientation(&aconnector->base);
        }
  
 +      /* If we didn't find a panel, notify the acpi video detection */
 +      if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
 +              acpi_video_report_nolcd();
 +
        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
  #if defined(CONFIG_DRM_AMD_DC_SI)
@@@ -4701,7 -4711,7 +4715,7 @@@ fill_plane_color_attributes(const struc
  static int
  fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
-                           const uint64_t tiling_flags,
+                           const u64 tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address,
                            bool tmz_surface,
@@@ -4876,7 -4886,7 +4890,7 @@@ static int fill_dc_plane_attributes(str
  
  static inline void fill_dc_dirty_rect(struct drm_plane *plane,
                                      struct rect *dirty_rect, int32_t x,
-                                     int32_t y, int32_t width, int32_t height,
+                                     s32 y, s32 width, s32 height,
                                      int *i, bool ffu)
  {
        if (*i > DC_MAX_DIRTY_RECTS)
@@@ -4932,11 -4942,11 +4946,11 @@@ static void fill_dc_dirty_rects(struct 
  {
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        struct rect *dirty_rects = flip_addrs->dirty_rects;
-       uint32_t num_clips;
+       u32 num_clips;
        struct drm_mode_rect *clips;
        bool bb_changed;
        bool fb_changed;
-       uint32_t i = 0;
+       u32 i = 0;
  
        /*
         * Cursor plane has it's own dirty rect update interface. See
@@@ -5082,7 -5092,7 +5096,7 @@@ static enum dc_color_dept
  convert_color_depth_from_display_info(const struct drm_connector *connector,
                                      bool is_y420, int requested_bpc)
  {
-       uint8_t bpc;
+       u8 bpc;
  
        if (is_y420) {
                bpc = 8;
@@@ -5626,8 -5636,8 +5640,8 @@@ static void apply_dsc_policy_for_edp(st
                                    uint32_t max_dsc_target_bpp_limit_override)
  {
        const struct dc_link_settings *verified_link_cap = NULL;
-       uint32_t link_bw_in_kbps;
-       uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+       u32 link_bw_in_kbps;
+       u32 edp_min_bpp_x16, edp_max_bpp_x16;
        struct dc *dc = sink->ctx->dc;
        struct dc_dsc_bw_range bw_range = {0};
        struct dc_dsc_config dsc_cfg = {0};
@@@ -5684,11 -5694,11 +5698,11 @@@ static void apply_dsc_policy_for_stream
                                        struct dsc_dec_dpcd_caps *dsc_caps)
  {
        struct drm_connector *drm_connector = &aconnector->base;
-       uint32_t link_bandwidth_kbps;
+       u32 link_bandwidth_kbps;
        struct dc *dc = sink->ctx->dc;
-       uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
-       uint32_t dsc_max_supported_bw_in_kbps;
-       uint32_t max_dsc_target_bpp_limit_override =
+       u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+       u32 dsc_max_supported_bw_in_kbps;
+       u32 max_dsc_target_bpp_limit_override =
                drm_connector->display_info.max_dsc_bpp;
  
        link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@@ -5835,7 -5845,8 +5849,8 @@@ create_stream_for_sink(struct amdgpu_dm
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+               recalculate_timing = amdgpu_freesync_vid_mode &&
+                                is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        drm_mode_copy(&saved_mode, &mode);
@@@ -6909,7 -6920,7 +6924,7 @@@ static uint add_fs_modes(struct amdgpu_
        const struct drm_display_mode *m;
        struct drm_display_mode *new_mode;
        uint i;
-       uint32_t new_modes_count = 0;
+       u32 new_modes_count = 0;
  
        /* Standard FPS values
         *
         * 60           - Commonly used
         * 48,72,96,120 - Multiples of 24
         */
-       static const uint32_t common_rates[] = {
+       static const u32 common_rates[] = {
                23976, 24000, 25000, 29970, 30000,
                48000, 50000, 60000, 72000, 96000, 120000
        };
                return 0;
  
        for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
-               uint64_t target_vtotal, target_vtotal_diff;
-               uint64_t num, den;
+               u64 target_vtotal, target_vtotal_diff;
+               u64 num, den;
  
                if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
                        continue;
@@@ -6986,7 -6997,7 +7001,7 @@@ static void amdgpu_dm_connector_add_fre
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
  
-       if (!edid)
+       if (!(amdgpu_freesync_vid_mode && edid))
                return;
  
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@@ -7182,7 -7193,7 +7197,7 @@@ create_i2c(struct ddc_service *ddc_serv
   */
  static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *aconnector,
-                                   uint32_t link_index,
+                                   u32 link_index,
                                    struct amdgpu_encoder *aencoder)
  {
        int res = 0;
@@@ -7367,27 -7378,55 +7382,55 @@@ is_scaling_state_different(const struc
  }
  
  #ifdef CONFIG_DRM_AMD_DC_HDCP
- static bool is_content_protection_different(struct drm_connector_state *state,
-                                           const struct drm_connector_state *old_state,
-                                           const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+ static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+                                           struct drm_crtc_state *old_crtc_state,
+                                           struct drm_connector_state *new_conn_state,
+                                           struct drm_connector_state *old_conn_state,
+                                           const struct drm_connector *connector,
+                                           struct hdcp_workqueue *hdcp_w)
  {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
  
-       /* Handle: Type0/1 change */
-       if (old_state->hdcp_content_type != state->hdcp_content_type &&
-           state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+       pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+               connector->index, connector->status, connector->dpms);
+       pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+               old_conn_state->content_protection, new_conn_state->content_protection);
+       if (old_crtc_state)
+               pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+               old_crtc_state->enable,
+               old_crtc_state->active,
+               old_crtc_state->mode_changed,
+               old_crtc_state->active_changed,
+               old_crtc_state->connectors_changed);
+       if (new_crtc_state)
+               pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+               new_crtc_state->enable,
+               new_crtc_state->active,
+               new_crtc_state->mode_changed,
+               new_crtc_state->active_changed,
+               new_crtc_state->connectors_changed);
+       /* hdcp content type change */
+       if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+           new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
                return true;
        }
  
-       /* CP is being re enabled, ignore this
-        *
-        * Handles:     ENABLED -> DESIRED
-        */
-       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
-           state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+       /* CP is being re enabled, ignore this */
+       if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               if (new_crtc_state && new_crtc_state->mode_changed) {
+                       new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+                       pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+                       return true;
+               };
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+               pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
                return false;
        }
  
         *
         * Handles:     UNDESIRED -> ENABLED
         */
-       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
-           state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+       if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
  
        /* Stream removed and re-enabled
         *
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
-       if (!(old_state->crtc && old_state->crtc->enabled) &&
-               state->crtc && state->crtc->enabled &&
+       if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+               new_conn_state->crtc && new_conn_state->crtc->enabled &&
                connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
                dm_con_state->update_hdcp = false;
+               pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+                       __func__);
                return true;
        }
  
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
-       if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
-           connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+       if (dm_con_state->update_hdcp &&
+       new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+       connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
                dm_con_state->update_hdcp = false;
+               pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+                       __func__);
                return true;
        }
  
-       /*
-        * Handles:     UNDESIRED -> UNDESIRED
-        *              DESIRED -> DESIRED
-        *              ENABLED -> ENABLED
-        */
-       if (old_state->content_protection == state->content_protection)
+       if (old_conn_state->content_protection == new_conn_state->content_protection) {
+               if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+                       if (new_crtc_state && new_crtc_state->mode_changed) {
+                               pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+                                       __func__);
+                               return true;
+                       };
+                       pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+                               __func__);
+                       return false;
+               };
+               pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
                return false;
+       }
  
-       /*
-        * Handles:     UNDESIRED -> DESIRED
-        *              DESIRED -> UNDESIRED
-        *              ENABLED -> UNDESIRED
-        */
-       if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+       if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+               pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+                       __func__);
                return true;
+       }
  
-       /*
-        * Handles:     DESIRED -> ENABLED
-        */
+       pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
        return false;
  }
  #endif
  static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
@@@ -7666,8 -7714,8 +7718,8 @@@ static void amdgpu_dm_commit_planes(str
                                    struct drm_crtc *pcrtc,
                                    bool wait_for_vblank)
  {
-       uint32_t i;
-       uint64_t timestamp_ns;
+       u32 i;
+       u64 timestamp_ns;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
        int planes_count = 0, vpos, hpos;
        unsigned long flags;
-       uint32_t target_vblank, last_flip_vblank;
+       u32 target_vblank, last_flip_vblank;
        bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
        bool cursor_update = false;
        bool pflip_present = false;
@@@ -8116,7 -8164,7 +8168,7 @@@ static void amdgpu_dm_atomic_commit_tai
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
-       uint32_t i, j;
+       u32 i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned long flags;
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
  
+               pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+               if (!connector)
+                       continue;
+               pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+                       connector->index, connector->status, connector->dpms);
+               pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+                       old_con_state->content_protection, new_con_state->content_protection);
+               if (aconnector->dc_sink) {
+                       if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+                               aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+                               pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+                               aconnector->dc_sink->edid_caps.display_name);
+                       }
+               }
                new_crtc_state = NULL;
+               old_crtc_state = NULL;
  
-               if (acrtc)
+               if (acrtc) {
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+                       old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+               }
+               if (old_crtc_state)
+                       pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+                       old_crtc_state->enable,
+                       old_crtc_state->active,
+                       old_crtc_state->mode_changed,
+                       old_crtc_state->active_changed,
+                       old_crtc_state->connectors_changed);
+               if (new_crtc_state)
+                       pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+                       new_crtc_state->enable,
+                       new_crtc_state->active,
+                       new_crtc_state->mode_changed,
+                       new_crtc_state->active_changed,
+                       new_crtc_state->connectors_changed);
+       }
+       for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+               struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+               struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+               struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+               new_crtc_state = NULL;
+               old_crtc_state = NULL;
+               if (acrtc) {
+                       new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+                       old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+               }
  
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
  
                        continue;
                }
  
-               if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+               if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+                                                                                       old_con_state, connector, adev->dm.hdcp_workqueue)) {
+                       /* when display is unplugged from mst hub, connctor will
+                        * be destroyed within dm_dp_mst_connector_destroy. connector
+                        * hdcp perperties, like type, undesired, desired, enabled,
+                        * will be lost. So, save hdcp properties into hdcp_work within
+                        * amdgpu_dm_atomic_commit_tail. if the same display is
+                        * plugged back with same display index, its hdcp properties
+                        * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+                        */
+                       bool enable_encryption = false;
+                       if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                               enable_encryption = true;
+                       if (aconnector->dc_link && aconnector->dc_sink &&
+                               aconnector->dc_link->type == dc_connection_mst_branch) {
+                               struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+                               struct hdcp_workqueue *hdcp_w =
+                                       &hdcp_work[aconnector->dc_link->link_index];
+                               hdcp_w->hdcp_content_type[connector->index] =
+                                       new_con_state->hdcp_content_type;
+                               hdcp_w->content_protection[connector->index] =
+                                       new_con_state->content_protection;
+                       }
+                       if (new_crtc_state && new_crtc_state->mode_changed &&
+                               new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                               enable_encryption = true;
+                       DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
-                               new_con_state->hdcp_content_type,
-                               new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+                               new_con_state->hdcp_content_type, enable_encryption);
+               }
        }
  #endif
  
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
  #ifdef CONFIG_DEBUG_FS
                enum amdgpu_dm_pipe_crc_source cur_crc_src;
- #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-               struct crc_rd_work *crc_rd_wrk;
- #endif
  #endif
                /* Count number of newly disabled CRTCs for dropping PM refs later. */
                if (old_crtc_state->active && !new_crtc_state->active)
                update_stream_irq_parameters(dm, dm_new_crtc_state);
  
  #ifdef CONFIG_DEBUG_FS
- #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-               crc_rd_wrk = dm->crc_rd_wrk;
- #endif
                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                cur_crc_src = acrtc->dm_irq_params.crc_src;
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                                if (amdgpu_dm_crc_window_is_activated(crtc)) {
                                        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                        acrtc->dm_irq_params.window_param.update_win = true;
+                                       /**
+                                        * It takes 2 frames for HW to stably generate CRC when
+                                        * resuming from suspend, so we set skip_frame_cnt 2.
+                                        */
                                        acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
-                                       spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-                                       crc_rd_wrk->crtc = crtc;
-                                       spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                                }
  #endif
@@@ -8679,15 -8807,22 +8811,22 @@@ static void get_freesync_config_for_crt
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
        int vrefresh = drm_mode_vrefresh(mode);
        bool fs_vid_mode = false;
+       bool drr_active = false;
  
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
                                        vrefresh >= aconnector->min_vfreq &&
                                        vrefresh <= aconnector->max_vfreq;
  
-       if (new_crtc_state->vrr_supported) {
+       drr_active = new_crtc_state->vrr_supported &&
+               new_crtc_state->freesync_config.state != VRR_STATE_DISABLED &&
+               new_crtc_state->freesync_config.state != VRR_STATE_INACTIVE &&
+               new_crtc_state->freesync_config.state != VRR_STATE_UNSUPPORTED;
+       if (drr_active)
                new_crtc_state->stream->ignore_msa_timing_param = true;
-               fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
  
+       if (new_crtc_state->vrr_supported) {
+               fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
                config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
                config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
                config.vsif_supported = true;
@@@ -8747,7 -8882,7 +8886,7 @@@ is_timing_unchanged_for_freesync(struc
  }
  
  static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
-       uint64_t num, den, res;
+       u64 num, den, res;
        struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
  
        dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@@ -8850,7 -8985,8 +8989,8 @@@ static int dm_update_crtc_state(struct 
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
-               if (dm_new_crtc_state->stream &&
+               if (amdgpu_freesync_vid_mode &&
+                   dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;
  
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
  
-               if (dm_new_crtc_state->stream &&
+               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
                        set_freesync_fixed_config(dm_new_crtc_state);
  
                        goto skip_modeset;
-               } else if (aconnector &&
+               } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;
@@@ -9883,7 -10019,7 +10023,7 @@@ fail
  static bool is_dp_capable_without_timing_msa(struct dc *dc,
                                             struct amdgpu_dm_connector *amdgpu_dm_connector)
  {
-       uint8_t dpcd_data;
+       u8 dpcd_data;
        bool capable = false;
  
        if (amdgpu_dm_connector->dc_link &&
  static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
                unsigned int offset,
                unsigned int total_length,
-               uint8_t *data,
+               u8 *data,
                unsigned int length,
                struct amdgpu_hdmi_vsdb_info *vsdb)
  {
  }
  
  static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
  {
        int i;
  }
  
  static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
  {
        int i;
  }
  
  static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
  {
        struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
  static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
                struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
  {
-       uint8_t *edid_ext = NULL;
+       u8 *edid_ext = NULL;
        int i;
        bool valid_vsdb_found = false;
  
@@@ -10204,7 -10340,7 +10344,7 @@@ void amdgpu_dm_trigger_timing_sync(stru
  }
  
  void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
-                      uint32_t value, const char *func_name)
+                      u32 value, const char *func_name)
  {
  #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
  uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
  {
-       uint32_t value;
+       u32 value;
  #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read; address = 0\n");
This page took 0.145111 seconds and 4 git commands to generate.