Merge tag 'amd-drm-next-5.8-2020-04-24' of git://people.freedesktop.org/~agd5f/linux...
author Dave Airlie <[email protected]>
Thu, 30 Apr 2020 01:08:54 +0000 (11:08 +1000)
committer Dave Airlie <[email protected]>
Thu, 30 Apr 2020 01:08:54 +0000 (11:08 +1000)
amd-drm-next-5.8-2020-04-24:

amdgpu:
- Documentation improvements
- Enable FRU chip access on boards that support it
- RAS updates
- SR-IOV updates
- Powerplay locking fixes for older SMU versions
- VCN DPG (dynamic powergating) cleanup
- VCN 2.5 DPG enablement
- Rework GPU scheduler handling
- Improve scheduler priority handling
- Add SPM (streaming performance monitor) golden settings for navi
- GFX10 clockgating fixes
- DC ABM (automatic backlight modulation) fixes
- DC cursor and plane fixes
- DC watermark fixes
- DC clock handling fixes
- DC color management fixes
- GPU reset fixes
- Clean up MMIO access macros
- EEPROM access fixes
- Misc code cleanups

amdkfd:
- Misc code cleanups

radeon:
- Clean up safe reg list generation
- Misc code cleanups

From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c

index e42608115c99a5c4b574471c1717d992b15376ae,32f36c940abb5a9eb922c79cf9af97192ec85c1a..245aec521388a280a885b8f881a042fb7b3cb930
@@@ -29,7 -29,6 +29,7 @@@
  #include <linux/module.h>
  #include <linux/pagemap.h>
  #include <linux/pci.h>
 +#include <linux/dma-buf.h>
  
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_debugfs.h>
@@@ -162,16 -161,17 +162,17 @@@ void amdgpu_gem_object_close(struct drm
  
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
+       struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
-       int r;
+       long r;
  
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
  
        tv.bo = &bo->tbo;
-       tv.num_shared = 1;
+       tv.num_shared = 2;
        list_add(&tv.head, &list);
  
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%d)\n", r);
+                       "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
-       if (bo_va && --bo_va->ref_count == 0) {
-               amdgpu_vm_bo_rmv(adev, bo_va);
-               if (amdgpu_vm_ready(vm)) {
-                       struct dma_fence *fence = NULL;
+       if (!bo_va || --bo_va->ref_count)
+               goto out_unlock;
  
-                       r = amdgpu_vm_clear_freed(adev, vm, &fence);
-                       if (unlikely(r)) {
-                               dev_err(adev->dev, "failed to clear page "
-                                       "tables on GEM object close (%d)\n", r);
-                       }
+       amdgpu_vm_bo_rmv(adev, bo_va);
+       if (!amdgpu_vm_ready(vm))
+               goto out_unlock;
  
-                       if (fence) {
-                               amdgpu_bo_fence(bo, fence, true);
-                               dma_fence_put(fence);
-                       }
-               }
+       fence = dma_resv_get_excl(bo->tbo.base.resv);
+       if (fence) {
+               amdgpu_bo_fence(bo, fence, true);
+               fence = NULL;
        }
+       r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       if (r || !fence)
+               goto out_unlock;
+       amdgpu_bo_fence(bo, fence, true);
+       dma_fence_put(fence);
+ out_unlock:
+       if (unlikely(r < 0))
+               dev_err(adev->dev, "failed to clear page "
+                       "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
  }
  
@@@ -855,8 -861,7 +862,8 @@@ static int amdgpu_debugfs_gem_bo_info(i
        attachment = READ_ONCE(bo->tbo.base.import_attach);
  
        if (attachment)
 -              seq_printf(m, " imported from %p", dma_buf);
 +              seq_printf(m, " imported from %p%s", dma_buf,
 +                         attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);
  
index 6309ff72bd78765f45baa9772058becc52dbf12e,1331b4c5bdca06341a2d2709ab4d670b9cccba56..6880c023ca8b8a782feb23686132550718cf54b9
@@@ -770,6 -770,7 +770,6 @@@ struct amdgpu_ttm_tt 
  static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
        (1 << 0), /* HMM_PFN_VALID */
        (1 << 1), /* HMM_PFN_WRITE */
 -      0 /* HMM_PFN_DEVICE_PRIVATE */
  };
  
  static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
@@@ -850,7 -851,7 +850,7 @@@ retry
        range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
  
        down_read(&mm->mmap_sem);
 -      r = hmm_range_fault(range, 0);
 +      r = hmm_range_fault(range);
        up_read(&mm->mmap_sem);
        if (unlikely(r <= 0)) {
                /*
@@@ -2042,7 -2043,8 +2042,8 @@@ static int amdgpu_map_buffer(struct ttm
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8;
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                                                       AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
  
@@@ -2101,7 -2103,8 +2102,8 @@@ int amdgpu_copy_buffer(struct amdgpu_ri
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
+                       direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
  
@@@ -2190,7 -2193,7 +2192,7 @@@ int amdgpu_fill_buffer(struct amdgpu_b
        /* for IB padding */
        num_dw += 64;
  
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
  
index 5f3a5ee2a3f4e20948678c36e733e8d8875d8752,f092f12fecfe8ff247b71184eb5582284112efe3..3e99f31b4bd03bd42e0acddf366e110113d5e8fd
@@@ -115,17 -115,21 +115,21 @@@ static const struct soc15_reg_golden go
  static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
  };
  
  static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
  };
  
  static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
@@@ -174,6 -178,7 +178,7 @@@ static const struct soc15_reg_golden go
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
  };
  
  static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
  };
  
  static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@@ -222,27 -228,35 +228,35 @@@ static const struct soc15_reg_golden go
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
        SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
-       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+       SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
  };
  
  static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
@@@ -677,7 -691,7 +691,7 @@@ static uint64_t sdma_v4_0_ring_get_wptr
  }
  
  /**
 - * sdma_v4_0_ring_set_wptr - commit the write pointer
 + * sdma_v4_0_page_ring_set_wptr - commit the write pointer
   *
   * @ring: amdgpu ring pointer
   *
@@@ -923,8 -937,6 +937,6 @@@ static void sdma_v4_0_gfx_stop(struct a
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-               sdma[i]->sched.ready = false;
        }
  }
  
@@@ -971,13 -983,11 +983,11 @@@ static void sdma_v4_0_page_stop(struct 
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
                                        IB_ENABLE, 0);
                WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-               sdma[i]->sched.ready = false;
        }
  }
  
  /**
 - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
 + * sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
   *
   * @adev: amdgpu_device pointer
   * @enable: enable/disable the DMA MEs context switch.
@@@ -1539,7 -1549,8 +1549,8 @@@ static int sdma_v4_0_ring_test_ib(struc
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
  
@@@ -1840,7 -1851,7 +1851,7 @@@ static int sdma_v4_0_sw_init(void *hand
                ring->ring_obj = NULL;
                ring->use_doorbell = true;
  
-               DRM_INFO("use_doorbell being set to: [%s]\n",
+               DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
                                ring->use_doorbell?"true":"false");
  
                /* doorbell size is 2 dwords, get DWORD offset */
  
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+                                    AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+                                    AMDGPU_RING_PRIO_DEFAULT);
                if (r)
                        return r;
  
                        sprintf(ring->name, "page%d", i);
                        r = amdgpu_ring_init(adev, ring, 1024,
                                             &adev->sdma.trap_irq,
-                                            AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+                                            AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+                                            AMDGPU_RING_PRIO_DEFAULT);
                        if (r)
                                return r;
                }
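
Note on the golden-settings tables modified above: each SOC15_REG_GOLDEN_VALUE entry pairs a register with an and-mask and an or-value, and the driver programs the table with a read-modify-write pass at hardware init. Below is a minimal, illustrative sketch of that pattern under stated assumptions; it is not the driver's actual helper, and reg_golden, program_golden, rreg() and wreg() are hypothetical stand-ins for the real structures and MMIO accessors.

  /* Hedged sketch: apply a (and_mask, or_mask) golden-register table.
   * rreg()/wreg() are assumed MMIO read/write helpers, not real kernel APIs.
   */
  extern unsigned int rreg(unsigned int reg);
  extern void wreg(unsigned int reg, unsigned int val);

  struct reg_golden {
          unsigned int reg;       /* register offset */
          unsigned int and_mask;  /* bits owned by this entry */
          unsigned int or_mask;   /* value to program into those bits */
  };

  static void program_golden(const struct reg_golden *regs, unsigned int count)
  {
          unsigned int i, tmp;

          for (i = 0; i < count; i++) {
                  if (regs[i].and_mask == 0xffffffff) {
                          /* full-mask entries overwrite the register outright,
                           * as with the UTCL1_TIMEOUT values added above */
                          tmp = regs[i].or_mask;
                  } else {
                          /* otherwise read-modify-write only the masked bits */
                          tmp = rreg(regs[i].reg);
                          tmp &= ~regs[i].and_mask;
                          tmp |= regs[i].or_mask & regs[i].and_mask;
                  }
                  wreg(regs[i].reg, tmp);
          }
  }
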
index f7c5cdc10a705b1b78cfb074596649f55d831639,6cd661545f35614caae2c480a40152aea78b6d2e..4c8b1bc989135e88f53dea7602e4b29cc4138594
@@@ -825,8 -825,9 +825,9 @@@ static int dm_dmub_hw_init(struct amdgp
                                fw_inst_const_size);
        }
  
-       memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
-              fw_bss_data_size);
+       if (fw_bss_data_size)
+               memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+                      fw_bss_data, fw_bss_data_size);
  
        /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
@@@ -1265,6 -1266,10 +1266,10 @@@ static int dm_dmub_sw_init(struct amdgp
                adev->dm.dmub_fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
                le32_to_cpu(hdr->inst_const_bytes);
+       region_params.fw_inst_const =
+               adev->dm.dmub_fw->data +
+               le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+               PSP_HEADER_BYTES;
  
        status = dmub_srv_calc_region_info(dmub_srv, &region_params,
                                           &region_info);
@@@ -3340,7 -3345,8 +3345,8 @@@ fill_plane_dcc_attributes(struct amdgpu
                          const union dc_tiling_info *tiling_info,
                          const uint64_t info,
                          struct dc_plane_dcc_param *dcc,
-                         struct dc_plane_address *address)
+                         struct dc_plane_address *address,
+                         bool force_disable_dcc)
  {
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));
  
+       if (force_disable_dcc)
+               return 0;
        if (!offset)
                return 0;
  
@@@ -3401,7 -3410,8 +3410,8 @@@ fill_plane_buffer_attributes(struct amd
                             union dc_tiling_info *tiling_info,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
-                            struct dc_plane_address *address)
+                            struct dc_plane_address *address,
+                            bool force_disable_dcc)
  {
        const struct drm_framebuffer *fb = &afb->base;
        int ret;
  
                ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
                                                plane_size, tiling_info,
-                                               tiling_flags, dcc, address);
+                                               tiling_flags, dcc, address,
+                                               force_disable_dcc);
                if (ret)
                        return ret;
        }
@@@ -3599,7 -3610,8 +3610,8 @@@ fill_dc_plane_info_and_addr(struct amdg
                            const struct drm_plane_state *plane_state,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
-                           struct dc_plane_address *address)
+                           struct dc_plane_address *address,
+                           bool force_disable_dcc)
  {
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
-                                          &plane_info->dcc, address);
+                                          &plane_info->dcc, address,
+                                          force_disable_dcc);
        if (ret)
                return ret;
  
@@@ -3704,6 -3717,7 +3717,7 @@@ static int fill_dc_plane_attributes(str
        struct dc_plane_info plane_info;
        uint64_t tiling_flags;
        int ret;
+       bool force_disable_dcc = false;
  
        ret = fill_dc_scaling_info(plane_state, &scaling_info);
        if (ret)
        if (ret)
                return ret;
  
+       force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
        ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                                          &plane_info,
-                                         &dc_plane_state->address);
+                                         &dc_plane_state->address,
+                                         force_disable_dcc);
        if (ret)
                return ret;
  
@@@ -4324,14 -4340,10 +4340,10 @@@ create_stream_for_sink(struct amdgpu_dm
  
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
-       if (stream->link->psr_feature_enabled)  {
+       if (stream->link->psr_settings.psr_feature_enabled)     {
                struct dc  *core_dc = stream->link->ctx->dc;
  
                if (dc_is_dmcu_initialized(core_dc)) {
-                       struct dmcu *dmcu = core_dc->res_pool->dmcu;
-                       stream->psr_version = dmcu->dmcu_version.psr_version;
                        //
                        // should decide stream support vsc sdp colorimetry capability
                        // before building vsc info packet
@@@ -4664,6 -4676,7 +4676,7 @@@ static void amdgpu_dm_connector_destroy
                i2c_del_adapter(&aconnector->i2c->base);
                kfree(aconnector->i2c);
        }
+       kfree(aconnector->dm_dp_aux.aux.name);
  
        kfree(connector);
  }
@@@ -4723,10 -4736,19 +4736,19 @@@ amdgpu_dm_connector_atomic_duplicate_st
  static int
  amdgpu_dm_connector_late_register(struct drm_connector *connector)
  {
 +#if defined(CONFIG_DEBUG_FS)
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
+       int r;
+       if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+           (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+               amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+               r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+               if (r)
+                       return r;
+       }
  
 -#if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
  #endif
  
@@@ -5332,6 -5354,7 +5354,7 @@@ static int dm_plane_helper_prepare_fb(s
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
+       bool force_disable_dcc = false;
  
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
  
+               force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
-                       &plane_state->address);
+                       &plane_state->address,
+                       force_disable_dcc);
        }
  
        return 0;
@@@ -6092,7 -6117,7 +6117,7 @@@ static int amdgpu_dm_connector_init(str
  
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
                || connector_type == DRM_MODE_CONNECTOR_eDP)
-               amdgpu_dm_initialize_dp_connector(dm, aconnector);
+               amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
  
  out_free:
        if (res) {
@@@ -6619,6 -6644,7 +6644,7 @@@ static void amdgpu_dm_commit_planes(str
                if (new_pcrtc_state->color_mgmt_changed) {
                        bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
                        bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+                       bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
                }
  
                fill_dc_scaling_info(new_plane_state,
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
-                       &bundle->flip_addrs[planes_count].address);
+                       &bundle->flip_addrs[planes_count].address,
+                       false);
+               DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+                                new_plane_state->plane->index,
+                                bundle->plane_infos[planes_count].dcc.enable);
  
                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];
                }
                mutex_lock(&dm->dc_lock);
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                               acrtc_state->stream->link->psr_allow_active)
+                               acrtc_state->stream->link->psr_settings.psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);
  
                dc_commit_updates_for_stream(dm->dc,
                                                     dc_state);
  
                if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
-                                               acrtc_state->stream->psr_version &&
-                                               !acrtc_state->stream->link->psr_feature_enabled)
+                               acrtc_state->stream->link->psr_settings.psr_version != PSR_VERSION_UNSUPPORTED &&
+                               !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
                        amdgpu_dm_link_setup_psr(acrtc_state->stream);
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
-                                               acrtc_state->stream->link->psr_feature_enabled &&
-                                               !acrtc_state->stream->link->psr_allow_active) {
+                               acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+                               !acrtc_state->stream->link->psr_settings.psr_allow_active) {
                        amdgpu_dm_psr_enable(acrtc_state->stream);
                }
  
@@@ -7137,7 -7168,7 +7168,7 @@@ static void amdgpu_dm_atomic_commit_tai
                        DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream) {
-                               if (dm_old_crtc_state->stream->link->psr_allow_active)
+                               if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
                                        amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
  
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@@ -8063,6 -8094,8 +8094,8 @@@ dm_determine_update_type_for_commit(str
                                                new_dm_plane_state->dc_state->gamma_correction;
                                bundle->surface_updates[num_plane].in_transfer_func =
                                                new_dm_plane_state->dc_state->in_transfer_func;
+                               bundle->surface_updates[num_plane].gamut_remap_matrix =
+                                               &new_dm_plane_state->dc_state->gamut_remap_matrix;
                                bundle->stream_update.gamut_remap =
                                                &new_dm_crtc_state->stream->gamut_remap_matrix;
                                bundle->stream_update.output_csc_transform =
                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
-                                       &flip_addr->address);
+                                       &flip_addr->address,
+                                       false);
                                if (ret)
                                        goto cleanup;
  
@@@ -8586,8 -8620,17 +8620,17 @@@ static void amdgpu_dm_set_psr_caps(stru
                return;
        if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
                                        dpcd_data, sizeof(dpcd_data))) {
-               link->psr_feature_enabled = dpcd_data[0] ? true:false;
-               DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+               link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+               if (dpcd_data[0] == 0) {
+                       link->psr_settings.psr_version = PSR_VERSION_UNSUPPORTED;
+                       link->psr_settings.psr_feature_enabled = false;
+               } else {
+                       link->psr_settings.psr_version = PSR_VERSION_1;
+                       link->psr_settings.psr_feature_enabled = true;
+               }
+               DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
        }
  }
  
@@@ -8611,7 -8654,7 +8654,7 @@@ static bool amdgpu_dm_link_setup_psr(st
        link = stream->link;
        dc = link->ctx->dc;
  
-       psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+       psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
  
        if (psr_config.psr_version > 0) {
                psr_config.psr_exit_link_training_required = 0x1;
                ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
  
        }
-       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_feature_enabled);
+       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
  
        return ret;
  }
index 3db1ec35d2b4112ad0f190dfabdc11adec82be5e,69056660672d3f3e5c5d0b498608200463892b93..ae0a7ef1d595a6e226754a007e6c1f163cad254b
  #include "amdgpu_dm_debugfs.h"
  #endif
  
  #if defined(CONFIG_DRM_AMD_DC_DCN)
  #include "dc/dcn20/dcn20_resource.h"
  #endif
  
- /* #define TRACE_DPCD */
- #ifdef TRACE_DPCD
- #define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
- static inline char *side_band_msg_type_to_str(uint32_t address)
- {
-       static char str[10] = {0};
-       if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
-               strcpy(str, "DOWN_REQ");
-       else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
-               strcpy(str, "UP_REP");
-       else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
-               strcpy(str, "DOWN_REP");
-       else
-               strcpy(str, "UP_REQ");
-       return str;
- }
- static void log_dpcd(uint8_t type,
-                    uint32_t address,
-                    uint8_t *data,
-                    uint32_t size,
-                    bool res)
- {
-       DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
-                       (type == DP_AUX_NATIVE_READ) ||
-                       (type == DP_AUX_I2C_READ) ?
-                                       "Read" : "Write",
-                       address,
-                       SIDE_BAND_MSG(address) ?
-                                       side_band_msg_type_to_str(address) : "Nop",
-                       res ? "OK" : "Fail");
-       if (res) {
-               print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
-       }
- }
- #endif
  static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                  struct drm_dp_aux_msg *msg)
  {
  static void
  dm_dp_mst_connector_destroy(struct drm_connector *connector)
  {
 -      struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
 -      struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
 +      struct amdgpu_dm_connector *aconnector =
 +              to_amdgpu_dm_connector(connector);
 +      struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
  
 -      kfree(amdgpu_dm_connector->edid);
 -      amdgpu_dm_connector->edid = NULL;
 +      if (aconnector->dc_sink) {
 +              dc_link_remove_remote_sink(aconnector->dc_link,
 +                                         aconnector->dc_sink);
 +              dc_sink_release(aconnector->dc_sink);
 +      }
 +
 +      kfree(aconnector->edid);
  
        drm_encoder_cleanup(&amdgpu_encoder->base);
        kfree(amdgpu_encoder);
        drm_connector_cleanup(connector);
 -      drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
 -      kfree(amdgpu_dm_connector);
 +      drm_dp_mst_put_port_malloc(aconnector->port);
 +      kfree(aconnector);
  }
  
  static int
@@@ -162,16 -113,16 +119,16 @@@ amdgpu_dm_mst_connector_late_register(s
                to_amdgpu_dm_connector(connector);
        int r;
  
-       amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
-       r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
-       if (r)
+       r = drm_dp_mst_connector_late_register(connector,
+                                              amdgpu_dm_connector->port);
+       if (r < 0)
                return r;
  
  #if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
  #endif
  
-       return r;
+       return 0;
  }
  
  static void
@@@ -441,19 -392,49 +398,22 @@@ dm_dp_add_mst_connector(struct drm_dp_m
         */
        amdgpu_dm_connector_funcs_reset(connector);
  
 -      DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
 -               aconnector, connector->base.id, aconnector->mst_port);
 -
        drm_dp_mst_get_port_malloc(port);
  
 -      DRM_DEBUG_KMS(":%d\n", connector->base.id);
 -
        return connector;
  }
  
 -static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 -                                      struct drm_connector *connector)
 -{
 -      struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 -
 -      DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
 -               aconnector, connector->base.id, aconnector->mst_port);
 -
 -      if (aconnector->dc_sink) {
 -              amdgpu_dm_update_freesync_caps(connector, NULL);
 -              dc_link_remove_remote_sink(aconnector->dc_link,
 -                                         aconnector->dc_sink);
 -              dc_sink_release(aconnector->dc_sink);
 -              aconnector->dc_sink = NULL;
 -              aconnector->dc_link->cur_link_settings.lane_count = 0;
 -      }
 -
 -      drm_connector_unregister(connector);
 -      drm_connector_put(connector);
 -}
 -
  static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
        .add_connector = dm_dp_add_mst_connector,
 -      .destroy_connector = dm_dp_destroy_mst_connector,
  };
  
  void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
-                                      struct amdgpu_dm_connector *aconnector)
+                                      struct amdgpu_dm_connector *aconnector,
+                                      int link_index)
  {
-       aconnector->dm_dp_aux.aux.name = "dmdc";
+       aconnector->dm_dp_aux.aux.name =
+               kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
+                         link_index);
        aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
        aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
  
index aa3c45a69b5ec70ef4f021c25a9633612a77c4c0,a87302f729c77032c413a3153b62c25173279466..d5b306384d790c28e7cf1027f9eadc867d0448fb
@@@ -1710,19 -1710,10 +1710,10 @@@ bool dc_link_dp_sync_lt_end(struct dc_l
  
  static struct dc_link_settings get_max_link_cap(struct dc_link *link)
  {
-       /* Set Default link settings */
-       struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
-                       LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-       /* Higher link settings based on feature supported */
-       if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
-               max_link_cap.link_rate = LINK_RATE_HIGH2;
-       if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
-               max_link_cap.link_rate = LINK_RATE_HIGH3;
+       struct dc_link_settings max_link_cap = {0};
  
-       if (link->link_enc->funcs->get_max_link_cap)
-               link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+       /* get max link encoder capability */
+       link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
  
        /* Lower link settings based on sink's link cap */
        if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@@ -2426,7 -2417,7 +2417,7 @@@ static bool handle_hpd_irq_psr_sink(str
  {
        union dpcd_psr_configuration psr_configuration;
  
-       if (!link->psr_feature_enabled)
+       if (!link->psr_settings.psr_feature_enabled)
                return false;
  
        dm_helpers_dp_read_dpcd(
@@@ -2530,7 -2521,7 +2521,7 @@@ static void dp_test_send_phy_test_patte
        /* get phy test pattern and pattern parameters from DP receiver */
        core_link_read_dpcd(
                        link,
 -                      DP_TEST_PHY_PATTERN,
 +                      DP_PHY_TEST_PATTERN,
                        &dpcd_test_pattern.raw,
                        sizeof(dpcd_test_pattern));
        core_link_read_dpcd(
@@@ -2908,6 -2899,12 +2899,12 @@@ bool dc_link_handle_hpd_rx_irq(struct d
                                        sizeof(hpd_irq_dpcd_data),
                                        "Status: ");
  
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.blank_stream(pipe_ctx);
+               }
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
                if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_link_reallocate_mst_payload(link);
  
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+               }
                status = false;
                if (out_link_loss)
                        *out_link_loss = true;
@@@ -4227,6 -4230,21 +4230,21 @@@ void dp_set_fec_enable(struct dc_link *
  void dpcd_set_source_specific_data(struct dc_link *link)
  {
        const uint32_t post_oui_delay = 30; // 30ms
+       uint8_t dspc = 0;
+       enum dc_status ret = DC_ERROR_UNEXPECTED;
+       ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+                                 sizeof(dspc));
+       if (ret != DC_OK) {
+               DC_LOG_ERROR("Error in DP aux read transaction,"
+                            " not writing source specific data\n");
+               return;
+       }
+       /* Return if OUI unsupported */
+       if (!(dspc & DP_OUI_SUPPORT))
+               return;
  
        if (!link->dc->vendor_signature.is_valid) {
                struct dpcd_amd_signature amd_signature;
index 655ba4fb05dcdf330e53c3548e54681a18a58208,a97b2964ca7c6e7c21c8288b4640257647a583c9..0045b54b19ed9b35efe16ad5ebfe005beb7c1e52
@@@ -201,13 -201,15 +201,15 @@@ int smu_v11_0_load_microcode(struct smu
        const struct smc_firmware_header_v1_0 *hdr;
        uint32_t addr_start = MP1_SRAM;
        uint32_t i;
+       uint32_t smc_fw_size;
        uint32_t mp1_fw_flags;
  
        hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
        src = (const uint32_t *)(adev->pm.fw->data +
                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+       smc_fw_size = hdr->header.ucode_size_bytes;
  
-       for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
+       for (i = 1; i < smc_fw_size/4 - 1; i++) {
                WREG32_PCIE(addr_start, src[i]);
                addr_start += 4;
        }
@@@ -264,23 -266,23 +266,23 @@@ int smu_v11_0_check_fw_version(struct s
  
        switch (smu->adev->asic_type) {
        case CHIP_VEGA20:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
                break;
        case CHIP_ARCTURUS:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
                break;
        case CHIP_NAVI10:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
                break;
        case CHIP_NAVI12:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
                break;
        case CHIP_NAVI14:
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
                break;
        default:
                pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
-               smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+               smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
                break;
        }
  
         * Considering above, we just leave user a warning message instead
         * of halt driver loading.
         */
-       if (if_version != smu->smc_if_version) {
+       if (if_version != smu->smc_driver_if_version) {
                pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
                        "smu fw version = 0x%08x (%d.%d.%d)\n",
-                       smu->smc_if_version, if_version,
+                       smu->smc_driver_if_version, if_version,
                        smu_version, smu_major, smu_minor, smu_debug);
                pr_warn("SMU driver if version not matched\n");
        }
@@@ -479,8 -481,6 +481,6 @@@ int smu_v11_0_init_power(struct smu_con
  {
        struct smu_power_context *smu_power = &smu->smu_power;
  
-       if (!smu->pm_enabled)
-               return 0;
        if (smu_power->power_context || smu_power->power_context_size != 0)
                return -EINVAL;
  
@@@ -497,8 -497,6 +497,6 @@@ int smu_v11_0_fini_power(struct smu_con
  {
        struct smu_power_context *smu_power = &smu->smu_power;
  
-       if (!smu->pm_enabled)
-               return 0;
        if (!smu_power->power_context || smu_power->power_context_size == 0)
                return -EINVAL;
  
@@@ -783,8 -781,6 +781,6 @@@ int smu_v11_0_set_min_dcef_deep_sleep(s
  {
        struct smu_table_context *table_context = &smu->smu_table;
  
-       if (!smu->pm_enabled)
-               return 0;
        if (!table_context)
                return -EINVAL;
  
@@@ -835,9 -831,6 +831,6 @@@ int smu_v11_0_init_display_count(struc
  {
        int ret = 0;
  
-       if (!smu->pm_enabled)
-               return ret;
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
        return ret;
  }
@@@ -932,8 -925,6 +925,6 @@@ int smu_v11_0_notify_display_change(str
  {
        int ret = 0;
  
-       if (!smu->pm_enabled)
-               return ret;
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
            smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@@ -948,9 -939,6 +939,6 @@@ smu_v11_0_get_max_sustainable_clock(str
        int ret = 0;
        int clk_id;
  
-       if (!smu->pm_enabled)
-               return ret;
        if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
            (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
                return 0;
@@@ -1205,9 -1193,6 +1193,6 @@@ int smu_v11_0_start_thermal_control(str
        struct smu_temperature_range range;
        struct amdgpu_device *adev = smu->adev;
  
-       if (!smu->pm_enabled)
-               return ret;
        memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
  
        ret = smu_get_thermal_temperature_range(smu, &range);
@@@ -1321,9 -1306,6 +1306,6 @@@ smu_v11_0_display_clock_voltage_request
        enum smu_clk_type clk_select = 0;
        uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
  
-       if (!smu->pm_enabled)
-               return -EINVAL;
        if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
                smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                switch (clk_type) {
@@@ -1718,12 -1700,6 +1700,12 @@@ int smu_v11_0_baco_set_state(struct smu
                if (ret)
                        goto out;
  
 +              if (ras && ras->supported) {
 +                      ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
 +                      if (ret)
 +                              goto out;
 +              }
 +
                /* clear vbios scratch 6 and 7 for coming asic reinit */
                WREG32(adev->bios_scratch_reg_offset + 6, 0);
                WREG32(adev->bios_scratch_reg_offset + 7, 0);