drm/amd/display: Add writeback enable field (wb_enabled)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 25b4d7f0bd35996b4bb69d952dcb3ef2008bf57b..a0aa624f5a923c1b5abfa18107adeba22ca144e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -66,9 +66,6 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
        adev->cg_flags = 0;
        adev->pg_flags = 0;
 
-       /* enable mcbp for sriov */
-       amdgpu_mcbp = 1;
-
        /* Reduce kcq number to 2 to reduce latency */
        if (amdgpu_num_kcq == -1)
                amdgpu_num_kcq = 2;
@@ -523,7 +520,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
                        tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
                        adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
                }
-               if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
+               if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
                        adev->virt.is_mm_bw_enabled = true;
 
                adev->unique_id =
@@ -838,9 +835,19 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad
        return mode;
 }
 
+void amdgpu_virt_post_reset(struct amdgpu_device *adev)
+{
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
+               /* force set to GFXOFF state after reset,
+                * to avoid some invalid operation before GC enable
+                */
+               adev->gfx.is_poweron = false;
+       }
+}
+
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
 {
-       switch (adev->ip_versions[MP0_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
        case IP_VERSION(13, 0, 0):
                /* no vf autoload, white list */
                if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
@@ -848,6 +855,17 @@ bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_i
                        return false;
                else
                        return true;
+       case IP_VERSION(11, 0, 9):
+       case IP_VERSION(11, 0, 7):
+               /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
+               if (ucode_id == AMDGPU_UCODE_ID_RLC_G
+                   || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+                   || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+                   || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+                   || ucode_id == AMDGPU_UCODE_ID_SMC)
+                       return true;
+               else
+                       return false;
        case IP_VERSION(13, 0, 10):
                /* white list */
                if (ucode_id == AMDGPU_UCODE_ID_CAP
@@ -957,7 +975,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
        return ret;
 }
 
-static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
 {
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
        uint32_t timeout = 50000;
@@ -975,7 +993,12 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
                return 0;
        }
 
-       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
+       if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
+               dev_err(adev->dev, "invalid xcc\n");
+               return 0;
+       }
+
+       reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
        scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
        scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
        scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
@@ -1040,13 +1063,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
 
 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
                       u32 offset, u32 value,
-                      u32 acc_flags, u32 hwip)
+                      u32 acc_flags, u32 hwip, u32 xcc_id)
 {
        u32 rlcg_flag;
 
        if (!amdgpu_sriov_runtime(adev) &&
                amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
-               amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+               amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
                return;
        }
 
@@ -1057,13 +1080,13 @@ void amdgpu_sriov_wreg(struct amdgpu_device *adev,
 }
 
 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
-                     u32 offset, u32 acc_flags, u32 hwip)
+                     u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
 {
        u32 rlcg_flag;
 
        if (!amdgpu_sriov_runtime(adev) &&
                amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
-               return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+               return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
 
        if (acc_flags & AMDGPU_REGS_NO_KIQ)
                return RREG32_NO_KIQ(offset);