tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
}
- if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
+ if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
adev->virt.is_mm_bw_enabled = true;
adev->unique_id =
return mode;
}
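+/* post-reset fixups applied when running as an SR-IOV virtual function */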
+void amdgpu_virt_post_reset(struct amdgpu_device *adev)
+{
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
+ /* force set to GFXOFF state after reset,
+ * to avoid invalid operations before GC is enabled
+ */
+ adev->gfx.is_poweron = false;
+ }
+}
+
bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
- switch (adev->ip_versions[MP0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
case IP_VERSION(13, 0, 0):
/* no vf autoload, white list */
if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
return false;
else
return true;
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
+ if (ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
case IP_VERSION(13, 0, 10):
/* white list */
if (ucode_id == AMDGPU_UCODE_ID_CAP
return ret;
}
-static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
{
struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
uint32_t timeout = 50000;
return 0;
}
- reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
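+ /* reject XCC instances that are not present in xcc_mask */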
+ if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
+ dev_err(adev->dev, "invalid xcc\n");
+ return 0;
+ }
+
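+ /* RLCG register access controls are tracked per XCC */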
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
u32 offset, u32 value,
- u32 acc_flags, u32 hwip)
+ u32 acc_flags, u32 hwip, u32 xcc_id)
{
u32 rlcg_flag;
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
- amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
+ amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
return;
}
}
u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
- u32 offset, u32 acc_flags, u32 hwip)
+ u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
{
u32 rlcg_flag;
if (!amdgpu_sriov_runtime(adev) &&
amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
- return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);
+ return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
if (acc_flags & AMDGPU_REGS_NO_KIQ)
return RREG32_NO_KIQ(offset);