return psp_xgmi_terminate(&adev->psp);
}
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;
- amdgpu_xgmi_reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
if (!adev->gmc.xgmi.ras_if) {
adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
return r;
}
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
adev->gmc.xgmi.ras_if) {
WREG32_PCIE(pcs_status_reg, 0);
}
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
uint32_t i;
return 0;
}
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status)
+static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
break;
}
- amdgpu_xgmi_reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
return 0;
}
+
+const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
+ .ras_late_init = amdgpu_xgmi_ras_late_init,
+ .ras_fini = amdgpu_xgmi_ras_fini,
+ .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
+ .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+};
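
Note for context (not part of the patch): with the four RAS entry points made static, callers are expected to reach them through the xgmi_ras_funcs table rather than by direct call. A minimal sketch of the intended hookup, assuming the upstream pattern where an IP block (e.g. gmc_v9_0.c) populates adev->gmc.xgmi.ras_funcs and call sites guard against it being NULL; the function names below are illustrative, not taken from this patch:

/* Sketch only: how a consumer is expected to wire up and invoke the
 * xgmi_ras_funcs table introduced above. Field names follow
 * amdgpu_xgmi.h; the NULL guards matter because ras_funcs is only
 * assigned on parts that actually expose XGMI/WAFL RAS.
 */
static void gmc_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
	/* Only multi-node XGMI configurations have these RAS counters. */
	if (adev->gmc.xgmi.num_physical_nodes > 1)
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
}

static int gmc_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	/* Guard the indirect call: ras_funcs may be NULL. */
	if (adev->gmc.xgmi.ras_funcs &&
	    adev->gmc.xgmi.ras_funcs->ras_late_init)
		return adev->gmc.xgmi.ras_funcs->ras_late_init(adev);

	return 0;
}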