#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
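+/*
+ * error_query_ready gates the RAS query/injection interfaces below:
+ * callers clear it while queries cannot be serviced (e.g. around a
+ * GPU reset) and set it again afterwards.
+ */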
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ return amdgpu_ras_get_context(adev)->error_query_ready;
+
+ return false;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct ras_debug_if data;
int ret = 0;
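+ /* Warn and swallow the request while the RAS interfaces are gated off. */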
+ if (!amdgpu_ras_get_error_query_ready(adev)) {
+ dev_warn(adev->dev, "RAS WARN: error injection "
+ "currently inaccessible\n");
+ return size;
+ }
+
ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
if (ret)
return -EINVAL;
case 2:
if ((data.inject.address >= adev->gmc.mc_vram_size) ||
(data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ dev_warn(adev->dev, "RAS WARN: input address "
+ "0x%llx is invalid.",
+ data.inject.address);
ret = -EINVAL;
break;
}
/* umc ce/ue error injection for a bad page is not allowed */
if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
+ "as bad before error injection!\n",
data.inject.address);
break;
}
.head = obj->head,
};
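+ /* While queries are gated off, report that instead of attempting one. */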
+ if (!amdgpu_ras_get_error_query_ready(obj->adev))
+ return snprintf(buf, PAGE_SIZE,
+ "Query currently inaccessible\n");
+
if (amdgpu_ras_error_query(obj->adev, &info))
return -EINVAL;
}
/* obj end */
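+/*
+ * Translate a TA RAS status code into a driver log message so that the
+ * enable, disable and inject paths report failures consistently.
+ */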
+static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
+ const char *invoke_type,
+ const char *block_name,
+ enum ta_ras_status ret)
+{
+ switch (ret) {
+ case TA_RAS_STATUS__SUCCESS:
+ return;
+ case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
+ dev_warn(adev->dev,
+ "RAS WARN: %s %s currently unavailable\n",
+ invoke_type,
+ block_name);
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS ERROR: %s %s error failed ret 0x%X\n",
+ invoke_type,
+ block_name,
+ ret);
+ }
+}
+
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
struct ras_common_if *head)
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- union ta_ras_cmd_input info;
+ union ta_ras_cmd_input *info;
int ret;
if (!con)
return -EINVAL;
+ info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
if (!enable) {
- info.disable_features = (struct ta_ras_disable_features_input) {
+ info->disable_features = (struct ta_ras_disable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
} else {
- info.enable_features = (struct ta_ras_enable_features_input) {
+ info->enable_features = (struct ta_ras_enable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
/* Are we already in the state we are going to set? */
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
- return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
+ ret = 0;
+ goto out;
+ }
if (!amdgpu_ras_intr_triggered()) {
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ (enum ta_ras_status)ret);
if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+
+ goto out;
}
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-
- return 0;
+ ret = 0;
+out:
+ kfree(info);
+ return ret;
}
/* Only used in device probe stage and called only once. */
if (ret == -EINVAL) {
ret = __amdgpu_ras_feature_enable(adev, head, 1);
if (!ret)
- DRM_INFO("RAS INFO: %s setup object\n",
+ dev_info(adev->dev,
+ "RAS INFO: %s setup object\n",
ras_block_str(head->block));
}
} else {
if (adev->nbio.funcs->query_ras_error_count)
adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ break;
default:
break;
}
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
- dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld correctable hardware errors "
+ "detected in %s block, no user "
+ "action is needed.\n",
+ obj->err_data.ce_count,
+ ras_block_str(info->head.block));
}
if (err_data.ue_count) {
- dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ obj->err_data.ue_count,
+ ras_block_str(info->head.block));
}
return 0;
}
-uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
+/* Trigger XGMI/WAFL error */
+static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ struct ta_ras_trigger_error_input *block_info)
{
- uint32_t df_inst_id;
+ int ret;
+
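+ /*
+ * Keep the data fabric and XGMI links out of low-power states while
+ * the TA injects the error, then restore them afterwards unless a
+ * fatal RAS interrupt has already been raised.
+ */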
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
- if ((!adev->df.funcs) ||
- (!adev->df.funcs->get_df_inst_id) ||
- (!adev->df.funcs->get_dram_base_addr))
- return addr;
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+ dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+ ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
- df_inst_id = adev->df.funcs->get_df_inst_id(adev);
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
- return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret;
}
/* wrapper of psp_ras_trigger_error */
/* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- block_info.address = get_xgmi_relative_phy_addr(adev,
- block_info.address);
+ block_info.address =
+ amdgpu_xgmi_get_relative_phy_addr(adev,
+ block_info.address);
}
switch (info->head.block) {
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
- case AMDGPU_RAS_BLOCK__XGMI_WAFL:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
+ break;
default:
- DRM_INFO("%s error injection is not supported yet\n",
+ dev_info(adev->dev, "%s error injection is not supported yet\n",
ras_block_str(info->head.block));
ret = -EINVAL;
}
- if (ret)
- DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
- ras_block_str(info->head.block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ "inject",
+ ras_block_str(info->head.block),
+ (enum ta_ras_status)ret);
return ret;
}
&amdgpu_ras_debugfs_ops);
}
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_fs_if fs_info;
+
+ /*
+ * This is not called in the resume path, so there is no need to
+ * check suspend or GPU reset status.
+ */
+ if (!con)
+ return;
+
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
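+ /*
+ * Create one "<block>_err_inject" node for every supported block
+ * whose sysfs attribute is in use.
+ */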
+ list_for_each_entry(obj, &con->head, node) {
+ if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+ (obj->attr_inuse == 1)) {
+ sprintf(fs_info.debugfs_name, "%s_err_inject",
+ ras_block_str(obj->head.block));
+ fs_info.head = obj->head;
+ amdgpu_ras_debugfs_create(adev, &fs_info);
+ }
+ }
+}
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
amdgpu_ras_sysfs_create_feature_node(adev);
- amdgpu_ras_debugfs_create_ctrl_node(adev);
return 0;
}
}
/* ih end */
+/* Traverse all IPs except NBIO to query error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ /*
+ * PCIE_BIF has its own ISR for the RAS controller interrupt,
+ * and its RAS counters are queried there, so skip that block
+ * in this common sync-flood ISR path.
+ */
+ if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+ continue;
+
+ amdgpu_ras_error_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
{
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ struct amdgpu_device *remote_adev = NULL;
+ struct amdgpu_device *adev = ras->adev;
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ device_list_handle = &hive->device_list;
+ else {
+ INIT_LIST_HEAD(&device_list);
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
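+ /* Log outstanding error counters on every node before the reset below. */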
+ list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
data = con->eh_data;
save_count = data->count - control->num_recs;
/* only new entries are saved */
- if (save_count > 0)
+ if (save_count > 0) {
if (amdgpu_ras_eeprom_process_recods(control,
&data->bps[control->num_recs],
true,
save_count)) {
- DRM_ERROR("Failed to save EEPROM table data!");
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
+ dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ }
+
return 0;
}
if (amdgpu_ras_eeprom_process_recods(control, bps, false,
control->num_recs)) {
- DRM_ERROR("Failed to load EEPROM table records!");
+ dev_err(adev->dev, "Failed to load EEPROM table records!");
ret = -EIO;
goto out;
}
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL))
- DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_warn(adev->dev, "RAS WARN: reserve vram for "
+ "retired page %llx fail\n", bp);
data->bps_bo[i] = bo;
data->last_reserved = i + 1;
kfree(*data);
con->eh_data = NULL;
out:
- DRM_WARN("Failed to initialize ras recovery!\n");
+ dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
return ret;
}
*hw_supported = 0;
*supported = 0;
- if (amdgpu_sriov_vf(adev) ||
+ if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 &&
adev->asic_type != CHIP_ARCTURUS))
return;
- if (adev->is_atom_fw &&
- (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
- amdgpu_atomfirmware_sram_ecc_supported(adev)))
- *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ dev_info(adev->dev, "HBM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ dev_info(adev->dev, "HBM ECC is not presented.\n");
+
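+ /* SRAM ECC reporting covers every RAS block except UMC and DF. */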
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ dev_info(adev->dev, "SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
+
+ /* hw_supported must stay within the RAS block mask. */
+ *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
*supported = amdgpu_ras_enable == 0 ?
- 0 : *hw_supported & amdgpu_ras_mask;
+ 0 : *hw_supported & amdgpu_ras_mask;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
if (!con->hw_supported) {
- amdgpu_ras_set_context(adev, NULL);
- kfree(con);
- return 0;
+ r = 0;
+ goto err_out;
}
con->features = 0;
if (adev->nbio.funcs->init_ras_controller_interrupt) {
r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
if (r)
- return r;
+ goto err_out;
}
if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
if (r)
- return r;
+ goto err_out;
}
- amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
-
- if (amdgpu_ras_fs_init(adev))
- goto fs_out;
+ if (amdgpu_ras_fs_init(adev)) {
+ r = -EINVAL;
+ goto err_out;
+ }
- DRM_INFO("RAS INFO: ras initialized successfully, "
+ dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
-fs_out:
+err_out:
amdgpu_ras_set_context(adev, NULL);
kfree(con);
- return -EINVAL;
+ return r;
}
/* helper function to handle common stuff in ip late init phase */
goto interrupt;
}
- amdgpu_ras_debugfs_create(adev, fs_info);
-
r = amdgpu_ras_sysfs_create(adev, fs_info);
if (r)
goto sysfs;
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
return;
amdgpu_ras_sysfs_remove(adev, ras_block);
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
amdgpu_ras_feature_enable(adev, ras_block, 0);
return;
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
- DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+ dev_info(adev->dev, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
amdgpu_ras_reset_gpu(adev);
}
}
+
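+/*
+ * On VEGA20 with SMU firmware <= 0x283400, a triggered fatal RAS
+ * interrupt requires an emergency restart whenever the selected reset
+ * method is not BACO.
+ */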
+bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_VEGA20 &&
+ adev->pm.fw_version <= 0x283400) {
+ return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
+ amdgpu_ras_intr_triggered();
+ }
+
+ return false;
+}