/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
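
/* Query error counts and addresses from the UMC block, retire any bad
 * pages that are reported and, when uncorrectable errors are found,
 * optionally request a GPU reset.
 */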
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		bool reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret = 0;

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP) {
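		/* The SMU does not expose ECC info, fall back to reading the
		 * error status directly from the UMC registers.
		 */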
		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->query_ras_error_count)
			adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even if a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for umc error address record!\n");

			/* umc query_ras_error_address is also responsible for
			 * clearing error status
			 */
			adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
		}
	} else if (!ret) {
		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->ecc_info_query_ras_error_count)
			adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even if a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for umc error address record!\n");

			/* umc query_ras_error_address is also responsible for
			 * clearing error status
			 */
			adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count) {
		dev_info(adev->dev, "%ld uncorrectable hardware errors detected in UMC block\n",
			 err_data->ue_count);

		if ((amdgpu_bad_page_threshold != 0) &&
		    err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						 err_data->err_addr_cnt);
			amdgpu_ras_save_bad_pages(adev);

			if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num)
				adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
		}

		if (reset)
			amdgpu_ras_reset_gpu(adev);
	}

	kfree(err_data->err_addr);
	return AMDGPU_RAS_SUCCESS;
}
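
/* Handle a consumed poison event: run page retirement and fold the new
 * error counts into the UMC RAS manager object.
 */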
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
		void *ras_error_status,
		bool reset)
{
	int ret;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__UMC,
	};
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

	ret = amdgpu_umc_do_page_retirement(adev, ras_error_status, NULL, reset);

	if (ret == AMDGPU_RAS_SUCCESS && obj) {
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
	}

	return ret;
}
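
/* RAS interrupt callback: retire bad pages and request a GPU reset when
 * uncorrectable errors are found.
 */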
static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
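
/* Create the UMC RAS object and its sysfs node, enable the ECC interrupt
 * and let the IP specific code initialize its error counters.
 */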
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "umc_err_count",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_umc_process_ras_data_cb,
	};

	if (!adev->umc.ras_if) {
		adev->umc.ras_if =
			kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->umc.ras_if)
			return -ENOMEM;
		adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if->sub_block_index = 0;
	}
	ih_info.head = fs_info.head = *adev->umc.ras_if;

	r = amdgpu_ras_late_init(adev, adev->umc.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	} else {
		r = 0;
		goto free;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras_funcs &&
	    adev->umc.ras_funcs->err_cnt_init)
		adev->umc.ras_funcs->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info);
free:
	kfree(adev->umc.ras_if);
	adev->umc.ras_if = NULL;
	return r;
}
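
/* Tear down the UMC RAS object created in amdgpu_umc_ras_late_init(). */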
void amdgpu_umc_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
	    adev->umc.ras_if) {
		struct ras_common_if *ras_if = adev->umc.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_umc_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}
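
/* ECC interrupt handler: forward the IV entry to the RAS interrupt
 * dispatcher for the UMC block.
 */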
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}