/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "umc_v12_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"

#include "umc/umc_12_0_0_offset.h"
#include "umc/umc_12_0_0_sh_mask.h"
#include "mp/mp_13_0_6_sh_mask.h"
#define MAX_ECC_NUM_PER_RETIREMENT	32
#define DELAYED_TIME_FOR_GPU_RESET	1000	/* ms */
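/* Map (node, UMC, channel) instance numbers to the register offset of the
 * channel; instances on node 0 skip the cross-node offset.
 */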
static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	uint32_t index = umc_inst * adev->umc.channel_inst_num + ch_inst;
	uint64_t cross_node_offset = (node_inst == 0) ? 0 : UMC_V12_0_CROSS_NODE_OFFSET;

	umc_inst = index / 4;
	ch_inst = index % 4;

	return adev->umc.channel_offs * ch_inst + UMC_V12_0_INST_DIST * umc_inst +
		UMC_V12_0_NODE_DIST * node_inst + cross_node_offset;
}

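/* Per-channel callback for amdgpu_umc_loop_channels(): reset the on-die ECC
 * error counter of one channel to its initial value.
 */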
static int umc_v12_0_reset_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint64_t odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	/* clear error count */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_reset_error_count_per_channel, NULL);
}

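/* A deferred (poisoned) error requires poison mode support and both the Val
 * and Deferred bits set in MCA_UMC_STATUS.
 */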
bool umc_v12_0_is_deferred_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	dev_dbg(adev->dev,
		"MCA_UMC_STATUS(0x%llx): Val:%llu, Poison:%llu, Deferred:%llu, PCC:%llu, UC:%llu, TCC:%llu\n",
		mc_umc_status,
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Poison),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC),
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC)
	);

	return (amdgpu_ras_is_poison_mode_supported(adev) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1));
}

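/* Deferred errors are filtered out first; anything valid with PCC, UC or TCC
 * set is reported as uncorrectable.
 */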
bool umc_v12_0_is_uncorrectable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
}

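/* Correctable errors cover CECC, UECC with UC cleared, and data parity errors
 * seen in replay mode that are not otherwise uncorrectable.
 */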
bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_status)
{
	if (umc_v12_0_is_deferred_error(adev, mc_umc_status))
		return false;

	return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
		(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 &&
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 0) ||
		/* Identify data parity error in replay mode */
		((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0x5 ||
		REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 0xb) &&
		!(umc_v12_0_is_uncorrectable_error(adev, mc_umc_status)))));
}

static void umc_v12_0_query_error_count_per_type(struct amdgpu_device *adev,
						uint64_t umc_reg_offset,
						unsigned long *error_count,
						check_error_type_func error_type_func)
{
	uint64_t mc_umc_status;
	uint64_t mc_umc_status_addr;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check MCUMC_STATUS */
	mc_umc_status =
		RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (error_type_func(adev, mc_umc_status))
		*error_count += 1;
}

static int umc_v12_0_query_error_count(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	unsigned long ue_count = 0, ce_count = 0, de_count = 0;

	/* NOTE: node_inst is converted by adev->umc.active_mask and the range is [0-3],
	 * which can be used as die ID directly */
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = node_inst,
	};

	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&ce_count, umc_v12_0_is_correctable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&ue_count, umc_v12_0_is_uncorrectable_error);
	umc_v12_0_query_error_count_per_type(adev, umc_reg_offset,
					&de_count, umc_v12_0_is_deferred_error);

	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
	amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, de_count);

	return 0;
}

static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_count, ras_error_status);

	umc_v12_0_reset_error_count(adev);
}

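/* Translate an MCA error address to SoC physical addresses via the RAS TA and
 * log one retired page per possible [C4 C3 C2] column combination, plus the
 * R13-flipped row for each.
 */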
static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
					struct ras_err_data *err_data,
					struct ta_ras_query_address_input *addr_in)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);

		return;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);
		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
		amdgpu_umc_fill_error_record(err_data, err_addr,
			retired_page, channel_index, addr_in->ma.umc_inst);
	}
}

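/* Same MCA-to-PA expansion as umc_v12_0_convert_error_address(), but the
 * retired pages are returned as PFNs in the caller's buffer instead of being
 * filled into err_data; returns the number of PFNs written.
 */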
static int umc_v12_0_convert_err_addr(struct amdgpu_device *adev,
				struct ta_ras_query_address_input *addr_in,
				uint64_t *pfns, int len)
{
	uint32_t col, row, row_xor, bank, channel_index;
	uint64_t soc_pa, retired_page, column, err_addr;
	struct ta_ras_query_address_output addr_out;
	uint32_t pos = 0;

	err_addr = addr_in->ma.err_addr;
	addr_in->addr_type = TA_RAS_MCA_TO_PA;
	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
			err_addr);
		return 0;
	}

	soc_pa = addr_out.pa.pa;
	bank = addr_out.pa.bank;
	channel_index = addr_out.pa.channel_idx;

	col = (err_addr >> 1) & 0x1fULL;
	row = (err_addr >> 10) & 0x3fffULL;
	row_xor = row ^ (0x1ULL << 13);
	/* clear [C3 C2] in soc physical address */
	soc_pa &= ~(0x3ULL << UMC_V12_0_PA_C2_BIT);
	/* clear [C4] in soc physical address */
	soc_pa &= ~(0x1ULL << UMC_V12_0_PA_C4_BIT);

	/* loop for all possibilities of [C4 C3 C2] */
	for (column = 0; column < UMC_V12_0_NA_MAP_PA_NUM; column++) {
		retired_page = soc_pa | ((column & 0x3) << UMC_V12_0_PA_C2_BIT);
		retired_page |= (((column & 0x4) >> 2) << UMC_V12_0_PA_C4_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		/* include column bit 0 and 1 */
		col &= 0x3;
		col |= (column << 2);
		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row, col, bank, channel_index);

		/* shift R13 bit */
		retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT);

		if (pos >= len)
			return 0;
		pfns[pos++] = retired_page >> AMDGPU_GPU_PAGE_SHIFT;

		dev_info(adev->dev,
			"Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
			retired_page, row_xor, col, bank, channel_index);
	}

	return pos;
}

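/* Per-channel callback: read MCUMC_STATUS, convert the error address of UE or
 * deferred errors into retired pages, then clear the status register.
 */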
static int umc_v12_0_query_error_address(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	struct ras_err_data *err_data = (struct ras_err_data *)data;
	struct ta_ras_query_address_input addr_in;
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint64_t mc_umc_addrt0;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	mc_umc_status = RREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return 0;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

		return 0;
	}

	/* calculate error address if ue error is detected */
	if (umc_v12_0_is_uncorrectable_error(adev, mc_umc_status) ||
	    umc_v12_0_is_deferred_error(adev, mc_umc_status)) {
		mc_umc_addrt0 =
			SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);

		err_addr = RREG64_PCIE_EXT((mc_umc_addrt0 + umc_reg_offset) * 4);

		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		if (!adev->aid_mask &&
		    adev->smuio.funcs &&
		    adev->smuio.funcs->get_socket_id)
			addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev);
		else
			addr_in.ma.socket_id = 0;

		addr_in.ma.err_addr = err_addr;
		addr_in.ma.ch_inst = ch_inst;
		addr_in.ma.umc_inst = umc_inst;
		addr_in.ma.node_inst = node_inst;

		umc_v12_0_convert_error_address(adev, err_data, &addr_in);
	}

	/* clear umc status */
	WREG64_PCIE_EXT((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);

	return 0;
}

static void umc_v12_0_query_ras_error_address(struct amdgpu_device *adev,
					void *ras_error_status)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_query_error_address, ras_error_status);
}

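/* Per-channel init: route CE error interrupts to the APIC and reset the
 * on-die ECC error counter.
 */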
static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev,
					uint32_t node_inst, uint32_t umc_inst,
					uint32_t ch_inst, void *data)
{
	uint32_t odecc_cnt_sel;
	uint64_t odecc_cnt_sel_addr, odecc_err_cnt_addr;
	uint64_t umc_reg_offset =
		get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst);

	odecc_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccCntSel);
	odecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_OdEccErrCnt);

	odecc_cnt_sel = RREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	odecc_cnt_sel = REG_SET_FIELD(odecc_cnt_sel, UMCCH0_OdEccCntSel,
					OdEccErrInt, 0x1);
	WREG32_PCIE_EXT((odecc_cnt_sel_addr + umc_reg_offset) * 4, odecc_cnt_sel);

	/* set error count to initial value */
	WREG32_PCIE_EXT((odecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V12_0_CE_CNT_INIT);

	return 0;
}

static bool umc_v12_0_check_ecc_err_status(struct amdgpu_device *adev,
			enum amdgpu_mca_error_type type, void *ras_error_status)
{
	uint64_t mc_umc_status = *(uint64_t *)ras_error_status;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		return umc_v12_0_is_uncorrectable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_CE:
		return umc_v12_0_is_correctable_error(adev, mc_umc_status);
	case AMDGPU_MCA_ERROR_TYPE_DE:
		return umc_v12_0_is_deferred_error(adev, mc_umc_status);
	default:
		return false;
	}
}

static void umc_v12_0_err_cnt_init(struct amdgpu_device *adev)
{
	amdgpu_umc_loop_channels(adev,
		umc_v12_0_err_cnt_init_per_channel, NULL);
}

static bool umc_v12_0_query_ras_poison_mode(struct amdgpu_device *adev)
{
	/*
	 * Force return true, because regUMCCH0_EccCtrl
	 * is not accessible from host side
	 */
	return true;
}

const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = {
	.query_ras_error_count = umc_v12_0_query_ras_error_count,
	.query_ras_error_address = umc_v12_0_query_ras_error_address,
};

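/* Classify an ACA bank as deferred/UE/CE, update the ECC status from the bank
 * registers, and log the error count to the ACA error cache.
 */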
static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
				     enum aca_smu_type type, void *data)
{
	struct amdgpu_device *adev = handle->adev;
	struct aca_bank_info info;
	enum aca_error_type err_type;
	u64 status, count;
	u32 ext_error_code;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if (umc_v12_0_is_deferred_error(adev, status))
		err_type = ACA_ERROR_TYPE_DEFERRED;
	else if (umc_v12_0_is_uncorrectable_error(adev, status))
		err_type = ACA_ERROR_TYPE_UE;
	else if (umc_v12_0_is_correctable_error(adev, status))
		err_type = ACA_ERROR_TYPE_CE;
	else
		return 0;

	ret = aca_bank_info_decode(bank, &info);
	if (ret)
		return ret;

	amdgpu_umc_update_ecc_status(adev,
		bank->regs[ACA_REG_IDX_STATUS],
		bank->regs[ACA_REG_IDX_IPID],
		bank->regs[ACA_REG_IDX_ADDR]);

	ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
	count = ext_error_code == 0 ?
		ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL;

	return aca_error_cache_log_bank_error(handle, &info, err_type, count);
}

static const struct aca_bank_ops umc_v12_0_aca_bank_ops = {
	.aca_bank_parser = umc_v12_0_aca_bank_parser,
};

const struct aca_info umc_v12_0_aca_info = {
	.hwip = ACA_HWIP_TYPE_UMC,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK,
	.bank_ops = &umc_v12_0_aca_bank_ops,
};

static int umc_v12_0_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int ret;

	ret = amdgpu_umc_ras_late_init(adev, ras_block);
	if (ret)
		return ret;

	ret = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__UMC,
				  &umc_v12_0_aca_info, NULL);
	if (ret)
		return ret;

	return 0;
}

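/* Record a deferred error reported through an ACA bank: convert its address
 * to retired-page PFNs, hash them, and log the entry into the DE page tree
 * for later retirement.
 */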
static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
			uint64_t status, uint64_t ipid, uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	uint16_t hwid, mcatype;
	struct ta_ras_query_address_input addr_in;
	uint64_t page_pfn[UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL];
	uint64_t err_addr, hash_val = 0;
	struct ras_ecc_err *ecc_err;
	int count, ret;

	hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
	mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);

	if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
		return 0;

	if (!status)
		return 0;

	if (!umc_v12_0_is_deferred_error(adev, status))
		return 0;

	err_addr = REG_GET_FIELD(addr,
			MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

	dev_dbg(adev->dev,
		"UMC:IPID:0x%llx, socket:%llu, aid:%llu, inst:%llu, ch:%llu, err_addr:0x%llx\n",
		ipid,
		MCA_IPID_2_SOCKET_ID(ipid),
		MCA_IPID_2_DIE_ID(ipid),
		MCA_IPID_2_UMC_INST(ipid),
		MCA_IPID_2_UMC_CH(ipid),
		err_addr);

	memset(page_pfn, 0, sizeof(page_pfn));

	memset(&addr_in, 0, sizeof(addr_in));
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = MCA_IPID_2_UMC_CH(ipid);
	addr_in.ma.umc_inst = MCA_IPID_2_UMC_INST(ipid);
	addr_in.ma.node_inst = MCA_IPID_2_DIE_ID(ipid);
	addr_in.ma.socket_id = MCA_IPID_2_SOCKET_ID(ipid);

	count = umc_v12_0_convert_err_addr(adev,
			&addr_in, page_pfn, ARRAY_SIZE(page_pfn));
	if (count <= 0) {
		dev_warn(adev->dev, "Fail to convert error address! count:%d\n", count);
		return 0;
	}

	ret = amdgpu_umc_build_pages_hash(adev,
			page_pfn, count, &hash_val);
	if (ret) {
		dev_err(adev->dev, "Fail to build error pages hash\n");
		return ret;
	}

	ecc_err = kzalloc(sizeof(*ecc_err), GFP_KERNEL);
	if (!ecc_err)
		return -ENOMEM;

	ecc_err->err_pages.pfn = kcalloc(count, sizeof(*ecc_err->err_pages.pfn), GFP_KERNEL);
	if (!ecc_err->err_pages.pfn) {
		kfree(ecc_err);
		return -ENOMEM;
	}

	memcpy(ecc_err->err_pages.pfn, page_pfn, count * sizeof(*ecc_err->err_pages.pfn));
	ecc_err->err_pages.count = count;

	ecc_err->hash_index = hash_val;
	ecc_err->status = status;
	ecc_err->ipid = ipid;
	ecc_err->addr = addr;

	ret = amdgpu_umc_logs_ecc_err(adev, &con->umc_ecc_log.de_page_tree, ecc_err);
	if (ret) {
		if (ret == -EEXIST)
			con->umc_ecc_log.de_queried_count++;
		else
			dev_err(adev->dev, "Fail to log ecc error! ret:%d\n", ret);

		kfree(ecc_err->err_pages.pfn);
		kfree(ecc_err);
		return ret;
	}

	con->umc_ecc_log.de_queried_count++;

	/* The problem case is as follows:
	 * 1. GPU A triggers a GPU RAS reset, and GPU A drives
	 *    GPU B to also perform a GPU RAS reset.
	 * 2. After GPU B's RAS reset has started, GPU B queries DE
	 *    data. Since the DE data is queried in the RAS reset
	 *    thread instead of the page retirement thread, bad
	 *    page retirement work is not triggered. Then even
	 *    when all GPU resets are completed, the bad pages
	 *    stay cached in RAM until GPU B's bad page retirement
	 *    work is triggered again and then saved to eeprom.
	 * Trigger delayed work to save the bad pages to eeprom in time
	 * after the GPU RAS reset is completed.
	 */
	if (amdgpu_ras_in_recovery(adev))
		schedule_delayed_work(&con->page_retirement_dwork,
			msecs_to_jiffies(DELAYED_TIME_FOR_GPU_RESET));

	return 0;
}

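/* Convert one logged ECC error into err_data records, one per retired-page
 * PFN.
 */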
static int umc_v12_0_fill_error_record(struct amdgpu_device *adev,
				struct ras_ecc_err *ecc_err, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t i = 0;
	int ret = 0;

	if (!err_data || !ecc_err)
		return -EINVAL;

	for (i = 0; i < ecc_err->err_pages.count; i++) {
		ret = amdgpu_umc_fill_error_record(err_data,
				ecc_err->addr,
				ecc_err->err_pages.pfn[i] << AMDGPU_GPU_PAGE_SHIFT,
				MCA_IPID_2_UMC_CH(ecc_err->ipid),
				MCA_IPID_2_UMC_INST(ecc_err->ipid));
		if (ret)
			break;
	}

	err_data->de_count++;

	return ret;
}

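/* Walk newly detected entries in the DE page tree, fill error records for
 * each and clear their NEW_DETECTED tags.
 */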
static void umc_v12_0_query_ras_ecc_err_addr(struct amdgpu_device *adev,
					void *ras_error_status)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_ecc_err *entries[MAX_ECC_NUM_PER_RETIREMENT];
	struct radix_tree_root *ecc_tree;
	int new_detected, ret, i;

	ecc_tree = &con->umc_ecc_log.de_page_tree;

	mutex_lock(&con->umc_ecc_log.lock);
	new_detected = radix_tree_gang_lookup_tag(ecc_tree, (void **)entries,
			0, ARRAY_SIZE(entries), UMC_ECC_NEW_DETECTED_TAG);
	for (i = 0; i < new_detected; i++) {
		if (!entries[i])
			continue;

		ret = umc_v12_0_fill_error_record(adev, entries[i], ras_error_status);
		if (ret) {
			dev_err(adev->dev, "Fail to fill umc error record, ret:%d\n", ret);
			break;
		}
		radix_tree_tag_clear(ecc_tree, entries[i]->hash_index, UMC_ECC_NEW_DETECTED_TAG);
	}
	mutex_unlock(&con->umc_ecc_log.lock);
}

struct amdgpu_umc_ras umc_v12_0_ras = {
	.ras_block = {
		.hw_ops = &umc_v12_0_ras_hw_ops,
		.ras_late_init = umc_v12_0_ras_late_init,
	},
	.err_cnt_init = umc_v12_0_err_cnt_init,
	.query_ras_poison_mode = umc_v12_0_query_ras_poison_mode,
	.ecc_info_query_ras_error_address = umc_v12_0_query_ras_ecc_err_addr,
	.check_ecc_err_status = umc_v12_0_check_ecc_err_status,
	.update_ecc_status = umc_v12_0_update_ecc_status,
};