/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"
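
/*
 * Report whether an MCA status value describes a deferred (poison) error.
 * The decision is delegated to the UMC RAS block's check_ecc_err_status()
 * callback when one is registered; otherwise the error is not deferred.
 */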
static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
                                         uint64_t mc_status)
{
        if (adev->umc.ras->check_ecc_err_status)
                return adev->umc.ras->check_ecc_err_status(adev,
                                AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);

        return false;
}
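
/*
 * Read the MCA status register at @mc_status_addr and bump @error_count
 * if a valid correctable ECC (CECC) error is latched.
 */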
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
                                              uint64_t mc_status_addr,
                                              unsigned long *error_count)
{
        uint64_t mc_status = RREG64_PCIE(mc_status_addr);

        if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
                *error_count += 1;
}
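
/*
 * Read the MCA status register at @mc_status_addr and bump @error_count
 * if a valid uncorrectable error (Deferred, UECC, PCC, UC or TCC) is
 * latched.
 */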
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
                                                uint64_t mc_status_addr,
                                                unsigned long *error_count)
{
        uint64_t mc_status = RREG64_PCIE(mc_status_addr);

        if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
            (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
                *error_count += 1;
}
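
/* Clear a latched MCA status register so new errors can be logged. */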
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
                                  uint64_t mc_status_addr)
{
        WREG64_PCIE(mc_status_addr, 0x0ULL);
}
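
/*
 * Accumulate both correctable and uncorrectable counts from a single MCA
 * status register into @ras_error_status, then clear the register.
 */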
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
                                      uint64_t mc_status_addr,
                                      void *ras_error_status)
{
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

        amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
        amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

        amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
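
/*
 * Software init for the MP0/MP1/MPIO MCA RAS blocks below: each registers
 * its block with the RAS core and publishes the common interface on adev.
 */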
int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
        int err;
        struct amdgpu_mca_ras_block *ras;

        if (!adev->mca.mp0.ras)
                return -EINVAL;

        ras = adev->mca.mp0.ras;

        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
                return err;
        }

        strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
        ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
        ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

        return 0;
}

int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
        int err;
        struct amdgpu_mca_ras_block *ras;

        if (!adev->mca.mp1.ras)
                return -EINVAL;

        ras = adev->mca.mp1.ras;

        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
                return err;
        }

        strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
        ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
        ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

        return 0;
}

int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
        int err;
        struct amdgpu_mca_ras_block *ras;

        if (!adev->mca.mpio.ras)
                return -EINVAL;

        ras = adev->mca.mpio.ras;

        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
                return err;
        }

        strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
        ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
        ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

        return 0;
}
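
/*
 * A mca_bank_set is a list of MCA bank entries; callers initialize one,
 * fill it from the SMU and release it when done.
 */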
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
{
        if (!mca_set)
                return;

        memset(mca_set, 0, sizeof(*mca_set));
        INIT_LIST_HEAD(&mca_set->list);
}

int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
{
        struct mca_bank_node *node;

        if (!entry)
                return -EINVAL;

        node = kvzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        memcpy(&node->entry, entry, sizeof(*entry));

        INIT_LIST_HEAD(&node->node);
        list_add_tail(&node->node, &mca_set->list);

        mca_set->nr_entries++;

        return 0;
}

void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
        struct mca_bank_node *node, *tmp;

        list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
                list_del(&node->node);
                kvfree(node);
        }
}
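
/* Install the SMU-specific MCA callbacks (provided by the SMU IP code). */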
void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
{
        struct amdgpu_mca *mca = &adev->mca;

        mca->mca_funcs = mca_funcs;
}
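
/* Toggle MCA debug mode through the SMU, if the callback is implemented. */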
int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (mca_funcs && mca_funcs->mca_set_debug_mode)
                return mca_funcs->mca_set_debug_mode(adev, enable);

        return -EOPNOTSUPP;
}
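
/* Dump one raw MCA bank (STATUS/ADDR/MISC0/IPID/SYND) to the RAS event log. */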
static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry,
                                         struct ras_query_context *qctx)
{
        u64 event_id = qctx->event_id;

        RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
        RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
                      idx, entry->regs[MCA_REG_IDX_STATUS]);
        RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
                      idx, entry->regs[MCA_REG_IDX_ADDR]);
        RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
                      idx, entry->regs[MCA_REG_IDX_MISC0]);
        RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
                      idx, entry->regs[MCA_REG_IDX_IPID]);
        RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
                      idx, entry->regs[MCA_REG_IDX_SYND]);
}
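
/*
 * Fetch all MCA banks of @type that decode to RAS block @blk, dump each
 * raw entry, and fold the per-bank error counts into @err_data as UE,
 * deferred or CE statistics.
 */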
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
                                 struct ras_err_data *err_data, struct ras_query_context *qctx)
{
        struct amdgpu_smuio_mcm_config_info mcm_info;
        struct ras_err_addr err_addr = {0};
        struct mca_bank_set mca_set;
        struct mca_bank_node *node;
        struct mca_bank_entry *entry;
        uint32_t count;
        int ret, i = 0;

        amdgpu_mca_bank_set_init(&mca_set);

        ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set);
        if (ret)
                goto out_mca_release;

        list_for_each_entry(node, &mca_set.list, node) {
                entry = &node->entry;

                amdgpu_mca_smu_mca_bank_dump(adev, i++, entry, qctx);

                count = 0;
                ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
                if (ret)
                        goto out_mca_release;

                if (!count)
                        continue;

                mcm_info.socket_id = entry->info.socket_id;
                mcm_info.die_id = entry->info.aid;

                if (blk == AMDGPU_RAS_BLOCK__UMC) {
                        err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
                        err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
                        err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
                }

                if (type == AMDGPU_MCA_ERROR_TYPE_UE)
                        amdgpu_ras_error_statistic_ue_count(err_data,
                                &mcm_info, &err_addr, (uint64_t)count);
                else {
                        if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
                                amdgpu_ras_error_statistic_de_count(err_data,
                                        &mcm_info, &err_addr, (uint64_t)count);
                        else
                                amdgpu_ras_error_statistic_ce_count(err_data,
                                        &mcm_info, &err_addr, (uint64_t)count);
                }
        }

out_mca_release:
        amdgpu_mca_bank_set_release(&mca_set);

        return ret;
}
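
/* Number of currently valid MCA banks of @type, as reported by the SMU. */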
int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!count)
                return -EINVAL;

        if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
                return mca_funcs->mca_get_valid_mca_count(adev, type, count);

        return -EOPNOTSUPP;
}
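
/*
 * Sum the error counts of all MCA banks of @type that decode to RAS
 * block @blk; the bank set is built and released locally.
 */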
int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                           enum amdgpu_mca_error_type type, uint32_t *total)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        struct mca_bank_set mca_set;
        struct mca_bank_node *node;
        struct mca_bank_entry *entry;
        uint32_t count;
        int ret;

        if (!total)
                return -EINVAL;

        if (!mca_funcs)
                return -EOPNOTSUPP;

        if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count)
                return -EOPNOTSUPP;

        amdgpu_mca_bank_set_init(&mca_set);

        ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set);
        if (ret)
                goto err_mca_set_release;

        *total = 0;
        list_for_each_entry(node, &mca_set.list, node) {
                entry = &node->entry;

                count = 0;
                ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count);
                if (ret)
                        goto err_mca_set_release;

                *total += count;
        }

err_mca_set_release:
        amdgpu_mca_bank_set_release(&mca_set);

        return ret;
}
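
/* Decode one MCA bank entry into an error count for @blk via the SMU callback. */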
int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                         enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!count || !entry)
                return -EINVAL;

        if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
                return -EOPNOTSUPP;

        return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}
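
/* Fill @mca_set with the banks of @type for @blk; the set must start empty. */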
int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                               enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!mca_set)
                return -EINVAL;

        if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set)
                return -EOPNOTSUPP;

        WARN_ON(!list_empty(&mca_set->list));

        return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set);
}
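
/* Fetch raw MCA bank @idx of @type, bounds-checked against the SMU's bank limits. */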
int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
                                 int idx, struct mca_bank_entry *entry)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        int count;

        if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
                return -EOPNOTSUPP;

        switch (type) {
        case AMDGPU_MCA_ERROR_TYPE_UE:
                count = mca_funcs->max_ue_count;
                break;
        case AMDGPU_MCA_ERROR_TYPE_CE:
                count = mca_funcs->max_ce_count;
                break;
        default:
                return -EINVAL;
        }

        if (idx >= count)
                return -EINVAL;

        return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}
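
/* debugfs interface: mca_debug_mode control plus raw UE/CE bank dumps. */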
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        int ret;

        ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
        if (ret)
                return ret;

        dev_info(adev->dev, "amdgpu set smu mca debug mode %s success\n", val ? "on" : "off");

        return 0;
}

static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
{
        int i, idx = entry->idx;
        int reg_idx_array[] = {
                MCA_REG_IDX_STATUS,
                MCA_REG_IDX_ADDR,
                MCA_REG_IDX_MISC0,
                MCA_REG_IDX_IPID,
                MCA_REG_IDX_SYND,
        };

        seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
        seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
        seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
                   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);

        for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
                seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
}

static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        struct mca_bank_entry *entry;
        uint32_t count = 0;
        int i, ret;

        ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
        if (ret)
                return ret;

        seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
                   type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", count);

        if (!count)
                return 0;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                memset(entry, 0, sizeof(*entry));

                ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry);
                if (ret)
                        goto err_free_entry;

                mca_dump_entry(m, entry);
        }

err_free_entry:
        kfree(entry);

        return ret;
}

static int mca_dump_ce_show(struct seq_file *m, void *unused)
{
        return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
}

static int mca_dump_ce_open(struct inode *inode, struct file *file)
{
        return single_open(file, mca_dump_ce_show, inode->i_private);
}

static const struct file_operations mca_ce_dump_debug_fops = {
        .owner = THIS_MODULE,
        .open = mca_dump_ce_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int mca_dump_ue_show(struct seq_file *m, void *unused)
{
        return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
}

static int mca_dump_ue_open(struct inode *inode, struct file *file)
{
        return single_open(file, mca_dump_ue_show, inode->i_private);
}

static const struct file_operations mca_ue_dump_debug_fops = {
        .owner = THIS_MODULE,
        .open = mca_dump_ue_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
#endif

void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
        if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6))
                return;

        debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
        debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
        debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
#endif
}