/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
					 uint64_t mc_status)
{
	if (adev->umc.ras->check_ecc_err_status)
		return adev->umc.ras->check_ecc_err_status(adev,
				AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);

	return false;
}

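/*
 * Legacy register-path query helpers: the MCA status register at
 * @mc_status_addr is read through the PCIe index/data interface and the
 * relevant counter is bumped when a valid error of the matching class is
 * latched in the bank.
 */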
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
					      uint64_t mc_status_addr,
					      unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
						uint64_t mc_status_addr,
						unsigned long *error_count)
{
	uint64_t mc_status = RREG64_PCIE(mc_status_addr);

	if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	     REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
				  uint64_t mc_status_addr)
{
	WREG64_PCIE(mc_status_addr, 0x0ULL);
}

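/*
 * Query both the correctable and uncorrectable count for one status
 * register, then clear it so the same bank is not counted again on the
 * next query.
 */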
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
				      uint64_t mc_status_addr,
				      void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
	amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

	amdgpu_mca_reset_error_count(adev, mc_status_addr);
}

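/*
 * The MP0/MP1/MPIO sw_init helpers below all follow the same pattern:
 * register the block with the RAS core, then fill in the common ras_comm
 * info and publish it through the corresponding ras_if pointer.
 */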
int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp0.ras)
		return 0;

	ras = adev->mca.mp0.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mp1.ras)
		return 0;

	ras = adev->mca.mp1.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_mca_ras_block *ras;

	if (!adev->mca.mpio.ras)
		return 0;

	ras = adev->mca.mpio.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

	return 0;
}

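/*
 * A mca_bank_set is a list-backed container of MCA bank entries; each
 * entry is copied into its own allocated node, so a set can be built up,
 * merged into another set and released independently of its source.
 */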
static void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
{
	if (!mca_set)
		return;

	memset(mca_set, 0, sizeof(*mca_set));
	INIT_LIST_HEAD(&mca_set->list);
}

static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
{
	struct mca_bank_node *node;

	if (!entry)
		return -EINVAL;

	node = kvzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->entry, entry, sizeof(*entry));

	INIT_LIST_HEAD(&node->node);
	list_add_tail(&node->node, &mca_set->list);

	mca_set->nr_entries++;

	return 0;
}

static int amdgpu_mca_bank_set_merge(struct mca_bank_set *mca_set, struct mca_bank_set *new)
{
	struct mca_bank_node *node;

	list_for_each_entry(node, &new->list, node)
		amdgpu_mca_bank_set_add_entry(mca_set, &node->entry);

	return 0;
}

static void amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
{
	if (!node)
		return;

	list_del(&node->node);
	kvfree(node);

	mca_set->nr_entries--;
}

static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
	struct mca_bank_node *node, *tmp;

	if (list_empty(&mca_set->list))
		return;

	list_for_each_entry_safe(node, tmp, &mca_set->list, node)
		amdgpu_mca_bank_set_remove_node(mca_set, node);
}

void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
{
	struct amdgpu_mca *mca = &adev->mca;

	mca->mca_funcs = mca_funcs;
}

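/*
 * One bank cache (lock plus bank set) is kept per error type;
 * ue_update_flag gates UE bank harvesting so the banks are only read
 * once per recovery (see amdgpu_mca_bank_should_update()).
 */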
int amdgpu_mca_init(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;
	struct mca_bank_cache *mca_cache;
	int i;

	atomic_set(&mca->ue_update_flag, 0);

	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
		mca_cache = &mca->mca_caches[i];
		mutex_init(&mca_cache->lock);
		amdgpu_mca_bank_set_init(&mca_cache->mca_set);
	}

	return 0;
}

void amdgpu_mca_fini(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;
	struct mca_bank_cache *mca_cache;
	int i;

	atomic_set(&mca->ue_update_flag, 0);

	for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
		mca_cache = &mca->mca_caches[i];
		amdgpu_mca_bank_set_release(&mca_cache->mca_set);
		mutex_destroy(&mca_cache->lock);
	}
}

int amdgpu_mca_reset(struct amdgpu_device *adev)
{
	amdgpu_mca_fini(adev);

	return amdgpu_mca_init(adev);
}

int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (mca_funcs && mca_funcs->mca_set_debug_mode)
		return mca_funcs->mca_set_debug_mode(adev, enable);

	return -EOPNOTSUPP;
}

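/* Dump one raw bank to the RAS event log, tagged with the query's event id. */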
static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry,
					 struct ras_query_context *qctx)
{
	u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;

	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_STATUS]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_ADDR]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_MISC0]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_IPID]);
	RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
		      idx, entry->regs[MCA_REG_IDX_SYND]);
}

static int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count)
		return -EINVAL;

	if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
		return mca_funcs->mca_get_valid_mca_count(adev, type, count);

	return -EOPNOTSUPP;
}

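/* Fetch bank @idx, range-checking it against the per-type bank count. */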
static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
					int idx, struct mca_bank_entry *entry)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	int count;

	if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
		return -EOPNOTSUPP;

	switch (type) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		count = mca_funcs->max_ue_count;
		break;
	case AMDGPU_MCA_ERROR_TYPE_CE:
		count = mca_funcs->max_ce_count;
		break;
	default:
		return -EINVAL;
	}

	if (idx >= count)
		return -EINVAL;

	return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}

static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgpu_mca_error_type type)
{
	struct amdgpu_mca *mca = &adev->mca;
	bool ret = true;

	/*
	 * The UE valid MCA count is only cleared after reset, so to avoid
	 * counting the same errors repeatedly, the banks are only
	 * harvested once during the GPU recovery stage.
	 */
	if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
		if (amdgpu_ras_intr_triggered())
			ret = atomic_cmpxchg(&mca->ue_update_flag, 0, 1) == 0;
		else
			atomic_set(&mca->ue_update_flag, 0);
	}

	return ret;
}

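/*
 * Read all currently valid banks of @type from the SMU into @mca_set,
 * dumping each bank to the RAS event log as it is collected.
 */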
static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set,
				      struct ras_query_context *qctx)
{
	struct mca_bank_entry entry;
	uint32_t count = 0, i;
	int ret;

	if (!mca_set)
		return -EINVAL;

	if (!amdgpu_mca_bank_should_update(adev, type))
		return 0;

	ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		memset(&entry, 0, sizeof(entry));
		ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, &entry);
		if (ret)
			return ret;

		amdgpu_mca_bank_set_add_entry(mca_set, &entry);

		amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
	}

	return 0;
}

static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
						enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!count || !entry)
		return -EINVAL;

	if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
		return -EOPNOTSUPP;

	return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}

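/*
 * Turn each bank in @mca_set into per-{socket, die} RAS statistics for
 * @blk. Banks that parse to a zero count are left in the set (they may
 * belong to another IP block); consumed banks are removed.
 */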
static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
				       struct mca_bank_set *mca_set, struct ras_err_data *err_data)
{
	struct amdgpu_smuio_mcm_config_info mcm_info;
	struct mca_bank_node *node, *tmp;
	struct mca_bank_entry *entry;
	uint32_t count;
	int ret;

	if (!mca_set)
		return -EINVAL;

	if (!mca_set->nr_entries)
		return 0;

	list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
		entry = &node->entry;

		count = 0;
		ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
		if (ret && ret != -EOPNOTSUPP)
			return ret;

		if (!count)
			continue;

		memset(&mcm_info, 0, sizeof(mcm_info));

		mcm_info.socket_id = entry->info.socket_id;
		mcm_info.die_id = entry->info.aid;

		if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
			amdgpu_ras_error_statistic_ue_count(err_data,
							    &mcm_info, (uint64_t)count);
		} else {
			if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
				amdgpu_ras_error_statistic_de_count(err_data,
								    &mcm_info, (uint64_t)count);
			else
				amdgpu_ras_error_statistic_ce_count(err_data,
								    &mcm_info, (uint64_t)count);
		}

		amdgpu_mca_bank_set_remove_node(mca_set, node);
	}

	return 0;
}

static int amdgpu_mca_add_mca_set_to_cache(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *new)
{
	struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
	int ret;

	mutex_lock(&mca_cache->lock);
	ret = amdgpu_mca_bank_set_merge(&mca_cache->mca_set, new);
	mutex_unlock(&mca_cache->lock);

	return ret;
}

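/*
 * Top-level SMU MCA query: harvest fresh banks from the SMU, dispatch the
 * ones that match @blk, stash the remainder in the per-type cache, then
 * give the cache a second pass for banks left behind by earlier queries.
 */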
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
				 struct ras_err_data *err_data, struct ras_query_context *qctx)
{
	struct mca_bank_set mca_set;
	struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
	int ret;

	amdgpu_mca_bank_set_init(&mca_set);

	ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, qctx);
	if (ret)
		goto out_mca_release;

	ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_set, err_data);
	if (ret)
		goto out_mca_release;

	/* add the remaining mca banks to the mca cache */
	if (mca_set.nr_entries) {
		ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set);
		if (ret)
			goto out_mca_release;
	}

	/* dispatch the mca set again if the mca cache holds valid data */
	mutex_lock(&mca_cache->lock);
	if (mca_cache->mca_set.nr_entries)
		ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_cache->mca_set, err_data);
	mutex_unlock(&mca_cache->lock);

out_mca_release:
	amdgpu_mca_bank_set_release(&mca_set);

	return ret;
}

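/*
 * debugfs interface: mca_debug_mode toggles SMU MCA debug mode on write,
 * while mca_ue_dump and mca_ce_dump print the currently valid banks of
 * each type on read.
 */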
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)data;
	int ret;

	ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
	if (ret)
		return ret;

	dev_info(adev->dev, "amdgpu smu mca debug mode set to %s\n", val ? "on" : "off");

	return 0;
}

static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
{
	int i, idx = entry->idx;
	int reg_idx_array[] = {
		MCA_REG_IDX_STATUS,
		MCA_REG_IDX_ADDR,
		MCA_REG_IDX_MISC0,
		MCA_REG_IDX_IPID,
		MCA_REG_IDX_SYND,
	};

	seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
	seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
	seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
		   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);

	for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
		seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
}

static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct mca_bank_node *node;
	struct mca_bank_set mca_set;
	struct ras_query_context qctx;
	int ret;

	amdgpu_mca_bank_set_init(&mca_set);

	qctx.evid.event_id = RAS_EVENT_INVALID_ID;
	ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, &qctx);
	if (ret)
		goto err_free_mca_set;

	seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
		   type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", mca_set.nr_entries);

	if (!mca_set.nr_entries)
		goto err_free_mca_set;

	list_for_each_entry(node, &mca_set.list, node)
		mca_dump_entry(m, &node->entry);

	/* add the mca banks to the mca bank cache */
	ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set);

err_free_mca_set:
	amdgpu_mca_bank_set_release(&mca_set);

	return ret;
}

static int mca_dump_ce_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
}

static int mca_dump_ce_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ce_show, inode->i_private);
}

static const struct file_operations mca_ce_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int mca_dump_ue_show(struct seq_file *m, void *unused)
{
	return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
}

static int mca_dump_ue_open(struct inode *inode, struct file *file)
{
	return single_open(file, mca_dump_ue_show, inode->i_private);
}

static const struct file_operations mca_ue_dump_debug_fops = {
	.owner = THIS_MODULE,
	.open = mca_dump_ue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
#endif

void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
	if (!root)
		return;

	debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
	debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
	debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
#endif
}