drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"

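/*
 * amdgpu_mca_is_deferred_error - check whether an MCA STATUS value reports
 * a deferred error. Delegates to the UMC RAS block's check_ecc_err_status()
 * callback; without such a callback the error is never treated as deferred.
 */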
static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
                                        uint64_t mc_status)
{
        if (adev->umc.ras->check_ecc_err_status)
                return adev->umc.ras->check_ecc_err_status(adev,
                                AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);

        return false;
}

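/*
 * Register-based error count helpers: read an MCA status register over the
 * PCIE aperture (MCUMC_STATUS layout from the UMC 6.7 headers), bump the
 * correctable or uncorrectable counter when the Val bit and the matching
 * error-class bits are set, and clear the register once the counts have
 * been harvested by amdgpu_mca_query_ras_error_count().
 */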
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
                                              uint64_t mc_status_addr,
                                              unsigned long *error_count)
{
        uint64_t mc_status = RREG64_PCIE(mc_status_addr);

        if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
                *error_count += 1;
}

void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
                                                uint64_t mc_status_addr,
                                                unsigned long *error_count)
{
        uint64_t mc_status = RREG64_PCIE(mc_status_addr);

        if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
            (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
            REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
                *error_count += 1;
}

void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
                                  uint64_t mc_status_addr)
{
        WREG64_PCIE(mc_status_addr, 0x0ULL);
}

void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
                                      uint64_t mc_status_addr,
                                      void *ras_error_status)
{
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

        amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
        amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));

        amdgpu_mca_reset_error_count(adev, mc_status_addr);
}

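/*
 * Software init for the MP0/MP1/MPIO MCA RAS blocks: register each block
 * with the RAS core, give it a name, and publish its common RAS interface
 * through adev->mca.<ip>.ras_if.
 */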
int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
{
        int err;
        struct amdgpu_mca_ras_block *ras;

        if (!adev->mca.mp0.ras)
                return 0;

        ras = adev->mca.mp0.ras;

        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
                return err;
        }

        strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
        ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
        ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;

        return 0;
}

int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
{
        int err;
        struct amdgpu_mca_ras_block *ras;

        if (!adev->mca.mp1.ras)
                return 0;

        ras = adev->mca.mp1.ras;

        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
                return err;
        }

        strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
        ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
        ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;

        return 0;
}

int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
{
        int err;
        struct amdgpu_mca_ras_block *ras;

        if (!adev->mca.mpio.ras)
                return 0;

        ras = adev->mca.mpio.ras;

        err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
        if (err) {
                dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
                return err;
        }

        strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
        ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
        ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
        adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;

        return 0;
}

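/*
 * mca_bank_set is a simple list-backed container for MCA bank entries
 * collected from the SMU; entries are copied into individually allocated
 * nodes and must be freed with amdgpu_mca_bank_set_release().
 */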
void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
{
        if (!mca_set)
                return;

        memset(mca_set, 0, sizeof(*mca_set));
        INIT_LIST_HEAD(&mca_set->list);
}

int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
{
        struct mca_bank_node *node;

        if (!entry)
                return -EINVAL;

        node = kvzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        memcpy(&node->entry, entry, sizeof(*entry));

        INIT_LIST_HEAD(&node->node);
        list_add_tail(&node->node, &mca_set->list);

        mca_set->nr_entries++;

        return 0;
}

void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
{
        struct mca_bank_node *node, *tmp;

        list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
                list_del(&node->node);
                kvfree(node);
        }
}

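/*
 * SMU-backed MCA interface: an amdgpu_mca_smu_funcs table is installed via
 * amdgpu_mca_smu_init_funcs() (by the SMU IP code on supported parts), and
 * the wrappers below forward to it, returning -EOPNOTSUPP when a callback
 * is not implemented.
 */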
void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
{
        struct amdgpu_mca *mca = &adev->mca;

        mca->mca_funcs = mca_funcs;
}

int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (mca_funcs && mca_funcs->mca_set_debug_mode)
                return mca_funcs->mca_set_debug_mode(adev, enable);

        return -EOPNOTSUPP;
}

static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
{
        dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n");
        dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_STATUS]);
        dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_ADDR]);
        dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_MISC0]);
        dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_IPID]);
        dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
                 idx, entry->regs[MCA_REG_IDX_SYND]);
}

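/*
 * Fetch the MCA bank set for a RAS block from the SMU, dump each bank to
 * the kernel log, parse its error count, and fold the result into err_data,
 * classifying each bank as uncorrectable, deferred or correctable.
 */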
int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
{
        struct amdgpu_smuio_mcm_config_info mcm_info;
        struct ras_err_addr err_addr = {0};
        struct mca_bank_set mca_set;
        struct mca_bank_node *node;
        struct mca_bank_entry *entry;
        uint32_t count;
        int ret, i = 0;

        amdgpu_mca_bank_set_init(&mca_set);

        ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set);
        if (ret)
                goto out_mca_release;

        list_for_each_entry(node, &mca_set.list, node) {
                entry = &node->entry;

                amdgpu_mca_smu_mca_bank_dump(adev, i++, entry);

                count = 0;
                ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
                if (ret)
                        goto out_mca_release;

                if (!count)
                        continue;

                mcm_info.socket_id = entry->info.socket_id;
                mcm_info.die_id = entry->info.aid;

                if (blk == AMDGPU_RAS_BLOCK__UMC) {
                        err_addr.err_status = entry->regs[MCA_REG_IDX_STATUS];
                        err_addr.err_ipid = entry->regs[MCA_REG_IDX_IPID];
                        err_addr.err_addr = entry->regs[MCA_REG_IDX_ADDR];
                }

                if (type == AMDGPU_MCA_ERROR_TYPE_UE)
                        amdgpu_ras_error_statistic_ue_count(err_data,
                                &mcm_info, &err_addr, (uint64_t)count);
                else {
                        if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
                                amdgpu_ras_error_statistic_de_count(err_data,
                                        &mcm_info, &err_addr, (uint64_t)count);
                        else
                                amdgpu_ras_error_statistic_ce_count(err_data,
                                        &mcm_info, &err_addr, (uint64_t)count);
                }
        }

out_mca_release:
        amdgpu_mca_bank_set_release(&mca_set);

        return ret;
}

int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!count)
                return -EINVAL;

        if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
                return mca_funcs->mca_get_valid_mca_count(adev, type, count);

        return -EOPNOTSUPP;
}

int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                            enum amdgpu_mca_error_type type, uint32_t *total)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        struct mca_bank_set mca_set;
        struct mca_bank_node *node;
        struct mca_bank_entry *entry;
        uint32_t count;
        int ret;

        if (!total)
                return -EINVAL;

        if (!mca_funcs)
                return -EOPNOTSUPP;

        if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count)
                return -EOPNOTSUPP;

        amdgpu_mca_bank_set_init(&mca_set);

        ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set);
        if (ret)
                goto err_mca_set_release;

        *total = 0;
        list_for_each_entry(node, &mca_set.list, node) {
                entry = &node->entry;

                count = 0;
                ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count);
                if (ret)
                        goto err_mca_set_release;

                *total += count;
        }

err_mca_set_release:
        amdgpu_mca_bank_set_release(&mca_set);

        return ret;
}

int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                         enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!count || !entry)
                return -EINVAL;

        if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
                return -EOPNOTSUPP;

        return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
}

int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                               enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

        if (!mca_set)
                return -EINVAL;

        if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set)
                return -EOPNOTSUPP;

        WARN_ON(!list_empty(&mca_set->list));

        return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set);
}

int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
                                 int idx, struct mca_bank_entry *entry)
{
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
        int count;

        if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
                return -EOPNOTSUPP;

        switch (type) {
        case AMDGPU_MCA_ERROR_TYPE_UE:
                count = mca_funcs->max_ue_count;
                break;
        case AMDGPU_MCA_ERROR_TYPE_CE:
                count = mca_funcs->max_ce_count;
                break;
        default:
                return -EINVAL;
        }

        if (idx >= count)
                return -EINVAL;

        return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
}

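/*
 * debugfs interface: mca_debug_mode toggles the SMU MCA debug mode, while
 * mca_ue_dump and mca_ce_dump print the currently valid MCA banks of the
 * corresponding error type.
 */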
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)data;
        int ret;

        ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
        if (ret)
                return ret;

        dev_info(adev->dev, "amdgpu set smu mca debug mode %s success\n", val ? "on" : "off");

        return 0;
}

static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
{
        int i, idx = entry->idx;
        int reg_idx_array[] = {
                MCA_REG_IDX_STATUS,
                MCA_REG_IDX_ADDR,
                MCA_REG_IDX_MISC0,
                MCA_REG_IDX_IPID,
                MCA_REG_IDX_SYND,
        };

        seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
        seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
        seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
                   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);

        for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
                seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
}

static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
        struct mca_bank_entry *entry;
        uint32_t count = 0;
        int i, ret;

        ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
        if (ret)
                return ret;

        seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
                   type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", count);

        if (!count)
                return 0;

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                memset(entry, 0, sizeof(*entry));

                ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, entry);
                if (ret)
                        goto err_free_entry;

                mca_dump_entry(m, entry);
        }

err_free_entry:
        kfree(entry);

        return ret;
}

static int mca_dump_ce_show(struct seq_file *m, void *unused)
{
        return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
}

static int mca_dump_ce_open(struct inode *inode, struct file *file)
{
        return single_open(file, mca_dump_ce_show, inode->i_private);
}

static const struct file_operations mca_ce_dump_debug_fops = {
        .owner = THIS_MODULE,
        .open = mca_dump_ce_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int mca_dump_ue_show(struct seq_file *m, void *unused)
{
        return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
}

static int mca_dump_ue_open(struct inode *inode, struct file *file)
{
        return single_open(file, mca_dump_ue_show, inode->i_private);
}

static const struct file_operations mca_ue_dump_debug_fops = {
        .owner = THIS_MODULE,
        .open = mca_dump_ue_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
#endif

void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
{
#if defined(CONFIG_DEBUG_FS)
        if (!root || amdgpu_ip_version(adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 6))
                return;

        debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
        debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
        debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
#endif
}