1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31
32 #include "amdgpu.h"
33 #include "amdgpu_ras.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_xgmi.h"
36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
37 #include "nbio_v4_3.h"
38 #include "atom.h"
39 #include "amdgpu_reset.h"
40
41 #ifdef CONFIG_X86_MCE_AMD
42 #include <asm/mce.h>
43
44 static bool notifier_registered;
45 #endif
46 static const char *RAS_FS_NAME = "ras";
47
48 const char *ras_error_string[] = {
49         "none",
50         "parity",
51         "single_correctable",
52         "multi_uncorrectable",
53         "poison",
54 };
55
56 const char *ras_block_string[] = {
57         "umc",
58         "sdma",
59         "gfx",
60         "mmhub",
61         "athub",
62         "pcie_bif",
63         "hdp",
64         "xgmi_wafl",
65         "df",
66         "smn",
67         "sem",
68         "mp0",
69         "mp1",
70         "fuse",
71         "mca",
72         "vcn",
73         "jpeg",
74 };
75
76 const char *ras_mca_block_string[] = {
77         "mca_mp0",
78         "mca_mp1",
79         "mca_mpio",
80         "mca_iohc",
81 };
82
83 struct amdgpu_ras_block_list {
84         /* ras block link */
85         struct list_head node;
86
87         struct amdgpu_ras_block_object *ras_obj;
88 };
89
90 const char *get_ras_block_str(struct ras_common_if *ras_block)
91 {
92         if (!ras_block)
93                 return "NULL";
94
95         if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
96                 return "OUT OF RANGE";
97
98         if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
99                 return ras_mca_block_string[ras_block->sub_block_index];
100
101         return ras_block_string[ras_block->block];
102 }
103
104 #define ras_block_str(_BLOCK_) \
105         (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
106
107 #define ras_err_str(i) (ras_error_string[ffs(i)])
108
109 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
110
111 /* inject address is 52 bits */
112 #define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)
113
114 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
115 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
116
117 enum amdgpu_ras_retire_page_reservation {
118         AMDGPU_RAS_RETIRE_PAGE_RESERVED,
119         AMDGPU_RAS_RETIRE_PAGE_PENDING,
120         AMDGPU_RAS_RETIRE_PAGE_FAULT,
121 };
122
123 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
124
125 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
126                                 uint64_t addr);
127 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
128                                 uint64_t addr);
129 #ifdef CONFIG_X86_MCE_AMD
130 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
131 struct mce_notifier_adev_list {
132         struct amdgpu_device *devs[MAX_GPU_INSTANCE];
133         int num_gpu;
134 };
135 static struct mce_notifier_adev_list mce_adev_list;
136 #endif
137
138 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
139 {
140         if (adev && amdgpu_ras_get_context(adev))
141                 amdgpu_ras_get_context(adev)->error_query_ready = ready;
142 }
143
144 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
145 {
146         if (adev && amdgpu_ras_get_context(adev))
147                 return amdgpu_ras_get_context(adev)->error_query_ready;
148
149         return false;
150 }
151
152 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
153 {
154         struct ras_err_data err_data = {0, 0, 0, NULL};
155         struct eeprom_table_record err_rec;
156
157         if ((address >= adev->gmc.mc_vram_size) ||
158             (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
159                 dev_warn(adev->dev,
160                          "RAS WARN: input address 0x%llx is invalid.\n",
161                          address);
162                 return -EINVAL;
163         }
164
165         if (amdgpu_ras_check_bad_page(adev, address)) {
166                 dev_warn(adev->dev,
167                          "RAS WARN: 0x%llx has already been marked as bad page!\n",
168                          address);
169                 return 0;
170         }
171
172         memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
173         err_data.err_addr = &err_rec;
174         amdgpu_umc_fill_error_record(&err_data, address,
175                         (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
176
177         if (amdgpu_bad_page_threshold != 0) {
178                 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
179                                          err_data.err_addr_cnt);
180                 amdgpu_ras_save_bad_pages(adev, NULL);
181         }
182
183         dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
184         dev_warn(adev->dev, "Clear EEPROM:\n");
185         dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
186
187         return 0;
188 }
189
190 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
191                                         size_t size, loff_t *pos)
192 {
193         struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
194         struct ras_query_if info = {
195                 .head = obj->head,
196         };
197         ssize_t s;
198         char val[128];
199
200         if (amdgpu_ras_query_error_status(obj->adev, &info))
201                 return -EINVAL;
202
203         /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
204         if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
205             obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
206                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
207                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
208         }
209
210         s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
211                         "ue", info.ue_count,
212                         "ce", info.ce_count);
213         if (*pos >= s)
214                 return 0;
215
216         s -= *pos;
217         s = min_t(u64, s, size);
218
219
220         if (copy_to_user(buf, &val[*pos], s))
221                 return -EINVAL;
222
223         *pos += s;
224
225         return s;
226 }
227
228 static const struct file_operations amdgpu_ras_debugfs_ops = {
229         .owner = THIS_MODULE,
230         .read = amdgpu_ras_debugfs_read,
231         .write = NULL,
232         .llseek = default_llseek
233 };
234
235 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
236 {
237         int i;
238
239         for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
240                 *block_id = i;
241                 if (strcmp(name, ras_block_string[i]) == 0)
242                         return 0;
243         }
244         return -EINVAL;
245 }
246
247 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
248                 const char __user *buf, size_t size,
249                 loff_t *pos, struct ras_debug_if *data)
250 {
251         ssize_t s = min_t(u64, 64, size);
252         char str[65];
253         char block_name[33];
254         char err[9] = "ue";
255         int op = -1;
256         int block_id;
257         uint32_t sub_block;
258         u64 address, value;
259         /* default value is 0 if the mask is not set by user */
260         u32 instance_mask = 0;
261
262         if (*pos)
263                 return -EINVAL;
264         *pos = size;
265
266         memset(str, 0, sizeof(str));
267         memset(data, 0, sizeof(*data));
268
269         if (copy_from_user(str, buf, s))
270                 return -EINVAL;
271
272         if (sscanf(str, "disable %32s", block_name) == 1)
273                 op = 0;
274         else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
275                 op = 1;
276         else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
277                 op = 2;
278         else if (strstr(str, "retire_page") != NULL)
279                 op = 3;
280         else if (str[0] && str[1] && str[2] && str[3])
281                 /* ascii string, but no command matched */
282                 return -EINVAL;
283
284         if (op != -1) {
285                 if (op == 3) {
286                         if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
287                             sscanf(str, "%*s %llu", &address) != 1)
288                                 return -EINVAL;
289
290                         data->op = op;
291                         data->inject.address = address;
292
293                         return 0;
294                 }
295
296                 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
297                         return -EINVAL;
298
299                 data->head.block = block_id;
300                 /* only ue and ce errors are supported */
301                 if (!memcmp("ue", err, 2))
302                         data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
303                 else if (!memcmp("ce", err, 2))
304                         data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
305                 else
306                         return -EINVAL;
307
308                 data->op = op;
309
310                 if (op == 2) {
311                         if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
312                                    &sub_block, &address, &value, &instance_mask) != 4 &&
313                             sscanf(str, "%*s %*s %*s %u %llu %llu %u",
314                                    &sub_block, &address, &value, &instance_mask) != 4 &&
315                                 sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
316                                    &sub_block, &address, &value) != 3 &&
317                             sscanf(str, "%*s %*s %*s %u %llu %llu",
318                                    &sub_block, &address, &value) != 3)
319                                 return -EINVAL;
320                         data->head.sub_block_index = sub_block;
321                         data->inject.address = address;
322                         data->inject.value = value;
323                         data->inject.instance_mask = instance_mask;
324                 }
325         } else {
326                 if (size < sizeof(*data))
327                         return -EINVAL;
328
329                 if (copy_from_user(data, buf, sizeof(*data)))
330                         return -EINVAL;
331         }
332
333         return 0;
334 }
335
336 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
337                                 struct ras_debug_if *data)
338 {
339         int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
340         uint32_t mask, inst_mask = data->inject.instance_mask;
341
342         /* no need to set instance mask if there is only one instance */
343         if (num_xcc <= 1 && inst_mask) {
344                 data->inject.instance_mask = 0;
345                 dev_dbg(adev->dev,
346                         "RAS inject mask(0x%x) isn't supported, forcing it to 0.\n",
347                         inst_mask);
348
349                 return;
350         }
351
352         switch (data->head.block) {
353         case AMDGPU_RAS_BLOCK__GFX:
354                 mask = GENMASK(num_xcc - 1, 0);
355                 break;
356         case AMDGPU_RAS_BLOCK__SDMA:
357                 mask = GENMASK(adev->sdma.num_instances - 1, 0);
358                 break;
359         case AMDGPU_RAS_BLOCK__VCN:
360         case AMDGPU_RAS_BLOCK__JPEG:
361                 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
362                 break;
363         default:
364                 mask = inst_mask;
365                 break;
366         }
367
368         /* remove invalid bits in instance mask */
369         data->inject.instance_mask &= mask;
370         if (inst_mask != data->inject.instance_mask)
371                 dev_dbg(adev->dev,
372                         "Adjust RAS inject mask 0x%x to 0x%x\n",
373                         inst_mask, data->inject.instance_mask);
374 }
375
376 /**
377  * DOC: AMDGPU RAS debugfs control interface
378  *
379  * The control interface accepts struct ras_debug_if which has two members.
380  *
381  * First member: ras_debug_if::head or ras_debug_if::inject.
382  *
383  * head is used to indicate which IP block will be under control.
384  *
385  * head has four members, they are block, type, sub_block_index, name.
386  * block: which IP will be under control.
387  * type: what kind of error will be enabled/disabled/injected.
388  * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
389  * name: the name of IP.
390  *
391  * inject has three more members than head: address, value and mask.
392  * As their names indicate, an inject operation writes the
393  * value to the address.
394  *
395  * The second member: struct ras_debug_if::op.
396  * It has three kinds of operations.
397  *
398  * - 0: disable RAS on the block. Takes ::head as its data.
399  * - 1: enable RAS on the block. Takes ::head as its data.
400  * - 2: inject errors on the block. Takes ::inject as its data.
401  *
402  * How to use the interface?
403  *
404  * In a program
405  *
406  * Copy the struct ras_debug_if in your code and initialize it.
407  * Write the struct to the control interface.
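 *
 * For example, a minimal user-space sketch (hypothetical and untested; it
 * assumes card 0 and the struct ras_debug_if layout from amdgpu_ras.h, uses
 * <fcntl.h>, <string.h> and <unistd.h>, and omits error handling) that
 * disables RAS on the umc block could look like:
 *
 * .. code-block:: c
 *
 *      struct ras_debug_if data;
 *      int fd;
 *
 *      memset(&data, 0, sizeof(data));
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *      data.op = 0;
 *
 *      fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *      write(fd, &data, sizeof(data));
 *      close(fd);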
408  *
409  * From shell
410  *
411  * .. code-block:: bash
412  *
413  *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
414  *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
415  *      echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
416  *
417  * Where N is the card which you want to affect.
418  *
419  * "disable" requires only the block.
420  * "enable" requires the block and error type.
421  * "inject" requires the block, error type, address, and value.
422  *
423  * The block is one of: umc, sdma, gfx, etc.
424  *      see ras_block_string[] for details
425  *
426  * The error type is one of: ue, ce, where,
427  *      ue is multi-uncorrectable
428  *      ce is single-correctable
429  *
430  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
431  * The address and value are hexadecimal numbers; the leading 0x is optional.
432  * The mask is the instance mask; it is optional and defaults to 0x1.
433  *
434  * For instance,
435  *
436  * .. code-block:: bash
437  *
438  *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
439  *      echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
440  *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
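 *
 * There is also a retire_page command (handled as op 3 by the driver), which
 * takes only an address; the address below is just an example:
 *
 * .. code-block:: bash
 *
 *      echo "retire_page 0x1000" > /sys/kernel/debug/dri/0/ras/ras_ctrl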
441  *
442  * How to check the result of the operation?
443  *
444  * To check disable/enable, see "ras" features at,
445  * /sys/class/drm/card[0/1/2...]/device/ras/features
446  *
447  * To check inject, see the corresponding error count at,
448  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
449  *
450  * .. note::
451  *      Operations are only allowed on blocks which are supported.
452  *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
453  *      to see which blocks support RAS on a particular asic.
454  *
455  */
456 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
457                                              const char __user *buf,
458                                              size_t size, loff_t *pos)
459 {
460         struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
461         struct ras_debug_if data;
462         int ret = 0;
463
464         if (!amdgpu_ras_get_error_query_ready(adev)) {
465                 dev_warn(adev->dev, "RAS WARN: error injection "
466                                 "currently inaccessible\n");
467                 return size;
468         }
469
470         ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
471         if (ret)
472                 return ret;
473
474         if (data.op == 3) {
475                 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
476                 if (!ret)
477                         return size;
478                 else
479                         return ret;
480         }
481
482         if (!amdgpu_ras_is_supported(adev, data.head.block))
483                 return -EINVAL;
484
485         switch (data.op) {
486         case 0:
487                 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
488                 break;
489         case 1:
490                 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
491                 break;
492         case 2:
493                 if ((data.inject.address >= adev->gmc.mc_vram_size) ||
494                     (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
495                         dev_warn(adev->dev, "RAS WARN: input address "
496                                         "0x%llx is invalid.",
497                                         data.inject.address);
498                         ret = -EINVAL;
499                         break;
500                 }
501
502                 /* umc ce/ue error injection for a bad page is not allowed */
503                 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
504                     amdgpu_ras_check_bad_page(adev, data.inject.address)) {
505                         dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
506                                  "already been marked as bad!\n",
507                                  data.inject.address);
508                         break;
509                 }
510
511                 amdgpu_ras_instance_mask_check(adev, &data);
512
513                 /* data.inject.address is an offset rather than an absolute gpu address */
514                 ret = amdgpu_ras_error_inject(adev, &data.inject);
515                 break;
516         default:
517                 ret = -EINVAL;
518                 break;
519         }
520
521         if (ret)
522                 return ret;
523
524         return size;
525 }
526
527 /**
528  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
529  *
530  * Some boards contain an EEPROM which is used to persistently store a list of
531  * bad pages which experienced ECC errors in vram.  This interface provides
532  * a way to reset the EEPROM, e.g., after testing error injection.
533  *
534  * Usage:
535  *
536  * .. code-block:: bash
537  *
538  *      echo 1 > ../ras/ras_eeprom_reset
539  *
540  * will reset the EEPROM table to 0 entries.
541  *
542  */
543 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
544                                                const char __user *buf,
545                                                size_t size, loff_t *pos)
546 {
547         struct amdgpu_device *adev =
548                 (struct amdgpu_device *)file_inode(f)->i_private;
549         int ret;
550
551         ret = amdgpu_ras_eeprom_reset_table(
552                 &(amdgpu_ras_get_context(adev)->eeprom_control));
553
554         if (!ret) {
555                 /* Something was written to EEPROM.
556                  */
557                 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
558                 return size;
559         } else {
560                 return ret;
561         }
562 }
563
564 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
565         .owner = THIS_MODULE,
566         .read = NULL,
567         .write = amdgpu_ras_debugfs_ctrl_write,
568         .llseek = default_llseek
569 };
570
571 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
572         .owner = THIS_MODULE,
573         .read = NULL,
574         .write = amdgpu_ras_debugfs_eeprom_write,
575         .llseek = default_llseek
576 };
577
578 /**
579  * DOC: AMDGPU RAS sysfs Error Count Interface
580  *
581  * It allows the user to read the error count for each IP block on the gpu through
582  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
583  *
584  * It outputs multiple lines which report the uncorrected (ue) and corrected
585  * (ce) error counts.
586  *
587  * The format of one line is below,
588  *
589  * [ce|ue]: count
590  *
591  * Example:
592  *
593  * .. code-block:: bash
594  *
595  *      ue: 0
596  *      ce: 1
597  *
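 * The count for a block can be read with a plain cat, for example (assuming
 * card0 and the umc block):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count
 *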
598  */
599 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
600                 struct device_attribute *attr, char *buf)
601 {
602         struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
603         struct ras_query_if info = {
604                 .head = obj->head,
605         };
606
607         if (!amdgpu_ras_get_error_query_ready(obj->adev))
608                 return sysfs_emit(buf, "Query currently inaccessible\n");
609
610         if (amdgpu_ras_query_error_status(obj->adev, &info))
611                 return -EINVAL;
612
613         if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
614             obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
615                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
616                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
617         }
618
619         return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
620                           "ce", info.ce_count);
621 }
622
623 /* obj begin */
624
625 #define get_obj(obj) do { (obj)->use++; } while (0)
626 #define alive_obj(obj) ((obj)->use)
627
628 static inline void put_obj(struct ras_manager *obj)
629 {
630         if (obj && (--obj->use == 0))
631                 list_del(&obj->node);
632         if (obj && (obj->use < 0))
633                 DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
634 }
635
636 /* make one obj and return it. */
637 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
638                 struct ras_common_if *head)
639 {
640         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
641         struct ras_manager *obj;
642
643         if (!adev->ras_enabled || !con)
644                 return NULL;
645
646         if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
647                 return NULL;
648
649         if (head->block == AMDGPU_RAS_BLOCK__MCA) {
650                 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
651                         return NULL;
652
653                 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
654         } else
655                 obj = &con->objs[head->block];
656
657         /* already exists, do not create it again */
658         if (alive_obj(obj))
659                 return NULL;
660
661         obj->head = *head;
662         obj->adev = adev;
663         list_add(&obj->node, &con->head);
664         get_obj(obj);
665
666         return obj;
667 }
668
669 /* return an obj equal to head, or the first when head is NULL */
670 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
671                 struct ras_common_if *head)
672 {
673         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
674         struct ras_manager *obj;
675         int i;
676
677         if (!adev->ras_enabled || !con)
678                 return NULL;
679
680         if (head) {
681                 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
682                         return NULL;
683
684                 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
685                         if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
686                                 return NULL;
687
688                         obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
689                 } else
690                         obj = &con->objs[head->block];
691
692                 if (alive_obj(obj))
693                         return obj;
694         } else {
695                 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
696                         obj = &con->objs[i];
697                         if (alive_obj(obj))
698                                 return obj;
699                 }
700         }
701
702         return NULL;
703 }
704 /* obj end */
705
706 /* feature ctl begin */
707 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
708                                          struct ras_common_if *head)
709 {
710         return adev->ras_hw_enabled & BIT(head->block);
711 }
712
713 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
714                 struct ras_common_if *head)
715 {
716         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
717
718         return con->features & BIT(head->block);
719 }
720
721 /*
722  * if obj is not created, then create one.
723  * set feature enable flag.
724  */
725 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
726                 struct ras_common_if *head, int enable)
727 {
728         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
729         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
730
731         /* If the hardware does not support ras, then do not create the obj.
732          * But if the hardware supports ras, we can create the obj.
733          * The ras framework checks con->hw_supported to see if it needs to do
734          * the corresponding initialization.
735          * The IP checks con->support to see if it needs to disable ras.
736          */
737         if (!amdgpu_ras_is_feature_allowed(adev, head))
738                 return 0;
739
740         if (enable) {
741                 if (!obj) {
742                         obj = amdgpu_ras_create_obj(adev, head);
743                         if (!obj)
744                                 return -EINVAL;
745                 } else {
746                         /* In case we create obj somewhere else */
747                         get_obj(obj);
748                 }
749                 con->features |= BIT(head->block);
750         } else {
751                 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
752                         con->features &= ~BIT(head->block);
753                         put_obj(obj);
754                 }
755         }
756
757         return 0;
758 }
759
760 static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
761                 struct ras_common_if *head)
762 {
763         if (amdgpu_ras_is_feature_allowed(adev, head) ||
764                 amdgpu_ras_is_poison_mode_supported(adev))
765                 return 1;
766         else
767                 return 0;
768 }
769
770 /* wrapper of psp_ras_enable_features */
771 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
772                 struct ras_common_if *head, bool enable)
773 {
774         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
775         union ta_ras_cmd_input *info;
776         int ret = 0;
777
778         if (!con)
779                 return -EINVAL;
780
781         if (head->block == AMDGPU_RAS_BLOCK__GFX) {
782                 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
783                 if (!info)
784                         return -ENOMEM;
785
786                 if (!enable) {
787                         info->disable_features = (struct ta_ras_disable_features_input) {
788                                 .block_id =  amdgpu_ras_block_to_ta(head->block),
789                                 .error_type = amdgpu_ras_error_to_ta(head->type),
790                         };
791                 } else {
792                         info->enable_features = (struct ta_ras_enable_features_input) {
793                                 .block_id =  amdgpu_ras_block_to_ta(head->block),
794                                 .error_type = amdgpu_ras_error_to_ta(head->type),
795                         };
796                 }
797         }
798
799         /* Do not enable if it is not allowed. */
800         if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
801                 goto out;
802
803         /* Only enable ras feature operation handle on host side */
804         if (head->block == AMDGPU_RAS_BLOCK__GFX &&
805                 !amdgpu_sriov_vf(adev) &&
806                 !amdgpu_ras_intr_triggered()) {
807                 ret = psp_ras_enable_features(&adev->psp, info, enable);
808                 if (ret) {
809                         dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
810                                 enable ? "enable":"disable",
811                                 get_ras_block_str(head),
812                                 amdgpu_ras_is_poison_mode_supported(adev), ret);
813                         goto out;
814                 }
815         }
816
817         /* setup the obj */
818         __amdgpu_ras_feature_enable(adev, head, enable);
819 out:
820         if (head->block == AMDGPU_RAS_BLOCK__GFX)
821                 kfree(info);
822         return ret;
823 }
824
825 /* Only used in device probe stage and called only once. */
826 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
827                 struct ras_common_if *head, bool enable)
828 {
829         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
830         int ret;
831
832         if (!con)
833                 return -EINVAL;
834
835         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
836                 if (enable) {
837                         /* There is no harm in issuing a ras TA cmd regardless of
838                          * the current ras state.
839                          * If current state == target state, it will do nothing.
840                          * But sometimes it requests the driver to reset and repost
841                          * with error code -EAGAIN.
842                          */
843                         ret = amdgpu_ras_feature_enable(adev, head, 1);
844                         /* With an old ras TA, we might fail to enable ras.
845                          * Log it and just set up the object.
846                          * TODO: remove this workaround in the future.
847                          */
848                         if (ret == -EINVAL) {
849                                 ret = __amdgpu_ras_feature_enable(adev, head, 1);
850                                 if (!ret)
851                                         dev_info(adev->dev,
852                                                 "RAS INFO: %s setup object\n",
853                                                 get_ras_block_str(head));
854                         }
855                 } else {
856                         /* set up the object, then issue a ras TA disable cmd. */
857                         ret = __amdgpu_ras_feature_enable(adev, head, 1);
858                         if (ret)
859                                 return ret;
860
861                         /* the gfx block ras disable cmd must be sent to the ras TA */
862                         if (head->block == AMDGPU_RAS_BLOCK__GFX)
863                                 con->features |= BIT(head->block);
864
865                         ret = amdgpu_ras_feature_enable(adev, head, 0);
866
867                         /* clean gfx block ras features flag */
868                         if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
869                                 con->features &= ~BIT(head->block);
870                 }
871         } else
872                 ret = amdgpu_ras_feature_enable(adev, head, enable);
873
874         return ret;
875 }
876
877 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
878                 bool bypass)
879 {
880         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
881         struct ras_manager *obj, *tmp;
882
883         list_for_each_entry_safe(obj, tmp, &con->head, node) {
884                 /* bypass psp.
885                  * aka just release the obj and corresponding flags
886                  */
887                 if (bypass) {
888                         if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
889                                 break;
890                 } else {
891                         if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
892                                 break;
893                 }
894         }
895
896         return con->features;
897 }
898
899 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
900                 bool bypass)
901 {
902         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
903         int i;
904         const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
905
906         for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
907                 struct ras_common_if head = {
908                         .block = i,
909                         .type = default_ras_type,
910                         .sub_block_index = 0,
911                 };
912
913                 if (i == AMDGPU_RAS_BLOCK__MCA)
914                         continue;
915
916                 if (bypass) {
917                         /*
918                          * bypass psp. vbios has enabled ras for us,
919                          * so just create the obj
920                          */
921                         if (__amdgpu_ras_feature_enable(adev, &head, 1))
922                                 break;
923                 } else {
924                         if (amdgpu_ras_feature_enable(adev, &head, 1))
925                                 break;
926                 }
927         }
928
929         for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
930                 struct ras_common_if head = {
931                         .block = AMDGPU_RAS_BLOCK__MCA,
932                         .type = default_ras_type,
933                         .sub_block_index = i,
934                 };
935
936                 if (bypass) {
937                         /*
938                          * bypass psp. vbios has enabled ras for us,
939                          * so just create the obj
940                          */
941                         if (__amdgpu_ras_feature_enable(adev, &head, 1))
942                                 break;
943                 } else {
944                         if (amdgpu_ras_feature_enable(adev, &head, 1))
945                                 break;
946                 }
947         }
948
949         return con->features;
950 }
951 /* feature ctl end */
952
953 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
954                 enum amdgpu_ras_block block)
955 {
956         if (!block_obj)
957                 return -EINVAL;
958
959         if (block_obj->ras_comm.block == block)
960                 return 0;
961
962         return -EINVAL;
963 }
964
965 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
966                                         enum amdgpu_ras_block block, uint32_t sub_block_index)
967 {
968         struct amdgpu_ras_block_list *node, *tmp;
969         struct amdgpu_ras_block_object *obj;
970
971         if (block >= AMDGPU_RAS_BLOCK__LAST)
972                 return NULL;
973
974         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
975                 if (!node->ras_obj) {
976                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
977                         continue;
978                 }
979
980                 obj = node->ras_obj;
981                 if (obj->ras_block_match) {
982                         if (obj->ras_block_match(obj, block, sub_block_index) == 0)
983                                 return obj;
984                 } else {
985                         if (amdgpu_ras_block_match_default(obj, block) == 0)
986                                 return obj;
987                 }
988         }
989
990         return NULL;
991 }
992
993 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
994 {
995         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
996         int ret = 0;
997
998         /*
999          * choose the right query method according to
1000          * whether the smu supports querying error information
1001          */
1002         ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1003         if (ret == -EOPNOTSUPP) {
1004                 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1005                         adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1006                         adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1007
1008                 /* umc query_ras_error_address is also responsible for clearing
1009                  * error status
1010                  */
1011                 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1012                     adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1013                         adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1014         } else if (!ret) {
1015                 if (adev->umc.ras &&
1016                         adev->umc.ras->ecc_info_query_ras_error_count)
1017                         adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1018
1019                 if (adev->umc.ras &&
1020                         adev->umc.ras->ecc_info_query_ras_error_address)
1021                         adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1022         }
1023 }
1024
1025 /* query/inject/cure begin */
1026 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
1027                                   struct ras_query_if *info)
1028 {
1029         struct amdgpu_ras_block_object *block_obj = NULL;
1030         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1031         struct ras_err_data err_data = {0, 0, 0, NULL};
1032
1033         if (!obj)
1034                 return -EINVAL;
1035
1036         if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1037                 amdgpu_ras_get_ecc_info(adev, &err_data);
1038         } else {
1039                 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1040                 if (!block_obj || !block_obj->hw_ops)   {
1041                         dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1042                                      get_ras_block_str(&info->head));
1043                         return -EINVAL;
1044                 }
1045
1046                 if (block_obj->hw_ops->query_ras_error_count)
1047                         block_obj->hw_ops->query_ras_error_count(adev, &err_data);
1048
1049                 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1050                     (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1051                     (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1052                         if (block_obj->hw_ops->query_ras_error_status)
1053                                 block_obj->hw_ops->query_ras_error_status(adev);
1054                 }
1055         }
1056
1057         obj->err_data.ue_count += err_data.ue_count;
1058         obj->err_data.ce_count += err_data.ce_count;
1059
1060         info->ue_count = obj->err_data.ue_count;
1061         info->ce_count = obj->err_data.ce_count;
1062
1063         if (err_data.ce_count) {
1064                 if (adev->smuio.funcs &&
1065                     adev->smuio.funcs->get_socket_id &&
1066                     adev->smuio.funcs->get_die_id) {
1067                         dev_info(adev->dev, "socket: %d, die: %d "
1068                                         "%ld correctable hardware errors "
1069                                         "detected in %s block, no user "
1070                                         "action is needed.\n",
1071                                         adev->smuio.funcs->get_socket_id(adev),
1072                                         adev->smuio.funcs->get_die_id(adev),
1073                                         obj->err_data.ce_count,
1074                                         get_ras_block_str(&info->head));
1075                 } else {
1076                         dev_info(adev->dev, "%ld correctable hardware errors "
1077                                         "detected in %s block, no user "
1078                                         "action is needed.\n",
1079                                         obj->err_data.ce_count,
1080                                         get_ras_block_str(&info->head));
1081                 }
1082         }
1083         if (err_data.ue_count) {
1084                 if (adev->smuio.funcs &&
1085                     adev->smuio.funcs->get_socket_id &&
1086                     adev->smuio.funcs->get_die_id) {
1087                         dev_info(adev->dev, "socket: %d, die: %d "
1088                                         "%ld uncorrectable hardware errors "
1089                                         "detected in %s block\n",
1090                                         adev->smuio.funcs->get_socket_id(adev),
1091                                         adev->smuio.funcs->get_die_id(adev),
1092                                         obj->err_data.ue_count,
1093                                         get_ras_block_str(&info->head));
1094                 } else {
1095                         dev_info(adev->dev, "%ld uncorrectable hardware errors "
1096                                         "detected in %s block\n",
1097                                         obj->err_data.ue_count,
1098                                         get_ras_block_str(&info->head));
1099                 }
1100         }
1101
1102         return 0;
1103 }
1104
1105 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1106                 enum amdgpu_ras_block block)
1107 {
1108         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1109
1110         if (!amdgpu_ras_is_supported(adev, block))
1111                 return -EINVAL;
1112
1113         if (!block_obj || !block_obj->hw_ops)   {
1114                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1115                              ras_block_str(block));
1116                 return -EINVAL;
1117         }
1118
1119         if (block_obj->hw_ops->reset_ras_error_count)
1120                 block_obj->hw_ops->reset_ras_error_count(adev);
1121
1122         if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1123             (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1124                 if (block_obj->hw_ops->reset_ras_error_status)
1125                         block_obj->hw_ops->reset_ras_error_status(adev);
1126         }
1127
1128         return 0;
1129 }
1130
1131 /* wrapper of psp_ras_trigger_error */
1132 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1133                 struct ras_inject_if *info)
1134 {
1135         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1136         struct ta_ras_trigger_error_input block_info = {
1137                 .block_id =  amdgpu_ras_block_to_ta(info->head.block),
1138                 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1139                 .sub_block_index = info->head.sub_block_index,
1140                 .address = info->address,
1141                 .value = info->value,
1142         };
1143         int ret = -EINVAL;
1144         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1145                                                         info->head.block,
1146                                                         info->head.sub_block_index);
1147
1148         /* inject on guest isn't allowed, return success directly */
1149         if (amdgpu_sriov_vf(adev))
1150                 return 0;
1151
1152         if (!obj)
1153                 return -EINVAL;
1154
1155         if (!block_obj || !block_obj->hw_ops)   {
1156                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1157                              get_ras_block_str(&info->head));
1158                 return -EINVAL;
1159         }
1160
1161         /* Calculate XGMI relative offset */
1162         if (adev->gmc.xgmi.num_physical_nodes > 1) {
1163                 block_info.address =
1164                         amdgpu_xgmi_get_relative_phy_addr(adev,
1165                                                           block_info.address);
1166         }
1167
1168         if (block_obj->hw_ops->ras_error_inject) {
1169                 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1170                         ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1171                 else /* Special ras_error_inject is defined (e.g: xgmi) */
1172                         ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1173                                                 info->instance_mask);
1174         } else {
1175                 /* default path */
1176                 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1177         }
1178
1179         if (ret)
1180                 dev_err(adev->dev, "ras inject %s failed %d\n",
1181                         get_ras_block_str(&info->head), ret);
1182
1183         return ret;
1184 }
1185
1186 /**
1187  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1188  * @adev: pointer to AMD GPU device
1189  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1190  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1191  * @query_info: pointer to ras_query_if
1192  *
1193  * Return 0 if the query succeeds or if there is nothing to do; otherwise
1194  * return an error on failure.
1195  */
1196 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1197                                                unsigned long *ce_count,
1198                                                unsigned long *ue_count,
1199                                                struct ras_query_if *query_info)
1200 {
1201         int ret;
1202
1203         if (!query_info)
1204                 /* do nothing if query_info is not specified */
1205                 return 0;
1206
1207         ret = amdgpu_ras_query_error_status(adev, query_info);
1208         if (ret)
1209                 return ret;
1210
1211         *ce_count += query_info->ce_count;
1212         *ue_count += query_info->ue_count;
1213
1214         /* some hardware/IPs support read-to-clear,
1215          * so no need to explicitly reset the err status after the query call */
1216         if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1217             adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
1218                 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1219                         dev_warn(adev->dev,
1220                                  "Failed to reset error counter and error status\n");
1221         }
1222
1223         return 0;
1224 }
1225
1226 /**
1227  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1228  * @adev: pointer to AMD GPU device
1229  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1230  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1231  * errors.
1232  * @query_info: pointer to ras_query_if if the query request is only for a
1233  * specific ip block; if it is NULL, then the query request is for
1234  * all the ip blocks that support querying ras error counters/status
1235  *
1236  * If @ce_count or @ue_count is set, count and return the corresponding
1237  * error counts in those integer pointers. Return 0 if the device
1238  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
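 *
 * A minimal in-kernel usage sketch (a hypothetical caller with a valid
 * @adev, querying the totals across all blocks):
 *
 * .. code-block:: c
 *
 *      unsigned long ce = 0, ue = 0;
 *
 *      if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *              dev_info(adev->dev, "ce %lu ue %lu\n", ce, ue);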
1239  */
1240 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1241                                  unsigned long *ce_count,
1242                                  unsigned long *ue_count,
1243                                  struct ras_query_if *query_info)
1244 {
1245         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1246         struct ras_manager *obj;
1247         unsigned long ce, ue;
1248         int ret = 0;
1249
1250         if (!adev->ras_enabled || !con)
1251                 return -EOPNOTSUPP;
1252
1253         /* Don't count since the caller did not ask for any counts.
1254          */
1255         if (!ce_count && !ue_count)
1256                 return 0;
1257
1258         ce = 0;
1259         ue = 0;
1260         if (!query_info) {
1261                 /* query all the ip blocks that support ras query interface */
1262                 list_for_each_entry(obj, &con->head, node) {
1263                         struct ras_query_if info = {
1264                                 .head = obj->head,
1265                         };
1266
1267                         ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1268                 }
1269         } else {
1270                 /* query specific ip block */
1271                 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1272         }
1273
1274         if (ret)
1275                 return ret;
1276
1277         if (ce_count)
1278                 *ce_count = ce;
1279
1280         if (ue_count)
1281                 *ue_count = ue;
1282
1283         return 0;
1284 }
1285 /* query/inject/cure end */
1286
1287
1288 /* sysfs begin */
1289
1290 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1291                 struct ras_badpage **bps, unsigned int *count);
1292
1293 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1294 {
1295         switch (flags) {
1296         case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1297                 return "R";
1298         case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1299                 return "P";
1300         case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1301         default:
1302                 return "F";
1303         }
1304 }
1305
1306 /**
1307  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1308  *
1309  * It allows the user to read the bad pages of vram on the gpu through
1310  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1311  *
1312  * It outputs multiple lines, and each line stands for one gpu page.
1313  *
1314  * The format of one line is below,
1315  * gpu pfn : gpu page size : flags
1316  *
1317  * gpu pfn and gpu page size are printed in hex format.
1318  * flags can be one of the characters below,
1319  *
1320  * R: reserved, this gpu page is reserved and not available for use.
1321  *
1322  * P: pending for reserve, this gpu page is marked as bad and will be reserved
1323  * in the next window of page_reserve.
1324  *
1325  * F: unable to reserve. this gpu page can't be reserved for some reason.
1326  *
1327  * Examples:
1328  *
1329  * .. code-block:: bash
1330  *
1331  *      0x00000001 : 0x00001000 : R
1332  *      0x00000002 : 0x00001000 : P
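 *
 * The list itself can be read with a plain cat, for example:
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages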
1333  *
1334  */
1335
1336 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1337                 struct kobject *kobj, struct bin_attribute *attr,
1338                 char *buf, loff_t ppos, size_t count)
1339 {
1340         struct amdgpu_ras *con =
1341                 container_of(attr, struct amdgpu_ras, badpages_attr);
1342         struct amdgpu_device *adev = con->adev;
1343         const unsigned int element_size =
1344                 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1345         unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1346         unsigned int end = div64_ul(ppos + count - 1, element_size);
1347         ssize_t s = 0;
1348         struct ras_badpage *bps = NULL;
1349         unsigned int bps_count = 0;
1350
1351         memset(buf, 0, count);
1352
1353         if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1354                 return 0;
1355
1356         for (; start < end && start < bps_count; start++)
1357                 s += scnprintf(&buf[s], element_size + 1,
1358                                 "0x%08x : 0x%08x : %1s\n",
1359                                 bps[start].bp,
1360                                 bps[start].size,
1361                                 amdgpu_ras_badpage_flags_str(bps[start].flags));
1362
1363         kfree(bps);
1364
1365         return s;
1366 }
1367
1368 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1369                 struct device_attribute *attr, char *buf)
1370 {
1371         struct amdgpu_ras *con =
1372                 container_of(attr, struct amdgpu_ras, features_attr);
1373
1374         return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1375 }
1376
1377 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1378 {
1379         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1380
1381         sysfs_remove_file_from_group(&adev->dev->kobj,
1382                                 &con->badpages_attr.attr,
1383                                 RAS_FS_NAME);
1384 }
1385
1386 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1387 {
1388         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1389         struct attribute *attrs[] = {
1390                 &con->features_attr.attr,
1391                 NULL
1392         };
1393         struct attribute_group group = {
1394                 .name = RAS_FS_NAME,
1395                 .attrs = attrs,
1396         };
1397
1398         sysfs_remove_group(&adev->dev->kobj, &group);
1399
1400         return 0;
1401 }
1402
1403 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1404                 struct ras_common_if *head)
1405 {
1406         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1407
1408         if (!obj || obj->attr_inuse)
1409                 return -EINVAL;
1410
1411         get_obj(obj);
1412
1413         snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1414                 "%s_err_count", head->name);
1415
1416         obj->sysfs_attr = (struct device_attribute){
1417                 .attr = {
1418                         .name = obj->fs_data.sysfs_name,
1419                         .mode = S_IRUGO,
1420                 },
1421                         .show = amdgpu_ras_sysfs_read,
1422         };
1423         sysfs_attr_init(&obj->sysfs_attr.attr);
1424
1425         if (sysfs_add_file_to_group(&adev->dev->kobj,
1426                                 &obj->sysfs_attr.attr,
1427                                 RAS_FS_NAME)) {
1428                 put_obj(obj);
1429                 return -EINVAL;
1430         }
1431
1432         obj->attr_inuse = 1;
1433
1434         return 0;
1435 }
1436
1437 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1438                 struct ras_common_if *head)
1439 {
1440         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1441
1442         if (!obj || !obj->attr_inuse)
1443                 return -EINVAL;
1444
1445         sysfs_remove_file_from_group(&adev->dev->kobj,
1446                                 &obj->sysfs_attr.attr,
1447                                 RAS_FS_NAME);
1448         obj->attr_inuse = 0;
1449         put_obj(obj);
1450
1451         return 0;
1452 }
1453
1454 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1455 {
1456         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1457         struct ras_manager *obj, *tmp;
1458
1459         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1460                 amdgpu_ras_sysfs_remove(adev, &obj->head);
1461         }
1462
1463         if (amdgpu_bad_page_threshold != 0)
1464                 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1465
1466         amdgpu_ras_sysfs_remove_feature_node(adev);
1467
1468         return 0;
1469 }
1470 /* sysfs end */
1471
1472 /**
1473  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1474  *
1475  * Normally when there is an uncorrectable error, the driver will reset
1476  * the GPU to recover.  However, in the event of an unrecoverable error,
1477  * the driver provides an interface to reboot the system automatically
1478  * when such an error occurs.
1479  *
1480  * The following file in debugfs provides that interface:
1481  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1482  *
1483  * Usage:
1484  *
1485  * .. code-block:: bash
1486  *
1487  *      echo true > .../ras/auto_reboot
1488  *
1489  */
1490 /* debugfs begin */
1491 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1492 {
1493         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1494         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1495         struct dentry     *dir;
1496
1497         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1498         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1499                             &amdgpu_ras_debugfs_ctrl_ops);
1500         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1501                             &amdgpu_ras_debugfs_eeprom_ops);
1502         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1503                            &con->bad_page_cnt_threshold);
1504         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1505         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1506         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1507                             &amdgpu_ras_debugfs_eeprom_size_ops);
1508         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1509                                                        S_IRUGO, dir, adev,
1510                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1511         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1512
1513         /*
1514          * After an uncorrectable error happens, GPU recovery is usually
1515          * scheduled. But because of the known problem that GPU recovery can
1516          * fail to bring the GPU back, the interface below gives the user a
1517          * direct way to reboot the system automatically when an
1518          * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
1519          * routine is never called in that case.
1520          */
1521         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1522
1523         /*
1524          * The user can set this so that the hardware error count registers
1525          * of the RAS IPs are not cleaned up during ras recovery.
1526          */
1527         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1528                             &con->disable_ras_err_cnt_harvest);
1529         return dir;
1530 }
1531
1532 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1533                                       struct ras_fs_if *head,
1534                                       struct dentry *dir)
1535 {
1536         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1537
1538         if (!obj || !dir)
1539                 return;
1540
1541         get_obj(obj);
1542
1543         memcpy(obj->fs_data.debugfs_name,
1544                         head->debugfs_name,
1545                         sizeof(obj->fs_data.debugfs_name));
1546
1547         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1548                             obj, &amdgpu_ras_debugfs_ops);
1549 }
1550
1551 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1552 {
1553         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1554         struct dentry *dir;
1555         struct ras_manager *obj;
1556         struct ras_fs_if fs_info;
1557
1558         /*
1559          * this function won't be called in the resume path, so there is
1560          * no need to check suspend and gpu reset status
1561          */
1562         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1563                 return;
1564
1565         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1566
1567         list_for_each_entry(obj, &con->head, node) {
1568                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1569                         (obj->attr_inuse == 1)) {
1570                         sprintf(fs_info.debugfs_name, "%s_err_inject",
1571                                         get_ras_block_str(&obj->head));
1572                         fs_info.head = obj->head;
1573                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1574                 }
1575         }
1576 }
1577
1578 /* debugfs end */
1579
1580 /* ras fs */
1581 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1582                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1583 static DEVICE_ATTR(features, S_IRUGO,
1584                 amdgpu_ras_sysfs_features_read, NULL);
1585 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1586 {
1587         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1588         struct attribute_group group = {
1589                 .name = RAS_FS_NAME,
1590         };
1591         struct attribute *attrs[] = {
1592                 &con->features_attr.attr,
1593                 NULL
1594         };
1595         struct bin_attribute *bin_attrs[] = {
1596                 NULL,
1597                 NULL,
1598         };
1599         int r;
1600
1601         /* add features entry */
1602         con->features_attr = dev_attr_features;
1603         group.attrs = attrs;
1604         sysfs_attr_init(attrs[0]);
1605
1606         if (amdgpu_bad_page_threshold != 0) {
1607                 /* add bad_page_features entry */
1608                 bin_attr_gpu_vram_bad_pages.private = NULL;
1609                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1610                 bin_attrs[0] = &con->badpages_attr;
1611                 group.bin_attrs = bin_attrs;
1612                 sysfs_bin_attr_init(bin_attrs[0]);
1613         }
1614
1615         r = sysfs_create_group(&adev->dev->kobj, &group);
1616         if (r)
1617                 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1618
1619         return 0;
1620 }
1621
1622 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1623 {
1624         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1625         struct ras_manager *con_obj, *ip_obj, *tmp;
1626
1627         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1628                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1629                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1630                         if (ip_obj)
1631                                 put_obj(ip_obj);
1632                 }
1633         }
1634
1635         amdgpu_ras_sysfs_remove_all(adev);
1636         return 0;
1637 }
1638 /* ras fs end */
1639
1640 /* ih begin */
1641
1642 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
1643  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
1644  * register to check whether the interrupt is triggered or not, and properly
1645  * ack the interrupt if it is there
1646  */
1647 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1648 {
1649         /* Fatal error events are handled on host side */
1650         if (amdgpu_sriov_vf(adev))
1651                 return;
1652
1653         if (adev->nbio.ras &&
1654             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1655                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1656
1657         if (adev->nbio.ras &&
1658             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1659                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1660 }
1661
1662 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1663                                 struct amdgpu_iv_entry *entry)
1664 {
1665         bool poison_stat = false;
1666         struct amdgpu_device *adev = obj->adev;
1667         struct amdgpu_ras_block_object *block_obj =
1668                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1669
1670         if (!block_obj)
1671                 return;
1672
1673         /* both query_poison_status and handle_poison_consumption are optional,
1674          * but at least one of them should be implemented if we need a poison
1675          * consumption handler
1676          */
1677         if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
1678                 poison_stat = block_obj->hw_ops->query_poison_status(adev);
1679                 if (!poison_stat) {
1680                         /* Not a poison consumption interrupt, no need to handle it */
1681                         dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1682                                         block_obj->ras_comm.name);
1683
1684                         return;
1685                 }
1686         }
1687
1688         if (!adev->gmc.xgmi.connected_to_cpu)
1689                 amdgpu_umc_poison_handler(adev, false);
1690
1691         if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
1692                 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1693
1694         /* gpu reset is the fallback for failed and default cases */
1695         if (poison_stat) {
1696                 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1697                                 block_obj->ras_comm.name);
1698                 amdgpu_ras_reset_gpu(adev);
1699         } else {
1700                 amdgpu_gfx_poison_consumption_handler(adev, entry);
1701         }
1702 }
1703
1704 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1705                                 struct amdgpu_iv_entry *entry)
1706 {
1707         dev_info(obj->adev->dev,
1708                 "Poison is created, no user action is needed.\n");
1709 }
1710
1711 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1712                                 struct amdgpu_iv_entry *entry)
1713 {
1714         struct ras_ih_data *data = &obj->ih_data;
1715         struct ras_err_data err_data = {0, 0, 0, NULL};
1716         int ret;
1717
1718         if (!data->cb)
1719                 return;
1720
1721         /* Let the IP handle its data; maybe we need to get the output
1722          * from the callback to update the error type/count, etc.
1723          */
1724         ret = data->cb(obj->adev, &err_data, entry);
1725         /* a ue will trigger an interrupt, and in that case
1726          * we need to do a reset to recover the whole system.
1727          * But leave the IP to do that recovery; here we just dispatch
1728          * the error.
1729          */
1730         if (ret == AMDGPU_RAS_SUCCESS) {
1731                 /* these counts could be left as 0 if
1732                  * some blocks do not count error numbers
1733                  */
1734                 obj->err_data.ue_count += err_data.ue_count;
1735                 obj->err_data.ce_count += err_data.ce_count;
1736         }
1737 }
1738
1739 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1740 {
1741         struct ras_ih_data *data = &obj->ih_data;
1742         struct amdgpu_iv_entry entry;
1743
1744         while (data->rptr != data->wptr) {
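                /* order the ring-entry read against the wptr check above;
                 * pairs with the wmb() in amdgpu_ras_interrupt_dispatch()
                 */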
1745                 rmb();
1746                 memcpy(&entry, &data->ring[data->rptr],
1747                                 data->element_size);
1748
1749                 wmb();
1750                 data->rptr = (data->aligned_element_size +
1751                                 data->rptr) % data->ring_size;
1752
1753                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1754                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1755                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1756                         else
1757                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1758                 } else {
1759                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1760                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1761                         else
1762                                 dev_warn(obj->adev->dev,
1763                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1764                 }
1765         }
1766 }
1767
1768 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1769 {
1770         struct ras_ih_data *data =
1771                 container_of(work, struct ras_ih_data, ih_work);
1772         struct ras_manager *obj =
1773                 container_of(data, struct ras_manager, ih_data);
1774
1775         amdgpu_ras_interrupt_handler(obj);
1776 }
1777
1778 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1779                 struct ras_dispatch_if *info)
1780 {
1781         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1782         struct ras_ih_data *data;
1783
1784         if (!obj)
1785                 return -EINVAL;
1786
1787         data = &obj->ih_data;
1788         if (data->inuse == 0)
1789                 return 0;
1790         /* Might be overflow... */
1791         memcpy(&data->ring[data->wptr], info->entry,
1792                         data->element_size);
1793
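        /* make the copied entry visible before advancing wptr; pairs with
         * the rmb() in amdgpu_ras_interrupt_handler()
         */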
1794         wmb();
1795         data->wptr = (data->aligned_element_size +
1796                         data->wptr) % data->ring_size;
1797
1798         schedule_work(&data->ih_work);
1799
1800         return 0;
1801 }
1802
1803 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1804                 struct ras_common_if *head)
1805 {
1806         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1807         struct ras_ih_data *data;
1808
1809         if (!obj)
1810                 return -EINVAL;
1811
1812         data = &obj->ih_data;
1813         if (data->inuse == 0)
1814                 return 0;
1815
1816         cancel_work_sync(&data->ih_work);
1817
1818         kfree(data->ring);
1819         memset(data, 0, sizeof(*data));
1820         put_obj(obj);
1821
1822         return 0;
1823 }
1824
1825 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1826                 struct ras_common_if *head)
1827 {
1828         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1829         struct ras_ih_data *data;
1830         struct amdgpu_ras_block_object *ras_obj;
1831
1832         if (!obj) {
1833                 /* in case we register the IH before enabling the ras feature */
1834                 obj = amdgpu_ras_create_obj(adev, head);
1835                 if (!obj)
1836                         return -EINVAL;
1837         } else
1838                 get_obj(obj);
1839
1840         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1841
1842         data = &obj->ih_data;
1843         /* add the callback, etc. */
1844         *data = (struct ras_ih_data) {
1845                 .inuse = 0,
1846                 .cb = ras_obj->ras_cb,
1847                 .element_size = sizeof(struct amdgpu_iv_entry),
1848                 .rptr = 0,
1849                 .wptr = 0,
1850         };
1851
1852         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1853
1854         data->aligned_element_size = ALIGN(data->element_size, 8);
1855         /* the ring can store 64 iv entries. */
1856         data->ring_size = 64 * data->aligned_element_size;
1857         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1858         if (!data->ring) {
1859                 put_obj(obj);
1860                 return -ENOMEM;
1861         }
1862
1863         /* IH is ready */
1864         data->inuse = 1;
1865
1866         return 0;
1867 }
1868
1869 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1870 {
1871         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1872         struct ras_manager *obj, *tmp;
1873
1874         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1875                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1876         }
1877
1878         return 0;
1879 }
1880 /* ih end */
1881
1882 /* traverse all IPs except NBIO to query error counters */
1883 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1884 {
1885         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1886         struct ras_manager *obj;
1887
1888         if (!adev->ras_enabled || !con)
1889                 return;
1890
1891         list_for_each_entry(obj, &con->head, node) {
1892                 struct ras_query_if info = {
1893                         .head = obj->head,
1894                 };
1895
1896                 /*
1897                  * The PCIE_BIF IP has a separate isr for the ras controller
1898                  * interrupt, and the specific ras counter query is
1899                  * done in that isr. So skip such blocks in the common
1900                  * sync flood interrupt isr.
1901                  */
1902                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1903                         continue;
1904
1905                 /*
1906                  * this is a workaround for aldebaran: skip sending the msg
1907                  * to smu to get the ecc_info table, because smu currently
1908                  * fails to handle that request.
1909                  * it should be removed once smu fixes ecc_info table handling.
1910                  */
1911                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1912                         (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1913                         continue;
1914
1915                 amdgpu_ras_query_error_status(adev, &info);
1916
1917                 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1918                     adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1919                     adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1920                         if (amdgpu_ras_reset_error_status(adev, info.head.block))
1921                                 dev_warn(adev->dev, "Failed to reset error counter and error status");
1922                 }
1923         }
1924 }
1925
1926 /* Parse RdRspStatus and WrRspStatus */
1927 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1928                                           struct ras_query_if *info)
1929 {
1930         struct amdgpu_ras_block_object *block_obj;
1931         /*
1932          * Only two blocks need to query the read/write
1933          * RspStatus at the current state
1934          */
1935         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1936                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1937                 return;
1938
1939         block_obj = amdgpu_ras_get_ras_block(adev,
1940                                         info->head.block,
1941                                         info->head.sub_block_index);
1942
1943         if (!block_obj || !block_obj->hw_ops) {
1944                 dev_dbg_once(adev->dev, "%s doesn't configure the RAS function\n",
1945                              get_ras_block_str(&info->head));
1946                 return;
1947         }
1948
1949         if (block_obj->hw_ops->query_ras_error_status)
1950                 block_obj->hw_ops->query_ras_error_status(adev);
1951
1952 }
1953
1954 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1955 {
1956         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1957         struct ras_manager *obj;
1958
1959         if (!adev->ras_enabled || !con)
1960                 return;
1961
1962         list_for_each_entry(obj, &con->head, node) {
1963                 struct ras_query_if info = {
1964                         .head = obj->head,
1965                 };
1966
1967                 amdgpu_ras_error_status_query(adev, &info);
1968         }
1969 }
1970
1971 /* recovery begin */
1972
1973 /* return 0 on success.
1974  * the caller needs to free bps.
1975  */
1976 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1977                 struct ras_badpage **bps, unsigned int *count)
1978 {
1979         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1980         struct ras_err_handler_data *data;
1981         int i = 0;
1982         int ret = 0, status;
1983
1984         if (!con || !con->eh_data || !bps || !count)
1985                 return -EINVAL;
1986
1987         mutex_lock(&con->recovery_lock);
1988         data = con->eh_data;
1989         if (!data || data->count == 0) {
1990                 *bps = NULL;
1991                 ret = -EINVAL;
1992                 goto out;
1993         }
1994
1995         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1996         if (!*bps) {
1997                 ret = -ENOMEM;
1998                 goto out;
1999         }
2000
2001         for (; i < data->count; i++) {
2002                 (*bps)[i] = (struct ras_badpage){
2003                         .bp = data->bps[i].retired_page,
2004                         .size = AMDGPU_GPU_PAGE_SIZE,
2005                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2006                 };
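                /* flags default to RESERVED; downgrade to PENDING or FAULT
                 * according to what the VRAM manager reports for the page
                 */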
2007                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2008                                 data->bps[i].retired_page);
2009                 if (status == -EBUSY)
2010                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2011                 else if (status == -ENOENT)
2012                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2013         }
2014
2015         *count = data->count;
2016 out:
2017         mutex_unlock(&con->recovery_lock);
2018         return ret;
2019 }
2020
2021 static void amdgpu_ras_do_recovery(struct work_struct *work)
2022 {
2023         struct amdgpu_ras *ras =
2024                 container_of(work, struct amdgpu_ras, recovery_work);
2025         struct amdgpu_device *remote_adev = NULL;
2026         struct amdgpu_device *adev = ras->adev;
2027         struct list_head device_list, *device_list_handle = NULL;
2028
2029         if (!ras->disable_ras_err_cnt_harvest) {
2030                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2031
2032                 /* Build list of devices to query RAS related errors */
2033                 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2034                         device_list_handle = &hive->device_list;
2035                 } else {
2036                         INIT_LIST_HEAD(&device_list);
2037                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
2038                         device_list_handle = &device_list;
2039                 }
2040
2041                 list_for_each_entry(remote_adev,
2042                                 device_list_handle, gmc.xgmi.head) {
2043                         amdgpu_ras_query_err_status(remote_adev);
2044                         amdgpu_ras_log_on_err_counter(remote_adev);
2045                 }
2046
2047                 amdgpu_put_xgmi_hive(hive);
2048         }
2049
2050         if (amdgpu_device_should_recover_gpu(ras->adev)) {
2051                 struct amdgpu_reset_context reset_context;
2052                 memset(&reset_context, 0, sizeof(reset_context));
2053
2054                 reset_context.method = AMD_RESET_METHOD_NONE;
2055                 reset_context.reset_req_dev = adev;
2056
2057                 /* Perform full reset in fatal error mode */
2058                 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2059                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2060                 else {
2061                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2062
2063                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2064                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2065                                 reset_context.method = AMD_RESET_METHOD_MODE2;
2066                         }
2067                 }
2068
2069                 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2070         }
2071         atomic_set(&ras->in_recovery, 0);
2072 }
2073
2074 /* alloc/realloc bps array */
2075 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2076                 struct ras_err_handler_data *data, int pages)
2077 {
2078         unsigned int old_space = data->count + data->space_left;
2079         unsigned int new_space = old_space + pages;
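        /* round the new capacity up to a multiple of 512 records to limit
         * reallocations
         */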
2080         unsigned int align_space = ALIGN(new_space, 512);
2081         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2082
2083         if (!bps) {
2084                 return -ENOMEM;
2085         }
2086
2087         if (data->bps) {
2088                 memcpy(bps, data->bps,
2089                                 data->count * sizeof(*data->bps));
2090                 kfree(data->bps);
2091         }
2092
2093         data->bps = bps;
2094         data->space_left += align_space - old_space;
2095         return 0;
2096 }
2097
2098 /* it deals with vram only. */
2099 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2100                 struct eeprom_table_record *bps, int pages)
2101 {
2102         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2103         struct ras_err_handler_data *data;
2104         int ret = 0;
2105         uint32_t i;
2106
2107         if (!con || !con->eh_data || !bps || pages <= 0)
2108                 return 0;
2109
2110         mutex_lock(&con->recovery_lock);
2111         data = con->eh_data;
2112         if (!data)
2113                 goto out;
2114
2115         for (i = 0; i < pages; i++) {
2116                 if (amdgpu_ras_check_bad_page_unlock(con,
2117                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2118                         continue;
2119
2120                 if (!data->space_left &&
2121                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2122                         ret = -ENOMEM;
2123                         goto out;
2124                 }
2125
2126                 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2127                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2128                         AMDGPU_GPU_PAGE_SIZE);
2129
2130                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2131                 data->count++;
2132                 data->space_left--;
2133         }
2134 out:
2135         mutex_unlock(&con->recovery_lock);
2136
2137         return ret;
2138 }
2139
2140 /*
2141  * write the error record array to eeprom; the function should be
2142  * protected by recovery_lock
2143  * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2144  */
2145 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2146                 unsigned long *new_cnt)
2147 {
2148         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2149         struct ras_err_handler_data *data;
2150         struct amdgpu_ras_eeprom_control *control;
2151         int save_count;
2152
2153         if (!con || !con->eh_data) {
2154                 if (new_cnt)
2155                         *new_cnt = 0;
2156
2157                 return 0;
2158         }
2159
2160         mutex_lock(&con->recovery_lock);
2161         control = &con->eeprom_control;
2162         data = con->eh_data;
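        /* the first ras_num_recs records are already in eeprom; only the
         * tail of the array is new
         */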
2163         save_count = data->count - control->ras_num_recs;
2164         mutex_unlock(&con->recovery_lock);
2165
2166         if (new_cnt)
2167                 *new_cnt = save_count / adev->umc.retire_unit;
2168
2169         /* only new entries are saved */
2170         if (save_count > 0) {
2171                 if (amdgpu_ras_eeprom_append(control,
2172                                              &data->bps[control->ras_num_recs],
2173                                              save_count)) {
2174                         dev_err(adev->dev, "Failed to save EEPROM table data!");
2175                         return -EIO;
2176                 }
2177
2178                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2179         }
2180
2181         return 0;
2182 }
2183
2184 /*
2185  * read error record array in eeprom and reserve enough space for
2186  * storing new bad pages
2187  */
2188 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2189 {
2190         struct amdgpu_ras_eeprom_control *control =
2191                 &adev->psp.ras_context.ras->eeprom_control;
2192         struct eeprom_table_record *bps;
2193         int ret;
2194
2195         /* no bad page records, skip eeprom access */
2196         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2197                 return 0;
2198
2199         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2200         if (!bps)
2201                 return -ENOMEM;
2202
2203         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2204         if (ret)
2205                 dev_err(adev->dev, "Failed to load EEPROM table records!");
2206         else
2207                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2208
2209         kfree(bps);
2210         return ret;
2211 }
2212
2213 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2214                                 uint64_t addr)
2215 {
2216         struct ras_err_handler_data *data = con->eh_data;
2217         int i;
2218
2219         addr >>= AMDGPU_GPU_PAGE_SHIFT;
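        /* bps[].retired_page stores page frame numbers, hence the shift above */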
2220         for (i = 0; i < data->count; i++)
2221                 if (addr == data->bps[i].retired_page)
2222                         return true;
2223
2224         return false;
2225 }
2226
2227 /*
2228  * check whether an address belongs to a bad page
2229  *
2230  * Note: this check is only for the umc block
2231  */
2232 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2233                                 uint64_t addr)
2234 {
2235         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2236         bool ret = false;
2237
2238         if (!con || !con->eh_data)
2239                 return ret;
2240
2241         mutex_lock(&con->recovery_lock);
2242         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2243         mutex_unlock(&con->recovery_lock);
2244         return ret;
2245 }
2246
2247 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2248                                           uint32_t max_count)
2249 {
2250         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2251
2252         /*
2253          * Justification of the bad_page_cnt_threshold value in the ras structure
2254          *
2255          * Generally, amdgpu_bad_page_threshold is either -2 or in the range
2256          * 0 <= amdgpu_bad_page_threshold <= max record length in eeprom,
2257          * which introduces two scenarios:
2258          *
2259          * Bad page retirement enablement:
2260          *    - If amdgpu_bad_page_threshold = -2,
2261          *      bad_page_cnt_threshold = typical value by the formula below.
2262          *
2263          *    - When the value from the user is 0 < amdgpu_bad_page_threshold <
2264          *      max record length in eeprom, use it directly.
2265          *
2266          * Bad page retirement disablement:
2267          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2268          *      functionality is disabled, and bad_page_cnt_threshold
2269          *      takes no effect.
2270          */
2271
2272         if (amdgpu_bad_page_threshold < 0) {
2273                 u64 val = adev->gmc.mc_vram_size;
2274
2275                 do_div(val, RAS_BAD_PAGE_COVER);
2276                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2277                                                   max_count);
2278         } else {
2279                 con->bad_page_cnt_threshold = min_t(int, max_count,
2280                                                     amdgpu_bad_page_threshold);
2281         }
2282 }
2283
2284 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2285 {
2286         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2287         struct ras_err_handler_data **data;
2288         u32  max_eeprom_records_count = 0;
2289         bool exc_err_limit = false;
2290         int ret;
2291
2292         if (!con || amdgpu_sriov_vf(adev))
2293                 return 0;
2294
2295         /* Allow access to RAS EEPROM via debugfs, when the ASIC
2296          * supports RAS and debugfs is enabled, but when
2297          * adev->ras_enabled is unset, i.e. when "ras_enable"
2298          * module parameter is set to 0.
2299          */
2300         con->adev = adev;
2301
2302         if (!adev->ras_enabled)
2303                 return 0;
2304
2305         data = &con->eh_data;
2306         *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
2307         if (!*data) {
2308                 ret = -ENOMEM;
2309                 goto out;
2310         }
2311
2312         mutex_init(&con->recovery_lock);
2313         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2314         atomic_set(&con->in_recovery, 0);
2315         con->eeprom_control.bad_channel_bitmap = 0;
2316
2317         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
2318         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2319
2320         /* Todo: during testing, the SMU might fail to read the eeprom through I2C
2321          * when the GPU is pending an XGMI reset at probe time
2322          * (mostly after the second bus reset), so skip it for now
2323          */
2324         if (adev->gmc.xgmi.pending_reset)
2325                 return 0;
2326         ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2327         /*
2328          * This call fails when exc_err_limit is true or
2329          * ret != 0.
2330          */
2331         if (exc_err_limit || ret)
2332                 goto free;
2333
2334         if (con->eeprom_control.ras_num_recs) {
2335                 ret = amdgpu_ras_load_bad_pages(adev);
2336                 if (ret)
2337                         goto free;
2338
2339                 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2340
2341                 if (con->update_channel_flag == true) {
2342                         amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2343                         con->update_channel_flag = false;
2344                 }
2345         }
2346
2347 #ifdef CONFIG_X86_MCE_AMD
2348         if ((adev->asic_type == CHIP_ALDEBARAN) &&
2349             (adev->gmc.xgmi.connected_to_cpu))
2350                 amdgpu_register_bad_pages_mca_notifier(adev);
2351 #endif
2352         return 0;
2353
2354 free:
2355         kfree((*data)->bps);
2356         kfree(*data);
2357         con->eh_data = NULL;
2358 out:
2359         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2360
2361         /*
2362          * Except for the error-threshold-exceeded case, other failures in
2363          * this function do not fail the amdgpu driver init.
2364          */
2365         if (!exc_err_limit)
2366                 ret = 0;
2367         else
2368                 ret = -EINVAL;
2369
2370         return ret;
2371 }
2372
2373 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2374 {
2375         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2376         struct ras_err_handler_data *data = con->eh_data;
2377
2378         /* if recovery_init failed to init it, fini is useless */
2379         if (!data)
2380                 return 0;
2381
2382         cancel_work_sync(&con->recovery_work);
2383
2384         mutex_lock(&con->recovery_lock);
2385         con->eh_data = NULL;
2386         kfree(data->bps);
2387         kfree(data);
2388         mutex_unlock(&con->recovery_lock);
2389
2390         return 0;
2391 }
2392 /* recovery end */
2393
2394 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2395 {
2396         if (amdgpu_sriov_vf(adev)) {
2397                 switch (adev->ip_versions[MP0_HWIP][0]) {
2398                 case IP_VERSION(13, 0, 2):
2399                         return true;
2400                 default:
2401                         return false;
2402                 }
2403         }
2404
2405         if (adev->asic_type == CHIP_IP_DISCOVERY) {
2406                 switch (adev->ip_versions[MP0_HWIP][0]) {
2407                 case IP_VERSION(13, 0, 0):
2408                 case IP_VERSION(13, 0, 10):
2409                         return true;
2410                 default:
2411                         return false;
2412                 }
2413         }
2414
2415         return adev->asic_type == CHIP_VEGA10 ||
2416                 adev->asic_type == CHIP_VEGA20 ||
2417                 adev->asic_type == CHIP_ARCTURUS ||
2418                 adev->asic_type == CHIP_ALDEBARAN ||
2419                 adev->asic_type == CHIP_SIENNA_CICHLID;
2420 }
2421
2422 /*
2423  * this is a workaround for the vega20 workstation sku:
2424  * force enable gfx ras and ignore the vbios gfx ras flag,
2425  * because GC EDC cannot be written
2426  */
2427 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2428 {
2429         struct atom_context *ctx = adev->mode_info.atom_context;
2430
2431         if (!ctx)
2432                 return;
2433
2434         if (strnstr(ctx->vbios_version, "D16406",
2435                     sizeof(ctx->vbios_version)) ||
2436                 strnstr(ctx->vbios_version, "D36002",
2437                         sizeof(ctx->vbios_version)))
2438                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2439 }
2440
2441 /*
2442  * check the hardware's ras ability, which will be saved in hw_supported.
2443  * if the hardware does not support ras, we can skip some ras initialization
2444  * and forbid some ras operations from the IPs.
2445  * if software itself (say, a boot parameter) limits the ras ability, we
2446  * still need to allow the IPs to do some limited operations, like disable.
2447  * In such a case, we have to initialize ras as normal, but need to check
2448  * in each function whether the operation is allowed or not.
2449  */
2449  */
2450 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2451 {
2452         adev->ras_hw_enabled = adev->ras_enabled = 0;
2453
2454         if (!adev->is_atom_fw ||
2455             !amdgpu_ras_asic_supported(adev))
2456                 return;
2457
2458         if (!adev->gmc.xgmi.connected_to_cpu) {
2459                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2460                         dev_info(adev->dev, "MEM ECC is active.\n");
2461                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2462                                                    1 << AMDGPU_RAS_BLOCK__DF);
2463                 } else {
2464                         dev_info(adev->dev, "MEM ECC is not present.\n");
2465                 }
2466
2467                 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2468                         dev_info(adev->dev, "SRAM ECC is active.\n");
2469                         if (!amdgpu_sriov_vf(adev))
2470                                 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2471                                                             1 << AMDGPU_RAS_BLOCK__DF);
2472                         else
2473                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2474                                                                 1 << AMDGPU_RAS_BLOCK__SDMA |
2475                                                                 1 << AMDGPU_RAS_BLOCK__GFX);
2476
2477                         /* VCN/JPEG RAS can be supported on both bare metal and
2478                          * SRIOV environment
2479                          */
2480                         if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2481                             adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2482                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2483                                                         1 << AMDGPU_RAS_BLOCK__JPEG);
2484                         else
2485                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2486                                                         1 << AMDGPU_RAS_BLOCK__JPEG);
2487
2488                         /*
2489                          * XGMI RAS is not supported if xgmi num physical nodes
2490                          * is zero
2491                          */
2492                         if (!adev->gmc.xgmi.num_physical_nodes)
2493                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2494                 } else {
2495                         dev_info(adev->dev, "SRAM ECC is not present.\n");
2496                 }
2497         } else {
2498                 /* the driver only manages the RAS feature of a few IP blocks
2499                  * when the GPU is connected to the CPU through XGMI */
2500                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2501                                            1 << AMDGPU_RAS_BLOCK__SDMA |
2502                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
2503         }
2504
2505         amdgpu_ras_get_quirks(adev);
2506
2507         /* hw_supported needs to be aligned with RAS block mask. */
2508         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2509
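        /* ras_enabled is the hw-capability mask filtered by the
         * amdgpu_ras_mask module parameter; amdgpu_ras_enable == 0 turns
         * ras off entirely
         */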
2510         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2511                 adev->ras_hw_enabled & amdgpu_ras_mask;
2512 }
2513
2514 static void amdgpu_ras_counte_dw(struct work_struct *work)
2515 {
2516         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2517                                               ras_counte_delay_work.work);
2518         struct amdgpu_device *adev = con->adev;
2519         struct drm_device *dev = adev_to_drm(adev);
2520         unsigned long ce_count, ue_count;
2521         int res;
2522
2523         res = pm_runtime_get_sync(dev->dev);
2524         if (res < 0)
2525                 goto Out;
2526
2527         /* Cache new values.
2528          */
2529         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2530                 atomic_set(&con->ras_ce_count, ce_count);
2531                 atomic_set(&con->ras_ue_count, ue_count);
2532         }
2533
2534         pm_runtime_mark_last_busy(dev->dev);
2535 Out:
2536         pm_runtime_put_autosuspend(dev->dev);
2537 }
2538
2539 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2540 {
2541         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2542         bool df_poison, umc_poison;
2543
2544         /* poison setting is useless on SRIOV guest */
2545         if (amdgpu_sriov_vf(adev) || !con)
2546                 return;
2547
2548         /* Init poison supported flag, the default value is false */
2549         if (adev->gmc.xgmi.connected_to_cpu) {
2550                 /* enabled by default when GPU is connected to CPU */
2551                 con->poison_supported = true;
2552         } else if (adev->df.funcs &&
2553             adev->df.funcs->query_ras_poison_mode &&
2554             adev->umc.ras &&
2555             adev->umc.ras->query_ras_poison_mode) {
2556                 df_poison =
2557                         adev->df.funcs->query_ras_poison_mode(adev);
2558                 umc_poison =
2559                         adev->umc.ras->query_ras_poison_mode(adev);
2560
2561                 /* Only if poison is set in both DF and UMC can we support it */
2562                 if (df_poison && umc_poison)
2563                         con->poison_supported = true;
2564                 else if (df_poison != umc_poison)
2565                         dev_warn(adev->dev,
2566                                 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2567                                 df_poison, umc_poison);
2568         }
2569 }
2570
2571 int amdgpu_ras_init(struct amdgpu_device *adev)
2572 {
2573         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2574         int r;
2575
2576         if (con)
2577                 return 0;
2578
2579         con = kmalloc(sizeof(struct amdgpu_ras) +
2580                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2581                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2582                         GFP_KERNEL|__GFP_ZERO);
2583         if (!con)
2584                 return -ENOMEM;
2585
2586         con->adev = adev;
2587         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2588         atomic_set(&con->ras_ce_count, 0);
2589         atomic_set(&con->ras_ue_count, 0);
2590
2591         con->objs = (struct ras_manager *)(con + 1);
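        /* the ras_manager objects live right after the amdgpu_ras struct in
         * the same allocation
         */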
2592
2593         amdgpu_ras_set_context(adev, con);
2594
2595         amdgpu_ras_check_supported(adev);
2596
2597         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2598                 /* set the gfx block ras context feature for VEGA20 Gaming;
2599                  * send the ras disable cmd to the ras ta during ras late init.
2600                  */
2601                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2602                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2603
2604                         return 0;
2605                 }
2606
2607                 r = 0;
2608                 goto release_con;
2609         }
2610
2611         con->update_channel_flag = false;
2612         con->features = 0;
2613         INIT_LIST_HEAD(&con->head);
2614         /* Might need to get this flag from vbios. */
2615         con->flags = RAS_DEFAULT_FLAGS;
2616
2617         /* initialize nbio ras function ahead of any other
2618          * ras functions so hardware fatal error interrupt
2619          * can be enabled as early as possible */
2620         switch (adev->ip_versions[NBIO_HWIP][0]) {
2621         case IP_VERSION(7, 4, 0):
2622         case IP_VERSION(7, 4, 1):
2623         case IP_VERSION(7, 4, 4):
2624                 if (!adev->gmc.xgmi.connected_to_cpu)
2625                         adev->nbio.ras = &nbio_v7_4_ras;
2626                 break;
2627         case IP_VERSION(4, 3, 0):
2628                 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2629                         /* unlike other generations of nbio ras,
2630                          * nbio v4_3 only supports the fatal error interrupt
2631                          * to inform software that DF is frozen due to a
2632                          * system fatal error event. the driver should not
2633                          * enable nbio ras in such a case. Instead,
2634                          * check DF RAS */
2635                         adev->nbio.ras = &nbio_v4_3_ras;
2636                 break;
2637         default:
2638                 /* nbio ras is not available */
2639                 break;
2640         }
2641
2642         /* nbio ras block needs to be enabled ahead of other ras blocks
2643          * to handle fatal errors */
2644         r = amdgpu_nbio_ras_sw_init(adev);
2645         if (r)
2646                 return r;
2647
2648         if (adev->nbio.ras &&
2649             adev->nbio.ras->init_ras_controller_interrupt) {
2650                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2651                 if (r)
2652                         goto release_con;
2653         }
2654
2655         if (adev->nbio.ras &&
2656             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2657                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2658                 if (r)
2659                         goto release_con;
2660         }
2661
2662         amdgpu_ras_query_poison_mode(adev);
2663
2664         if (amdgpu_ras_fs_init(adev)) {
2665                 r = -EINVAL;
2666                 goto release_con;
2667         }
2668
2669         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2670                  "hardware ability[%x] ras_mask[%x]\n",
2671                  adev->ras_hw_enabled, adev->ras_enabled);
2672
2673         return 0;
2674 release_con:
2675         amdgpu_ras_set_context(adev, NULL);
2676         kfree(con);
2677
2678         return r;
2679 }
2680
2681 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2682 {
2683         if (adev->gmc.xgmi.connected_to_cpu ||
2684             adev->gmc.is_app_apu)
2685                 return 1;
2686         return 0;
2687 }
2688
2689 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2690                                         struct ras_common_if *ras_block)
2691 {
2692         struct ras_query_if info = {
2693                 .head = *ras_block,
2694         };
2695
2696         if (!amdgpu_persistent_edc_harvesting_supported(adev))
2697                 return 0;
2698
2699         if (amdgpu_ras_query_error_status(adev, &info) != 0)
2700                 DRM_WARN("RAS init harvest failure");
2701
2702         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2703                 DRM_WARN("RAS init harvest reset failure");
2704
2705         return 0;
2706 }
2707
2708 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2709 {
2710         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2711
2712         if (!con)
2713                 return false;
2714
2715         return con->poison_supported;
2716 }
2717
2718 /* helper function to handle common stuff in ip late init phase */
2719 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2720                          struct ras_common_if *ras_block)
2721 {
2722         struct amdgpu_ras_block_object *ras_obj = NULL;
2723         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2724         struct ras_query_if *query_info;
2725         unsigned long ue_count, ce_count;
2726         int r;
2727
2728         /* disable RAS feature per IP block if it is not supported */
2729         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2730                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2731                 return 0;
2732         }
2733
2734         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2735         if (r) {
2736                 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2737                         /* in the resume phase, if enabling ras fails,
2738                          * clean up all ras fs nodes and disable ras */
2739                         goto cleanup;
2740                 } else
2741                         return r;
2742         }
2743
2744         /* check for errors on warm reset on ASICs that support persistent EDC harvesting */
2745         amdgpu_persistent_edc_harvesting(adev, ras_block);
2746
2747         /* in resume phase, no need to create ras fs node */
2748         if (adev->in_suspend || amdgpu_in_reset(adev))
2749                 return 0;
2750
2751         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2752         if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2753             (ras_obj->hw_ops->query_poison_status ||
2754             ras_obj->hw_ops->handle_poison_consumption))) {
2755                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2756                 if (r)
2757                         goto cleanup;
2758         }
2759
2760         r = amdgpu_ras_sysfs_create(adev, ras_block);
2761         if (r)
2762                 goto interrupt;
2763
2764         /* Those are the cached values at init.
2765          */
2766         query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
2767         if (!query_info)
2768                 return -ENOMEM;
2769         memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2770
2771         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2772                 atomic_set(&con->ras_ce_count, ce_count);
2773                 atomic_set(&con->ras_ue_count, ue_count);
2774         }
2775
2776         kfree(query_info);
2777         return 0;
2778
2779 interrupt:
2780         if (ras_obj->ras_cb)
2781                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2782 cleanup:
2783         amdgpu_ras_feature_enable(adev, ras_block, 0);
2784         return r;
2785 }
2786
2787 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2788                          struct ras_common_if *ras_block)
2789 {
2790         return amdgpu_ras_block_late_init(adev, ras_block);
2791 }
2792
2793 /* helper function to remove ras fs node and interrupt handler */
2794 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2795                           struct ras_common_if *ras_block)
2796 {
2797         struct amdgpu_ras_block_object *ras_obj;
2798         if (!ras_block)
2799                 return;
2800
2801         amdgpu_ras_sysfs_remove(adev, ras_block);
2802
2803         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2804         if (ras_obj->ras_cb)
2805                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2806 }
2807
2808 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2809                           struct ras_common_if *ras_block)
2810 {
2811         return amdgpu_ras_block_late_fini(adev, ras_block);
2812 }
2813
2814 /* do some init work after IP late init, as a dependency;
2815  * it runs in the resume/gpu reset/boot-up cases.
2816  */
2817 void amdgpu_ras_resume(struct amdgpu_device *adev)
2818 {
2819         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2820         struct ras_manager *obj, *tmp;
2821
2822         if (!adev->ras_enabled || !con) {
2823                 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
2824                 amdgpu_release_ras_context(adev);
2825
2826                 return;
2827         }
2828
2829         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2830                 /* Set up all other IPs, which are not implemented. One
2831                  * tricky thing: an IP's actual ras error type should be
2832                  * MULTI_UNCORRECTABLE, but as the driver does not handle it,
2833                  * ERROR_NONE makes sense anyway.
2834                  */
2835                 amdgpu_ras_enable_all_features(adev, 1);
2836
2837                 /* We enable ras on all hw_supported blocks, but the boot
2838                  * parameter might disable some of them, and one or more IPs
2839                  * may not be implemented yet. So we disable those on their behalf.
2840                  */
2841                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2842                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2843                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2844                                 /* there should not be any reference. */
2845                                 WARN_ON(alive_obj(obj));
2846                         }
2847                 }
2848         }
2849 }

void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!adev->ras_enabled || !con)
                return;

        amdgpu_ras_disable_all_features(adev, 0);
        /* Make sure all ras objects are disabled. */
        if (con->features)
                amdgpu_ras_disable_all_features(adev, 1);
}

int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;
        int r;

        /* The guest side does not need to init the ras feature */
        if (amdgpu_sriov_vf(adev))
                return 0;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                obj = node->ras_obj;
                if (obj->ras_late_init) {
                        r = obj->ras_late_init(adev, &obj->ras_comm);
                        if (r) {
                                dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
                                        obj->ras_comm.name, r);
                                return r;
                        }
                } else {
                        amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
                }
        }

        return 0;
}

/* do some fini work before IP fini as dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!adev->ras_enabled || !con)
                return 0;

        /* Ras must be disabled on all IPs before the IP [hw/sw]fini */
        if (con->features)
                amdgpu_ras_disable_all_features(adev, 0);
        amdgpu_ras_recovery_fini(adev);
        return 0;
}

int amdgpu_ras_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ras_block_list *ras_node, *tmp;
        struct amdgpu_ras_block_object *obj = NULL;
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!adev->ras_enabled || !con)
                return 0;

        list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
                if (ras_node->ras_obj) {
                        obj = ras_node->ras_obj;
                        if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
                            obj->ras_fini)
                                obj->ras_fini(adev, &obj->ras_comm);
                        else
                                amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
                }

                /* Clear ras blocks from ras_list and free ras block list node */
                list_del(&ras_node->node);
                kfree(ras_node);
        }

        amdgpu_ras_fs_fini(adev);
        amdgpu_ras_interrupt_remove_all(adev);

        WARN(con->features, "Feature mask is not cleared");

        if (con->features)
                amdgpu_ras_disable_all_features(adev, 1);

        cancel_delayed_work_sync(&con->ras_counte_delay_work);

        amdgpu_ras_set_context(adev, NULL);
        kfree(con);

        return 0;
}

void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
        amdgpu_ras_check_supported(adev);
        if (!adev->ras_hw_enabled)
                return;

        if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
                dev_info(adev->dev, "uncorrectable hardware error "
                        "(ERREVENT_ATHUB_INTERRUPT) detected!\n");

                amdgpu_ras_reset_gpu(adev);
        }
}

bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_VEGA20 &&
            adev->pm.fw_version <= 0x283400) {
                return amdgpu_asic_reset_method(adev) != AMD_RESET_METHOD_BACO &&
                       amdgpu_ras_intr_triggered();
        }

        return false;
}

void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        if (!con)
                return;

        if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
                con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
                amdgpu_ras_set_context(adev, NULL);
                kfree(con);
        }
}

#ifdef CONFIG_X86_MCE_AMD
static struct amdgpu_device *find_adev(uint32_t node_id)
{
        int i;
        struct amdgpu_device *adev = NULL;

        for (i = 0; i < mce_adev_list.num_gpu; i++) {
                adev = mce_adev_list.devs[i];

                if (adev && adev->gmc.xgmi.connected_to_cpu &&
                    adev->gmc.xgmi.physical_node_id == node_id)
                        break;
                adev = NULL;
        }

        return adev;
}

#define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
#define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET           8

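/* A worked decode of the bit-field macros above, using a hypothetical
 * MCA_IPID value purely for illustration:
 *
 *     ipid = 0x900000B02000;
 *     GET_MCA_IPID_GPUID(ipid) = (ipid >> 44) & 0xF = 0x9,
 *         so gpu_id = 0x9 - GPU_ID_OFFSET = 1;
 *     GET_UMC_INST(ipid)       = (ipid >> 21) & 0x7 = 5;
 *     GET_CHAN_INDEX(ipid)     = bits [13:12] contribute 0x2 and
 *         bit [20] contributes 0x4, so ch_inst = 6.
 */
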
static int amdgpu_bad_page_notifier(struct notifier_block *nb,
                                    unsigned long val, void *data)
{
        struct mce *m = (struct mce *)data;
        struct amdgpu_device *adev = NULL;
        uint32_t gpu_id = 0;
        uint32_t umc_inst = 0, ch_inst = 0;

        /*
         * Only process the error if it was generated in UMC_V2, which
         * belongs to the GPU UMCs, and occurred in DramECC (extended
         * error code = 0); otherwise bail out.
         */
        if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
                    (XEC(m->status, 0x3f) == 0x0)))
                return NOTIFY_DONE;

        /*
         * If it is a correctable error, return.
         */
        if (mce_is_correctable(m))
                return NOTIFY_OK;

        /*
         * The GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
         */
        gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

        adev = find_adev(gpu_id);
        if (!adev) {
                DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
                                                                gpu_id);
                return NOTIFY_DONE;
        }

        /*
         * The error is uncorrectable, so find out the UMC instance and
         * channel index.
         */
        umc_inst = GET_UMC_INST(m->ipid);
        ch_inst = GET_CHAN_INDEX(m->ipid);

        dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d\n",
                             umc_inst, ch_inst);

        if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
                return NOTIFY_OK;
        else
                return NOTIFY_DONE;
}

static struct notifier_block amdgpu_bad_page_nb = {
        .notifier_call  = amdgpu_bad_page_notifier,
        .priority       = MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
        /*
         * Add the adev to the mce_adev_list.
         * During a mode2 reset, the amdgpu device is temporarily
         * removed from the mgpu_info list, which can cause
         * page retirement to fail.
         * Use this list instead of mgpu_info to find the amdgpu
         * device on which the UMC error was reported.
         */
        mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

        /*
         * Register the x86 notifier only once
         * with the MCE subsystem.
         */
        if (!notifier_registered) {
                mce_register_decode_chain(&amdgpu_bad_page_nb);
                notifier_registered = true;
        }
}
#endif

struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
        if (!adev)
                return NULL;

        return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
        if (!adev)
                return -EINVAL;

        adev->psp.ras_context.ras = ras_con;
        return 0;
}

/* check if ras is supported on block, say, sdma, gfx */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
                unsigned int block)
{
        int ret = 0;
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        if (block >= AMDGPU_RAS_BLOCK_COUNT)
                return 0;

        ret = ras && (adev->ras_enabled & (1 << block));

        /* For ASICs with memory ECC enabled but SRAM ECC not enabled,
         * a ras block may be absent from .ras_enabled and still be
         * considered to support the ras function, provided the ASIC
         * supports poison mode and the block has a ras configuration.
         */
        if (!ret &&
            amdgpu_ras_is_poison_mode_supported(adev) &&
            amdgpu_ras_get_ras_block(adev, block, 0))
                ret = 1;

        return ret;
}

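/* An illustrative guard as an IP block might write it; the block enum is
 * real, but the surrounding code path is hypothetical:
 *
 *     if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 *             return 0;
 *     ... proceed with GFX-specific ras work ...
 */
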
int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

        /* in_recovery guarantees that only one recovery work item is
         * scheduled at a time, even if several blocks report errors.
         */
        if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
                amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
        return 0;
}

/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
                struct amdgpu_ras_block_object *ras_block_obj)
{
        struct amdgpu_ras_block_list *ras_node;

        if (!adev || !ras_block_obj)
                return -EINVAL;

        ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
        if (!ras_node)
                return -ENOMEM;

        INIT_LIST_HEAD(&ras_node->node);
        ras_node->ras_obj = ras_block_obj;
        list_add_tail(&ras_node->node, &adev->ras_list);

        return 0;
}

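/* A registration sketch (the block object here is hypothetical; its fields
 * mirror struct amdgpu_ras_block_object as consumed by amdgpu_ras_late_init()
 * and amdgpu_ras_fini() above):
 *
 *     static struct amdgpu_ras_block_object my_ip_ras_block = {
 *             .ras_comm = {
 *                     .name  = "my_ip",
 *                     .block = AMDGPU_RAS_BLOCK__SDMA,
 *             },
 *             .ras_late_init = my_ip_ras_late_init,
 *             .ras_fini      = my_ip_ras_fini,
 *     };
 *
 *     amdgpu_ras_register_ras_block(adev, &my_ip_ras_block);
 *
 * Blocks registered this way are picked up by the ras_list walks in
 * amdgpu_ras_late_init() and amdgpu_ras_fini() above.
 */
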
void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
{
        if (!err_type_name)
                return;

        switch (err_type) {
        case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
                sprintf(err_type_name, "correctable");
                break;
        case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
                sprintf(err_type_name, "uncorrectable");
                break;
        default:
                sprintf(err_type_name, "unknown");
                break;
        }
}

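/* Note: the helper above sprintf()s into a caller-provided buffer without a
 * length check. The longest string written is "uncorrectable" (14 bytes
 * including the terminator), so callers must pass at least that much; see
 * the char[16] used by amdgpu_ras_inst_query_ras_error_count() below.
 */
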
bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
                                         const struct amdgpu_ras_err_status_reg_entry *reg_entry,
                                         uint32_t instance,
                                         uint32_t *memory_id)
{
        uint32_t err_status_lo_data, err_status_lo_offset;

        if (!reg_entry)
                return false;

        err_status_lo_offset =
                AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
                                            reg_entry->seg_lo, reg_entry->reg_lo);
        err_status_lo_data = RREG32(err_status_lo_offset);

        if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
            !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
                return false;

        *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);

        return true;
}

bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
                                       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
                                       uint32_t instance,
                                       unsigned long *err_cnt)
{
        uint32_t err_status_hi_data, err_status_hi_offset;

        if (!reg_entry)
                return false;

        err_status_hi_offset =
                AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
                                            reg_entry->seg_hi, reg_entry->reg_hi);
        err_status_hi_data = RREG32(err_status_hi_offset);

        if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
            !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG)) {
                /* keep the check here in case we need to refer to the result later */
                dev_dbg(adev->dev, "Invalid err_info field\n");
        }

        /* read err count */
        *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);

        return true;
}

void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
                                           const struct amdgpu_ras_err_status_reg_entry *reg_list,
                                           uint32_t reg_list_size,
                                           const struct amdgpu_ras_memory_id_entry *mem_list,
                                           uint32_t mem_list_size,
                                           uint32_t instance,
                                           uint32_t err_type,
                                           unsigned long *err_count)
{
        uint32_t memory_id;
        unsigned long err_cnt;
        char err_type_name[16];
        uint32_t i, j;

        for (i = 0; i < reg_list_size; i++) {
                /* query memory_id from err_status_lo */
                if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
                                                         instance, &memory_id))
                        continue;

                /* query err_cnt from err_status_hi */
                if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
                                                       instance, &err_cnt) ||
                    !err_cnt)
                        continue;

                *err_count += err_cnt;

                /* log the errors */
                amdgpu_ras_get_error_type_name(err_type, err_type_name);
                if (!mem_list) {
                        /* memory_list is not supported */
                        dev_info(adev->dev,
                                 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
                                 err_cnt, err_type_name,
                                 reg_list[i].block_name,
                                 instance, memory_id);
                } else {
                        for (j = 0; j < mem_list_size; j++) {
                                if (memory_id == mem_list[j].memory_id) {
                                        dev_info(adev->dev,
                                                 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
                                                 err_cnt, err_type_name,
                                                 reg_list[i].block_name,
                                                 instance, mem_list[j].name);
                                        break;
                                }
                        }
                }
        }
}

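/* An illustrative call (my_ip_err_status_regs and my_ip_mem_list are
 * hypothetical per-IP tables; a real block supplies its own
 * amdgpu_ras_err_status_reg_entry array):
 *
 *     unsigned long err_count = 0;
 *
 *     amdgpu_ras_inst_query_ras_error_count(adev,
 *                     my_ip_err_status_regs, ARRAY_SIZE(my_ip_err_status_regs),
 *                     my_ip_mem_list, ARRAY_SIZE(my_ip_mem_list),
 *                     instance, AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *                     &err_count);
 */
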
void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
                                           const struct amdgpu_ras_err_status_reg_entry *reg_list,
                                           uint32_t reg_list_size,
                                           uint32_t instance)
{
        uint32_t err_status_lo_offset, err_status_hi_offset;
        uint32_t i;

        for (i = 0; i < reg_list_size; i++) {
                err_status_lo_offset =
                        AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
                                                    reg_list[i].seg_lo, reg_list[i].reg_lo);
                err_status_hi_offset =
                        AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
                                                    reg_list[i].seg_hi, reg_list[i].reg_hi);
                WREG32(err_status_lo_offset, 0);
                WREG32(err_status_hi_offset, 0);
        }
}
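
/*
 * IP blocks typically pair the two helpers above: query the per-instance
 * error counts first, then clear the status registers so the next query
 * starts from zero. This is a common pattern, not a hard requirement.
 */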