/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "atom.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
        "none",
        "parity",
        "single_correctable",
        "multi_uncorrectable",
        "poison",
};

const char *ras_block_string[] = {
        "umc",
        "sdma",
        "gfx",
        "mmhub",
        "athub",
        "pcie_bif",
        "hdp",
        "xgmi_wafl",
        "df",
        "smn",
        "sem",
        "mp0",
        "mp1",
        "fuse",
        "mca",
        "vcn",
        "jpeg",
};

const char *ras_mca_block_string[] = {
        "mca_mp0",
        "mca_mp1",
        "mca_mpio",
        "mca_iohc",
};

struct amdgpu_ras_block_list {
        /* ras block link */
        struct list_head node;

        struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
        if (!ras_block)
                return "NULL";

        if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
                return "OUT OF RANGE";

        if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
                return ras_mca_block_string[ras_block->sub_block_index];

        return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
        (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
        AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
        struct amdgpu_device *devs[MAX_GPU_INSTANCE];
        int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
        if (adev && amdgpu_ras_get_context(adev))
                amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
        if (adev && amdgpu_ras_get_context(adev))
                return amdgpu_ras_get_context(adev)->error_query_ready;

        return false;
}

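/* Used by the debugfs "retire_page" test op: mark the page at @address bad
 * and, if a bad page threshold is configured, persist the new record to the
 * RAS EEPROM.
 */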
static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
        struct ras_err_data err_data = {0, 0, 0, NULL};
        struct eeprom_table_record err_rec;

        if ((address >= adev->gmc.mc_vram_size) ||
            (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                dev_warn(adev->dev,
                         "RAS WARN: input address 0x%llx is invalid.\n",
                         address);
                return -EINVAL;
        }

        if (amdgpu_ras_check_bad_page(adev, address)) {
                dev_warn(adev->dev,
                         "RAS WARN: 0x%llx has already been marked as bad page!\n",
                         address);
                return 0;
        }

        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
        amdgpu_umc_fill_error_record(&err_data, address,
                        (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);

        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
                                         err_data.err_addr_cnt);
                amdgpu_ras_save_bad_pages(adev);
        }

        dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
        dev_warn(adev->dev, "Clear EEPROM:\n");
        dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

        return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
        struct ras_query_if info = {
                .head = obj->head,
        };
        ssize_t s;
        char val[128];

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
        if (*pos >= s)
                return 0;

        s -= *pos;
        s = min_t(u64, s, size);

        if (copy_to_user(buf, &val[*pos], s))
                return -EINVAL;

        *pos += s;

        return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ras_debugfs_read,
        .write = NULL,
        .llseek = default_llseek
};

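/* Map a RAS block name, as accepted by the debugfs ctrl interface, back to
 * its index in ras_block_string[].
 */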
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
                *block_id = i;
                if (strcmp(name, ras_block_string[i]) == 0)
                        return 0;
        }
        return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                const char __user *buf, size_t size,
                loff_t *pos, struct ras_debug_if *data)
{
        ssize_t s = min_t(u64, 64, size);
        char str[65];
        char block_name[33];
        char err[9] = "ue";
        int op = -1;
        int block_id;
        uint32_t sub_block;
        u64 address, value;

        if (*pos)
                return -EINVAL;
        *pos = size;

        memset(str, 0, sizeof(str));
        memset(data, 0, sizeof(*data));

        if (copy_from_user(str, buf, s))
                return -EINVAL;

        if (sscanf(str, "disable %32s", block_name) == 1)
                op = 0;
        else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
        else if (strstr(str, "retire_page") != NULL)
                op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;

        if (op != -1) {
                if (op == 3) {
                        if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
                            sscanf(str, "%*s %llu", &address) != 1)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;

                        return 0;
                }

                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;

                data->head.block = block_id;
                /* only ue and ce errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
                else
                        return -EINVAL;

                data->op = op;

                if (op == 2) {
                        if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
                                   &sub_block, &address, &value) != 3 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu",
                                   &sub_block, &address, &value) != 3)
                                return -EINVAL;
                        data->head.sub_block_index = sub_block;
                        data->inject.address = address;
                        data->inject.value = value;
                }
        } else {
                if (size < sizeof(*data))
                        return -EINVAL;

                if (copy_from_user(data, buf, sizeof(*data)))
                        return -EINVAL;
        }

        return 0;
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have sub-components, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code, initialize it, and write
 * the struct to the control interface, as sketched below.
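 *
 * A minimal illustrative sketch (includes and error handling elided; the
 * open/write sequence here is an assumption, not taken from this file):
 *
 * .. code-block:: c
 *
 *      struct ras_debug_if data = { 0 };
 *      int fd;
 *
 *      data.op = 1;                            // 1 == enable
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *
 *      fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *      write(fd, &data, sizeof(data));
 *      close(fd);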
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "inject  <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * where N is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *      see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *      ue is multi-uncorrectable
 *      ce is single-correctable
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *      Operations are only allowed on blocks which are supported.
 *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *      to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
                                             const char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
        int ret = 0;

        if (!amdgpu_ras_get_error_query_ready(adev)) {
                dev_warn(adev->dev, "RAS WARN: error injection currently inaccessible\n");
                return size;
        }

        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
        if (ret)
                return ret;

        if (data.op == 3) {
                ret = amdgpu_reserve_page_direct(adev, data.inject.address);
                if (!ret)
                        return size;
                else
                        return ret;
        }

        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;

        switch (data.op) {
        case 0:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
                break;
        case 1:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
                if ((data.inject.address >= adev->gmc.mc_vram_size) ||
                    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                        dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
                                 data.inject.address);
                        ret = -EINVAL;
                        break;
                }

                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
                        dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
                                 data.inject.address);
                        break;
                }

                /* data.inject.address is offset instead of absolute gpu address */
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experience ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
                                               const char __user *buf,
                                               size_t size, loff_t *pos)
{
        struct amdgpu_device *adev =
                (struct amdgpu_device *)file_inode(f)->i_private;
        int ret;

        ret = amdgpu_ras_eeprom_reset_table(
                &(amdgpu_ras_get_context(adev)->eeprom_control));

        if (!ret) {
                /* Something was written to EEPROM.
                 */
                amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
                return size;
        } else {
                return ret;
        }
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_ctrl_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_eeprom_write,
        .llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *      ue: 0
 *      ce: 1
 *
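 * An entry can be read directly, e.g. (assuming card0 and the umc block):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count
 *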
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        if (obj->adev->asic_type == CHIP_ALDEBARAN) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        DRM_WARN("Failed to reset error counter and error status");
        }

        return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                          "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

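/* Drop one reference; the obj is unlinked from its list once the last user
 * is gone.
 */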
static inline void put_obj(struct ras_manager *obj)
{
        if (obj && (--obj->use == 0))
                list_del(&obj->node);
        if (obj && (obj->use < 0))
                DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                return NULL;

        if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                        return NULL;

                obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
        } else
                obj = &con->objs[head->block];

        /* already exists; don't create it again */
        if (alive_obj(obj))
                return NULL;

        obj->head = *head;
        obj->adev = adev;
        list_add(&obj->node, &con->head);
        get_obj(obj);

        return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        int i;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head) {
                if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                        return NULL;

                if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                        if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                                return NULL;

                        obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
                } else
                        obj = &con->objs[head->block];

                if (alive_obj(obj))
                        return obj;
        } else {
                for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                        obj = &con->objs[i];
                        if (alive_obj(obj))
                                return obj;
                }
        }

        return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
{
        return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return con->features & BIT(head->block);
}

/*
 * If the obj is not created yet, create one.
 * Set the feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, int enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        /* If the hardware does not support ras, do not create the obj.
         * But if the hardware does support ras, we can create it.
         * The ras framework checks con->hw_supported to see if it needs to do
         * the corresponding initialization.
         * The IP checks con->support to see if it needs to disable ras.
         */
        if (!amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        if (enable) {
                if (!obj) {
                        obj = amdgpu_ras_create_obj(adev, head);
                        if (!obj)
                                return -EINVAL;
                } else {
                        /* In case the obj was created somewhere else */
                        get_obj(obj);
                }
                con->features |= BIT(head->block);
        } else {
                if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
                        con->features &= ~BIT(head->block);
                        put_obj(obj);
                }
        }

        return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        union ta_ras_cmd_input *info;
        int ret;

        if (!con)
                return -EINVAL;

        info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        if (!enable) {
                info->disable_features = (struct ta_ras_disable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
        } else {
                info->enable_features = (struct ta_ras_enable_features_input) {
                        .block_id =  amdgpu_ras_block_to_ta(head->block),
                        .error_type = amdgpu_ras_error_to_ta(head->type),
                };
        }

        /* Do not enable if it is not allowed. */
        WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));

        if (!amdgpu_ras_intr_triggered()) {
                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
                                enable ? "enable" : "disable",
                                get_ras_block_str(head),
                                amdgpu_ras_is_poison_mode_supported(adev), ret);
                        goto out;
                }
        }

        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);
        ret = 0;
out:
        kfree(info);
        return ret;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret;

        if (!con)
                return -EINVAL;

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                if (enable) {
                        /* There is no harm in issuing a ras TA cmd regardless of
                         * the current ras state.
                         * If current state == target state, it will do nothing.
                         * But sometimes it requests the driver to reset and repost
                         * with error code -EAGAIN.
                         */
                        ret = amdgpu_ras_feature_enable(adev, head, 1);
                        /* With an old ras TA, we might fail to enable ras.
                         * Log it and just set up the object.
                         * TODO: remove this WA in the future.
                         */
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
                                        dev_info(adev->dev,
                                                "RAS INFO: %s setup object\n",
                                                get_ras_block_str(head));
                        }
                } else {
                        /* setup the object then issue a ras TA disable cmd.*/
                        ret = __amdgpu_ras_feature_enable(adev, head, 1);
                        if (ret)
                                return ret;

                        /* the gfx block ras disable cmd must be sent to the ras TA */
                        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features |= BIT(head->block);

                        ret = amdgpu_ras_feature_enable(adev, head, 0);

                        /* clean the gfx block ras features flag */
                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);

        return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                /* bypass psp,
                 * i.e. just release the obj and corresponding flags
                 */
                if (bypass) {
                        if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                }
        }

        return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int i;
        const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = i,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };

                if (i == AMDGPU_RAS_BLOCK__MCA)
                        continue;

                if (bypass) {
                        /*
                         * bypass psp. vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = AMDGPU_RAS_BLOCK__MCA,
                        .type = default_ras_type,
                        .sub_block_index = i,
                };

                if (bypass) {
                        /*
                         * bypass psp. vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
                enum amdgpu_ras_block block)
{
        if (!block_obj)
                return -EINVAL;

        if (block_obj->ras_comm.block == block)
                return 0;

        return -EINVAL;
}

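/* Walk adev->ras_list and return the registered block object for @block,
 * preferring a block's own ->ras_block_match() over the default match on
 * the block id.
 */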
static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
                                        enum amdgpu_ras_block block, uint32_t sub_block_index)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;

        if (block >= AMDGPU_RAS_BLOCK__LAST)
                return NULL;

        if (!amdgpu_ras_is_supported(adev, block))
                return NULL;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                obj = node->ras_obj;
                if (obj->ras_block_match) {
                        if (obj->ras_block_match(obj, block, sub_block_index) == 0)
                                return obj;
                } else {
                        if (amdgpu_ras_block_match_default(obj, block) == 0)
                                return obj;
                }
        }

        return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int ret = 0;

        /*
         * choose the right query method according to
         * whether the smu supports error information queries
         */
        ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_count)
                        adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_address)
                        adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                                  struct ras_query_if *info)
{
        struct amdgpu_ras_block_object *block_obj = NULL;
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data = {0, 0, 0, NULL};

        if (!obj)
                return -EINVAL;

        if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
                amdgpu_ras_get_ecc_info(adev, &err_data);
        } else {
                block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
                if (!block_obj || !block_obj->hw_ops) {
                        dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                                     get_ras_block_str(&info->head));
                        return -EINVAL;
                }

                if (block_obj->hw_ops->query_ras_error_count)
                        block_obj->hw_ops->query_ras_error_count(adev, &err_data);

                if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
                    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
                    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
                        if (block_obj->hw_ops->query_ras_error_status)
                                block_obj->hw_ops->query_ras_error_status(adev);
                }
        }

        obj->err_data.ue_count += err_data.ue_count;
        obj->err_data.ce_count += err_data.ce_count;

        info->ue_count = obj->err_data.ue_count;
        info->ce_count = obj->err_data.ce_count;

        if (err_data.ce_count) {
                if (adev->smuio.funcs &&
                    adev->smuio.funcs->get_socket_id &&
                    adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                        "%ld correctable hardware errors "
                                        "detected in %s block, no user "
                                        "action is needed.\n",
                                        adev->smuio.funcs->get_socket_id(adev),
                                        adev->smuio.funcs->get_die_id(adev),
                                        obj->err_data.ce_count,
                                        get_ras_block_str(&info->head));
                } else {
                        dev_info(adev->dev, "%ld correctable hardware errors "
                                        "detected in %s block, no user "
                                        "action is needed.\n",
                                        obj->err_data.ce_count,
                                        get_ras_block_str(&info->head));
                }
        }
        if (err_data.ue_count) {
                if (adev->smuio.funcs &&
                    adev->smuio.funcs->get_socket_id &&
                    adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                        "%ld uncorrectable hardware errors "
                                        "detected in %s block\n",
                                        adev->smuio.funcs->get_socket_id(adev),
                                        adev->smuio.funcs->get_die_id(adev),
                                        obj->err_data.ue_count,
                                        get_ras_block_str(&info->head));
                } else {
                        dev_info(adev->dev, "%ld uncorrectable hardware errors "
                                        "detected in %s block\n",
                                        obj->err_data.ue_count,
                                        get_ras_block_str(&info->head));
                }
        }

        if (!amdgpu_persistent_edc_harvesting_supported(adev))
                amdgpu_ras_reset_error_status(adev, info->head.block);

        return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
{
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

        if (!amdgpu_ras_is_supported(adev, block))
                return -EINVAL;

        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                             ras_block_str(block));
                return -EINVAL;
        }

        if (block_obj->hw_ops->reset_ras_error_count)
                block_obj->hw_ops->reset_ras_error_count(adev);

        if ((block == AMDGPU_RAS_BLOCK__GFX) ||
            (block == AMDGPU_RAS_BLOCK__MMHUB)) {
                if (block_obj->hw_ops->reset_ras_error_status)
                        block_obj->hw_ops->reset_ras_error_status(adev);
        }

        return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ta_ras_trigger_error_input block_info = {
                .block_id =  amdgpu_ras_block_to_ta(info->head.block),
                .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
                .sub_block_index = info->head.sub_block_index,
                .address = info->address,
                .value = info->value,
        };
        int ret = -EINVAL;
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
                                                        info->head.block,
                                                        info->head.sub_block_index);

        if (!obj)
                return -EINVAL;

        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                             get_ras_block_str(&info->head));
                return -EINVAL;
        }

        /* Calculate XGMI relative offset */
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                block_info.address =
                        amdgpu_xgmi_get_relative_phy_addr(adev,
                                                          block_info.address);
        }

        if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
                if (block_obj->hw_ops->ras_error_inject)
                        ret = block_obj->hw_ops->ras_error_inject(adev, info);
        } else {
                /* blocks with a special ras_error_inject (e.g. xgmi) use their own */
                if (block_obj->hw_ops->ras_error_inject)
                        ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
                else /* otherwise fall back to the default psp-based injection */
                        ret = psp_ras_trigger_error(&adev->psp, &block_info);
        }

        if (ret)
                dev_err(adev->dev, "ras inject %s failed %d\n",
                        get_ras_block_str(&info->head), ret);

        return ret;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 *
 * If @ce_count or @ue_count is set, count the corresponding errors and
 * return them in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
                                 unsigned long *ce_count,
                                 unsigned long *ue_count)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        unsigned long ce, ue;

        if (!adev->ras_enabled || !con)
                return -EOPNOTSUPP;

        /* Don't count since no reporting.
         */
        if (!ce_count && !ue_count)
                return 0;

        ce = 0;
        ue = 0;
        list_for_each_entry(obj, &con->head, node) {
                struct ras_query_if info = {
                        .head = obj->head,
                };
                int res;

                res = amdgpu_ras_query_error_status(adev, &info);
                if (res)
                        return res;

                ce += info.ce_count;
                ue += info.ue_count;
        }

        if (ce_count)
                *ce_count = ce;

        if (ue_count)
                *ue_count = ue;

        return 0;
}
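/* A sketch of a typical caller (illustrative only, not taken from this file):
 *
 *      unsigned long ce_count, ue_count;
 *
 *      if (!amdgpu_ras_query_error_count(adev, &ce_count, &ue_count))
 *              dev_info(adev->dev, "ce %lu, ue %lu\n", ce_count, ue_count);
 */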
1162 /* query/inject/cure end */
1163
1164
1165 /* sysfs begin */
1166
1167 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1168                 struct ras_badpage **bps, unsigned int *count);
1169
1170 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1171 {
1172         switch (flags) {
1173         case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1174                 return "R";
1175         case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1176                 return "P";
1177         case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1178         default:
1179                 return "F";
1180         }
1181 }
1182
1183 /**
1184  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1185  *
1186  * It allows user to read the bad pages of vram on the gpu through
1187  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1188  *
1189  * It outputs multiple lines, and each line stands for one gpu page.
1190  *
1191  * The format of one line is below,
1192  * gpu pfn : gpu page size : flags
1193  *
1194  * gpu pfn and gpu page size are printed in hex format.
1195  * flags can be one of below character,
1196  *
1197  * R: reserved, this gpu page is reserved and not able to use.
1198  *
1199  * P: pending for reserve, this gpu page is marked as bad, will be reserved
1200  * in next window of page_reserve.
1201  *
1202  * F: unable to reserve. this gpu page can't be reserved due to some reasons.
1203  *
1204  * Examples:
1205  *
1206  * .. code-block:: bash
1207  *
1208  *      0x00000001 : 0x00001000 : R
1209  *      0x00000002 : 0x00001000 : P
1210  *
1211  */
1212
1213 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1214                 struct kobject *kobj, struct bin_attribute *attr,
1215                 char *buf, loff_t ppos, size_t count)
1216 {
1217         struct amdgpu_ras *con =
1218                 container_of(attr, struct amdgpu_ras, badpages_attr);
1219         struct amdgpu_device *adev = con->adev;
1220         const unsigned int element_size =
1221                 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1222         unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1223         unsigned int end = div64_ul(ppos + count - 1, element_size);
1224         ssize_t s = 0;
1225         struct ras_badpage *bps = NULL;
1226         unsigned int bps_count = 0;
1227
1228         memset(buf, 0, count);
1229
1230         if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1231                 return 0;
1232
1233         for (; start < end && start < bps_count; start++)
1234                 s += scnprintf(&buf[s], element_size + 1,
1235                                 "0x%08x : 0x%08x : %1s\n",
1236                                 bps[start].bp,
1237                                 bps[start].size,
1238                                 amdgpu_ras_badpage_flags_str(bps[start].flags));
1239
1240         kfree(bps);
1241
1242         return s;
1243 }
1244
1245 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1246                 struct device_attribute *attr, char *buf)
1247 {
1248         struct amdgpu_ras *con =
1249                 container_of(attr, struct amdgpu_ras, features_attr);
1250
1251         return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
1252 }
1253
1254 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1255 {
1256         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1257
1258         sysfs_remove_file_from_group(&adev->dev->kobj,
1259                                 &con->badpages_attr.attr,
1260                                 RAS_FS_NAME);
1261 }
1262
1263 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1264 {
1265         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1266         struct attribute *attrs[] = {
1267                 &con->features_attr.attr,
1268                 NULL
1269         };
1270         struct attribute_group group = {
1271                 .name = RAS_FS_NAME,
1272                 .attrs = attrs,
1273         };
1274
1275         sysfs_remove_group(&adev->dev->kobj, &group);
1276
1277         return 0;
1278 }
1279
1280 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1281                 struct ras_common_if *head)
1282 {
1283         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1284
1285         if (!obj || obj->attr_inuse)
1286                 return -EINVAL;
1287
1288         get_obj(obj);
1289
1290         snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1291                 "%s_err_count", head->name);
1292
1293         obj->sysfs_attr = (struct device_attribute){
1294                 .attr = {
1295                         .name = obj->fs_data.sysfs_name,
1296                         .mode = S_IRUGO,
1297                 },
1298                         .show = amdgpu_ras_sysfs_read,
1299         };
1300         sysfs_attr_init(&obj->sysfs_attr.attr);
1301
1302         if (sysfs_add_file_to_group(&adev->dev->kobj,
1303                                 &obj->sysfs_attr.attr,
1304                                 RAS_FS_NAME)) {
1305                 put_obj(obj);
1306                 return -EINVAL;
1307         }
1308
1309         obj->attr_inuse = 1;
1310
1311         return 0;
1312 }
1313
1314 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1315                 struct ras_common_if *head)
1316 {
1317         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1318
1319         if (!obj || !obj->attr_inuse)
1320                 return -EINVAL;
1321
1322         sysfs_remove_file_from_group(&adev->dev->kobj,
1323                                 &obj->sysfs_attr.attr,
1324                                 RAS_FS_NAME);
1325         obj->attr_inuse = 0;
1326         put_obj(obj);
1327
1328         return 0;
1329 }
1330
1331 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1332 {
1333         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1334         struct ras_manager *obj, *tmp;
1335
1336         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1337                 amdgpu_ras_sysfs_remove(adev, &obj->head);
1338         }
1339
1340         if (amdgpu_bad_page_threshold != 0)
1341                 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1342
1343         amdgpu_ras_sysfs_remove_feature_node(adev);
1344
1345         return 0;
1346 }
1347 /* sysfs end */
1348
1349 /**
1350  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1351  *
1352  * Normally when there is an uncorrectable error, the driver will reset
1353  * the GPU to recover.  However, in the event of an unrecoverable error,
1354  * the driver provides an interface to reboot the system automatically
1355  * in that event.
1356  *
1357  * The following file in debugfs provides that interface:
1358  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1359  *
1360  * Usage:
1361  *
1362  * .. code-block:: bash
1363  *
1364  *      echo true > .../ras/auto_reboot
1365  *
1366  */
1367 /* debugfs begin */
1368 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1369 {
1370         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1371         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1372         struct dentry     *dir;
1373
1374         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1375         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1376                             &amdgpu_ras_debugfs_ctrl_ops);
1377         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1378                             &amdgpu_ras_debugfs_eeprom_ops);
1379         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1380                            &con->bad_page_cnt_threshold);
1381         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1382         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1383         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1384                             &amdgpu_ras_debugfs_eeprom_size_ops);
1385         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1386                                                        S_IRUGO, dir, adev,
1387                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1388         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1389
1390         /*
1391          * After one uncorrectable error happens, usually GPU recovery will
1392          * be scheduled. But due to the known problem in GPU recovery failing
1393          * to bring GPU back, below interface provides one direct way to
1394          * user to reboot system automatically in such case within
1395          * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
1396          * will never be called.
1397          */
1398         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1399
1400         /*
1401          * User could set this not to clean up hardware's error count register
1402          * of RAS IPs during ras recovery.
1403          */
1404         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1405                             &con->disable_ras_err_cnt_harvest);
1406         return dir;
1407 }
1408
1409 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1410                                       struct ras_fs_if *head,
1411                                       struct dentry *dir)
1412 {
1413         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1414
1415         if (!obj || !dir)
1416                 return;
1417
1418         get_obj(obj);
1419
1420         memcpy(obj->fs_data.debugfs_name,
1421                         head->debugfs_name,
1422                         sizeof(obj->fs_data.debugfs_name));
1423
1424         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1425                             obj, &amdgpu_ras_debugfs_ops);
1426 }
1427
1428 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1429 {
1430         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1431         struct dentry *dir;
1432         struct ras_manager *obj;
1433         struct ras_fs_if fs_info;
1434
1435         /*
1436          * it won't be called in resume path, no need to check
1437          * suspend and gpu reset status
1438          */
1439         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1440                 return;
1441
1442         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1443
1444         list_for_each_entry(obj, &con->head, node) {
1445                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1446                         (obj->attr_inuse == 1)) {
1447                         sprintf(fs_info.debugfs_name, "%s_err_inject",
1448                                         get_ras_block_str(&obj->head));
1449                         fs_info.head = obj->head;
1450                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1451                 }
1452         }
1453 }
1454
1455 /* debugfs end */
1456
1457 /* ras fs */
1458 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1459                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1460 static DEVICE_ATTR(features, S_IRUGO,
1461                 amdgpu_ras_sysfs_features_read, NULL);
1462 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1463 {
1464         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1465         struct attribute_group group = {
1466                 .name = RAS_FS_NAME,
1467         };
1468         struct attribute *attrs[] = {
1469                 &con->features_attr.attr,
1470                 NULL
1471         };
1472         struct bin_attribute *bin_attrs[] = {
1473                 NULL,
1474                 NULL,
1475         };
1476         int r;
1477
1478         /* add features entry */
1479         con->features_attr = dev_attr_features;
1480         group.attrs = attrs;
1481         sysfs_attr_init(attrs[0]);
1482
1483         if (amdgpu_bad_page_threshold != 0) {
1484                 /* add bad_page_features entry */
1485                 bin_attr_gpu_vram_bad_pages.private = NULL;
1486                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1487                 bin_attrs[0] = &con->badpages_attr;
1488                 group.bin_attrs = bin_attrs;
1489                 sysfs_bin_attr_init(bin_attrs[0]);
1490         }
1491
1492         r = sysfs_create_group(&adev->dev->kobj, &group);
1493         if (r)
1494                 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1495
1496         return 0;
1497 }
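
/*
 * Resulting sysfs layout (sketch; "card0" is an assumption): with the
 * group above registered on the device kobject, userspace sees roughly
 *
 *   /sys/class/drm/card0/device/ras/features
 *   /sys/class/drm/card0/device/ras/gpu_vram_bad_pages  (threshold != 0)
 *
 * A sysfs_create_group() failure is deliberately non-fatal here: only
 * a warning is printed and init continues.
 */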
1498
1499 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1500 {
1501         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1502         struct ras_manager *con_obj, *ip_obj, *tmp;
1503
1504         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1505                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1506                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1507                         if (ip_obj)
1508                                 put_obj(ip_obj);
1509                 }
1510         }
1511
1512         amdgpu_ras_sysfs_remove_all(adev);
1513         return 0;
1514 }
1515 /* ras fs end */
1516
1517 /* ih begin */
1518
1519 /* For the hardware that cannot enable bif ring for both ras_controller_irq
1520  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
1521  * register to check whether the interrupt is triggered or not, and properly
1522  * ack the interrupt if it is there
1523  */
1524 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1525 {
1526         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
1527                 return;
1528
1529         if (adev->nbio.ras &&
1530             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1531                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1532
1533         if (adev->nbio.ras &&
1534             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1535                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1536 }
1537
1538 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1539                                 struct amdgpu_iv_entry *entry)
1540 {
1541         bool poison_stat = true, need_reset = true;
1542         struct amdgpu_device *adev = obj->adev;
1543         struct ras_err_data err_data = {0, 0, 0, NULL};
1544         struct amdgpu_ras_block_object *block_obj =
1545                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1546
1547         if (!adev->gmc.xgmi.connected_to_cpu)
1548                 amdgpu_umc_poison_handler(adev, &err_data, false);
1549
1550         /* both query_poison_status and handle_poison_consumption are optional */
1551         if (block_obj && block_obj->hw_ops) {
1552                 if (block_obj->hw_ops->query_poison_status) {
1553                         poison_stat = block_obj->hw_ops->query_poison_status(adev);
1554                         if (!poison_stat)
1555                                 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1556                                                 block_obj->ras_comm.name);
1557                 }
1558
1559                 if (poison_stat && block_obj->hw_ops->handle_poison_consumption) {
1560                         poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1561                         need_reset = poison_stat;
1562                 }
1563         }
1564
1565         /* gpu reset is fallback for all failed cases */
1566         if (need_reset)
1567                 amdgpu_ras_reset_gpu(adev);
1568 }
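
/*
 * Sketch of the optional hooks used above (an assumption, not a real IP
 * implementation; all example_* names are hypothetical):
 */
static bool example_query_poison_status(struct amdgpu_device *adev)
{
        /* a real block would read its poison status register here */
        return true;
}

static bool example_handle_poison_consumption(struct amdgpu_device *adev)
{
        /* return true if a full GPU reset is still required */
        return true;
}

static struct amdgpu_ras_block_hw_ops example_hw_ops = {
        .query_poison_status = example_query_poison_status,
        .handle_poison_consumption = example_handle_poison_consumption,
};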
1569
1570 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1571                                 struct amdgpu_iv_entry *entry)
1572 {
1573         dev_info(obj->adev->dev,
1574                 "Poison is created, no user action is needed.\n");
1575 }
1576
1577 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1578                                 struct amdgpu_iv_entry *entry)
1579 {
1580         struct ras_ih_data *data = &obj->ih_data;
1581         struct ras_err_data err_data = {0, 0, 0, NULL};
1582         int ret;
1583
1584         if (!data->cb)
1585                 return;
1586
1587         /* Let the IP handle its data; maybe we need to get the output
1588          * from the callback to update the error type/count, etc.
1589          */
1590         ret = data->cb(obj->adev, &err_data, entry);
1591         /* A UE will trigger an interrupt, and in that case
1592          * we need to do a reset to recover the whole system.
1593          * But leave that recovery to the IP; here we just
1594          * dispatch the error.
1595          */
1596         if (ret == AMDGPU_RAS_SUCCESS) {
1597                 /* these counts could be left as 0 if
1598                  * some blocks do not count error number
1599                  */
1600                 obj->err_data.ue_count += err_data.ue_count;
1601                 obj->err_data.ce_count += err_data.ce_count;
1602         }
1603 }
1604
1605 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1606 {
1607         struct ras_ih_data *data = &obj->ih_data;
1608         struct amdgpu_iv_entry entry;
1609
1610         while (data->rptr != data->wptr) {
1611                 rmb();
1612                 memcpy(&entry, &data->ring[data->rptr],
1613                                 data->element_size);
1614
1615                 wmb();
1616                 data->rptr = (data->aligned_element_size +
1617                                 data->rptr) % data->ring_size;
1618
1619                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1620                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1621                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1622                         else
1623                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1624                 } else {
1625                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1626                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1627                         else
1628                                 dev_warn(obj->adev->dev,
1629                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1630                 }
1631         }
1632 }
1633
1634 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1635 {
1636         struct ras_ih_data *data =
1637                 container_of(work, struct ras_ih_data, ih_work);
1638         struct ras_manager *obj =
1639                 container_of(data, struct ras_manager, ih_data);
1640
1641         amdgpu_ras_interrupt_handler(obj);
1642 }
1643
1644 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1645                 struct ras_dispatch_if *info)
1646 {
1647         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1648         struct ras_ih_data *data;
1649
1650         if (!obj)
1651                 return -EINVAL;
1652         data = &obj->ih_data;
1653         if (data->inuse == 0)
1654                 return 0;
1655
1656         /* Might overflow: the writer does not check for a full ring. */
1657         memcpy(&data->ring[data->wptr], info->entry,
1658                         data->element_size);
1659
1660         wmb();
1661         data->wptr = (data->aligned_element_size +
1662                         data->wptr) % data->ring_size;
1663
1664         schedule_work(&data->ih_work);
1665
1666         return 0;
1667 }
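
/*
 * Illustrative caller (an assumption; the function name and ras_if
 * argument are hypothetical): an IP block's IH process callback would
 * typically forward the vector entry to the RAS ring like this.
 */
static int __maybe_unused example_process_ras_irq(struct amdgpu_device *adev,
                                                  struct ras_common_if *ras_if,
                                                  struct amdgpu_iv_entry *entry)
{
        struct ras_dispatch_if ih_data = {
                .head = *ras_if,
                .entry = entry,
        };

        return amdgpu_ras_interrupt_dispatch(adev, &ih_data);
}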
1668
1669 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1670                 struct ras_common_if *head)
1671 {
1672         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1673         struct ras_ih_data *data;
1674
1675         if (!obj)
1676                 return -EINVAL;
1677
1678         data = &obj->ih_data;
1679         if (data->inuse == 0)
1680                 return 0;
1681
1682         cancel_work_sync(&data->ih_work);
1683
1684         kfree(data->ring);
1685         memset(data, 0, sizeof(*data));
1686         put_obj(obj);
1687
1688         return 0;
1689 }
1690
1691 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1692                 struct ras_common_if *head)
1693 {
1694         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1695         struct ras_ih_data *data;
1696         struct amdgpu_ras_block_object *ras_obj;
1697
1698         if (!obj) {
1699                 /* in case we register the IH before enabling the ras feature */
1700                 obj = amdgpu_ras_create_obj(adev, head);
1701                 if (!obj)
1702                         return -EINVAL;
1703         } else
1704                 get_obj(obj);
1705
1706         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1707
1708         data = &obj->ih_data;
1709         /* add the callback, etc. */
1710         *data = (struct ras_ih_data) {
1711                 .inuse = 0,
1712                 .cb = ras_obj->ras_cb,
1713                 .element_size = sizeof(struct amdgpu_iv_entry),
1714                 .rptr = 0,
1715                 .wptr = 0,
1716         };
1717
1718         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1719
1720         data->aligned_element_size = ALIGN(data->element_size, 8);
1721         /* the ring can store 64 iv entries. */
1722         data->ring_size = 64 * data->aligned_element_size;
1723         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1724         if (!data->ring) {
1725                 put_obj(obj);
1726                 return -ENOMEM;
1727         }
1728
1729         /* IH is ready */
1730         data->inuse = 1;
1731
1732         return 0;
1733 }
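
/*
 * Sizing note (worked example with an assumed entry size): if
 * sizeof(struct amdgpu_iv_entry) were 52, aligned_element_size would be
 * ALIGN(52, 8) = 56 and ring_size 64 * 56 = 3584 bytes; rptr and wptr
 * then advance in 56-byte steps modulo 3584.
 */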
1734
1735 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1736 {
1737         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1738         struct ras_manager *obj, *tmp;
1739
1740         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1741                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1742         }
1743
1744         return 0;
1745 }
1746 /* ih end */
1747
1748 /* traverse all IPs except NBIO to query the error counters */
1749 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1750 {
1751         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1752         struct ras_manager *obj;
1753
1754         if (!adev->ras_enabled || !con)
1755                 return;
1756
1757         list_for_each_entry(obj, &con->head, node) {
1758                 struct ras_query_if info = {
1759                         .head = obj->head,
1760                 };
1761
1762                 /*
1763                  * The PCIE_BIF IP has a separate isr for the ras
1764                  * controller interrupt; the specific ras counter query
1765                  * is done in that isr. So skip such blocks in the common
1766                  * sync flood interrupt isr path.
1767                  */
1768                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1769                         continue;
1770
1771                 /*
1772                  * this is a workaround for aldebaran: skip sending the msg
1773                  * to the smu to get the ecc_info table, because the smu
1774                  * currently fails to handle that request. it should be
1775                  * removed once the smu can handle the ecc_info table.
1776                  */
1777                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1778                         (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1779                         continue;
1780
1781                 amdgpu_ras_query_error_status(adev, &info);
1782         }
1783 }
1784
1785 /* Parse RdRspStatus and WrRspStatus */
1786 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1787                                           struct ras_query_if *info)
1788 {
1789         struct amdgpu_ras_block_object *block_obj;
1790         /*
1791          * Only two blocks need to query the read/write
1792          * RspStatus at the current state
1793          */
1794         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1795                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1796                 return;
1797
1798         block_obj = amdgpu_ras_get_ras_block(adev,
1799                                         info->head.block,
1800                                         info->head.sub_block_index);
1801
1802         if (!block_obj || !block_obj->hw_ops) {
1803                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1804                              get_ras_block_str(&info->head));
1805                 return;
1806         }
1807
1808         if (block_obj->hw_ops->query_ras_error_status)
1809                 block_obj->hw_ops->query_ras_error_status(adev);
1810
1811 }
1812
1813 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1814 {
1815         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1816         struct ras_manager *obj;
1817
1818         if (!adev->ras_enabled || !con)
1819                 return;
1820
1821         list_for_each_entry(obj, &con->head, node) {
1822                 struct ras_query_if info = {
1823                         .head = obj->head,
1824                 };
1825
1826                 amdgpu_ras_error_status_query(adev, &info);
1827         }
1828 }
1829
1830 /* recovery begin */
1831
1832 /* return 0 on success.
1833  * the caller needs to free bps.
1834  */
1835 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1836                 struct ras_badpage **bps, unsigned int *count)
1837 {
1838         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1839         struct ras_err_handler_data *data;
1840         int i = 0;
1841         int ret = 0, status;
1842
1843         if (!con || !con->eh_data || !bps || !count)
1844                 return -EINVAL;
1845
1846         mutex_lock(&con->recovery_lock);
1847         data = con->eh_data;
1848         if (!data || data->count == 0) {
1849                 *bps = NULL;
1850                 ret = -EINVAL;
1851                 goto out;
1852         }
1853
1854         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1855         if (!*bps) {
1856                 ret = -ENOMEM;
1857                 goto out;
1858         }
1859
1860         for (; i < data->count; i++) {
1861                 (*bps)[i] = (struct ras_badpage){
1862                         .bp = data->bps[i].retired_page,
1863                         .size = AMDGPU_GPU_PAGE_SIZE,
1864                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1865                 };
1866                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1867                                 data->bps[i].retired_page);
1868                 if (status == -EBUSY)
1869                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1870                 else if (status == -ENOENT)
1871                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1872         }
1873
1874         *count = data->count;
1875 out:
1876         mutex_unlock(&con->recovery_lock);
1877         return ret;
1878 }
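
/*
 * Illustrative caller (an assumption; the function name is
 * hypothetical). The caller owns and must free the returned array.
 */
static void __maybe_unused example_dump_bad_pages(struct amdgpu_device *adev)
{
        struct ras_badpage *bps = NULL;
        unsigned int count = 0;
        unsigned int i;

        if (amdgpu_ras_badpages_read(adev, &bps, &count))
                return;

        for (i = 0; i < count; i++)
                dev_info(adev->dev, "bad page frame 0x%x, flags %u\n",
                         bps[i].bp, bps[i].flags);

        kfree(bps);
}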
1879
1880 static void amdgpu_ras_do_recovery(struct work_struct *work)
1881 {
1882         struct amdgpu_ras *ras =
1883                 container_of(work, struct amdgpu_ras, recovery_work);
1884         struct amdgpu_device *remote_adev = NULL;
1885         struct amdgpu_device *adev = ras->adev;
1886         struct list_head device_list, *device_list_handle = NULL;
1887
1888         if (!ras->disable_ras_err_cnt_harvest) {
1889                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1890
1891                 /* Build list of devices to query RAS related errors */
1892                 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1893                         device_list_handle = &hive->device_list;
1894                 } else {
1895                         INIT_LIST_HEAD(&device_list);
1896                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
1897                         device_list_handle = &device_list;
1898                 }
1899
1900                 list_for_each_entry(remote_adev,
1901                                 device_list_handle, gmc.xgmi.head) {
1902                         amdgpu_ras_query_err_status(remote_adev);
1903                         amdgpu_ras_log_on_err_counter(remote_adev);
1904                 }
1905
1906                 amdgpu_put_xgmi_hive(hive);
1907         }
1908
1909         if (amdgpu_device_should_recover_gpu(ras->adev))
1910                 amdgpu_device_gpu_recover(ras->adev, NULL);
1911         atomic_set(&ras->in_recovery, 0);
1912 }
1913
1914 /* alloc/realloc bps array */
1915 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1916                 struct ras_err_handler_data *data, int pages)
1917 {
1918         unsigned int old_space = data->count + data->space_left;
1919         unsigned int new_space = old_space + pages;
1920         unsigned int align_space = ALIGN(new_space, 512);
1921         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
1922
1923         if (!bps)
1924                 return -ENOMEM;
1926
1927         if (data->bps) {
1928                 memcpy(bps, data->bps,
1929                                 data->count * sizeof(*data->bps));
1930                 kfree(data->bps);
1931         }
1932
1933         data->bps = bps;
1934         data->space_left += align_space - old_space;
1935         return 0;
1936 }
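
/*
 * Worked example (illustrative numbers): with data->count = 600,
 * space_left = 0 and pages = 30, old_space = 600, new_space = 630 and
 * align_space = ALIGN(630, 512) = 1024; the 600 existing records are
 * copied over and space_left grows by 1024 - 600 = 424 entries.
 */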
1937
1938 /* it deals with vram only. */
1939 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
1940                 struct eeprom_table_record *bps, int pages)
1941 {
1942         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1943         struct ras_err_handler_data *data;
1944         int ret = 0;
1945         uint32_t i;
1946
1947         if (!con || !con->eh_data || !bps || pages <= 0)
1948                 return 0;
1949
1950         mutex_lock(&con->recovery_lock);
1951         data = con->eh_data;
1952         if (!data)
1953                 goto out;
1954
1955         for (i = 0; i < pages; i++) {
1956                 if (amdgpu_ras_check_bad_page_unlock(con,
1957                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
1958                         continue;
1959
1960                 if (!data->space_left &&
1961                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
1962                         ret = -ENOMEM;
1963                         goto out;
1964                 }
1965
1966                 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
1967                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
1968                         AMDGPU_GPU_PAGE_SIZE);
1969
1970                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
1971                 data->count++;
1972                 data->space_left--;
1973         }
1974 out:
1975         mutex_unlock(&con->recovery_lock);
1976
1977         return ret;
1978 }
1979
1980 /*
1981  * write error record array to eeprom, the function should be
1982  * protected by recovery_lock
1983  */
1984 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
1985 {
1986         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1987         struct ras_err_handler_data *data;
1988         struct amdgpu_ras_eeprom_control *control;
1989         int save_count;
1990
1991         if (!con || !con->eh_data)
1992                 return 0;
1993
1994         mutex_lock(&con->recovery_lock);
1995         control = &con->eeprom_control;
1996         data = con->eh_data;
1997         save_count = data->count - control->ras_num_recs;
1998         mutex_unlock(&con->recovery_lock);
1999         /* only new entries are saved */
2000         if (save_count > 0) {
2001                 if (amdgpu_ras_eeprom_append(control,
2002                                              &data->bps[control->ras_num_recs],
2003                                              save_count)) {
2004                         dev_err(adev->dev, "Failed to save EEPROM table data!");
2005                         return -EIO;
2006                 }
2007
2008                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2009         }
2010
2011         return 0;
2012 }
2013
2014 /*
2015  * read error record array in eeprom and reserve enough space for
2016  * storing new bad pages
2017  */
2018 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2019 {
2020         struct amdgpu_ras_eeprom_control *control =
2021                 &adev->psp.ras_context.ras->eeprom_control;
2022         struct eeprom_table_record *bps;
2023         int ret;
2024
2025         /* no bad page record, skip eeprom access */
2026         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2027                 return 0;
2028
2029         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2030         if (!bps)
2031                 return -ENOMEM;
2032
2033         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2034         if (ret)
2035                 dev_err(adev->dev, "Failed to load EEPROM table records!");
2036         else
2037                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2038
2039         kfree(bps);
2040         return ret;
2041 }
2042
2043 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2044                                 uint64_t addr)
2045 {
2046         struct ras_err_handler_data *data = con->eh_data;
2047         int i;
2048
2049         addr >>= AMDGPU_GPU_PAGE_SHIFT;
2050         for (i = 0; i < data->count; i++)
2051                 if (addr == data->bps[i].retired_page)
2052                         return true;
2053
2054         return false;
2055 }
2056
2057 /*
2058  * check if an address belongs to a bad page
2059  *
2060  * Note: this check is only for umc block
2061  */
2062 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2063                                 uint64_t addr)
2064 {
2065         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2066         bool ret = false;
2067
2068         if (!con || !con->eh_data)
2069                 return ret;
2070
2071         mutex_lock(&con->recovery_lock);
2072         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2073         mutex_unlock(&con->recovery_lock);
2074         return ret;
2075 }
2076
2077 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2078                                           uint32_t max_count)
2079 {
2080         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2081
2082         /*
2083          * Justification of value bad_page_cnt_threshold in ras structure
2084          *
2085          * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
2086          * in eeprom, and introduce two scenarios accordingly.
2087          *
2088          * Bad page retirement enablement:
2089          *    - If amdgpu_bad_page_threshold = -1,
2090          *      bad_page_cnt_threshold = typical value by formula.
2091          *
2092          *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2093          *      max record length in eeprom, use it directly.
2094          *
2095          * Bad page retirement disablement:
2096          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2097          *      functionality is disabled, and bad_page_cnt_threshold will
2098          *      take no effect.
2099          */
2100
2101         if (amdgpu_bad_page_threshold < 0) {
2102                 u64 val = adev->gmc.mc_vram_size;
2103
2104                 do_div(val, RAS_BAD_PAGE_COVER);
2105                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2106                                                   max_count);
2107         } else {
2108                 con->bad_page_cnt_threshold = min_t(int, max_count,
2109                                                     amdgpu_bad_page_threshold);
2110         }
2111 }
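
/*
 * Worked example (a RAS_BAD_PAGE_COVER of 100 MiB is assumed here for
 * illustration): with amdgpu_bad_page_threshold = -1 on a 16 GiB board,
 * val = 16 GiB / 100 MiB = 163, so bad_page_cnt_threshold becomes
 * min(163, max_count); with amdgpu_bad_page_threshold = 10 it is simply
 * min(max_count, 10).
 */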
2112
2113 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2114 {
2115         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2116         struct ras_err_handler_data **data;
2117         u32 max_eeprom_records_count = 0;
2118         bool exc_err_limit = false;
2119         int ret;
2120
2121         if (!con)
2122                 return 0;
2123
2124         /* Allow access to RAS EEPROM via debugfs, when the ASIC
2125          * supports RAS and debugfs is enabled, but when
2126          * adev->ras_enabled is unset, i.e. when "ras_enable"
2127          * module parameter is set to 0.
2128          */
2129         con->adev = adev;
2130
2131         if (!adev->ras_enabled)
2132                 return 0;
2133
2134         data = &con->eh_data;
2135         *data = kzalloc(sizeof(**data), GFP_KERNEL);
2136         if (!*data) {
2137                 ret = -ENOMEM;
2138                 goto out;
2139         }
2140
2141         mutex_init(&con->recovery_lock);
2142         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2143         atomic_set(&con->in_recovery, 0);
2144         con->eeprom_control.bad_channel_bitmap = 0;
2145
2146         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
2147         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2148
2149         /* Todo: during testing the SMU might fail to read the eeprom
2150          * through I2C when the GPU is pending an XGMI reset at probe time
2151          * (mostly after the second bus reset); skip it for now
2152          */
2153         if (adev->gmc.xgmi.pending_reset)
2154                 return 0;
2155         ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2156         /*
2157          * This call fails when exc_err_limit is true or
2158          * ret != 0.
2159          */
2160         if (exc_err_limit || ret)
2161                 goto free;
2162
2163         if (con->eeprom_control.ras_num_recs) {
2164                 ret = amdgpu_ras_load_bad_pages(adev);
2165                 if (ret)
2166                         goto free;
2167
2168                 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2169
2170                 if (con->update_channel_flag) {
2171                         amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2172                         con->update_channel_flag = false;
2173                 }
2174         }
2175
2176 #ifdef CONFIG_X86_MCE_AMD
2177         if ((adev->asic_type == CHIP_ALDEBARAN) &&
2178             (adev->gmc.xgmi.connected_to_cpu))
2179                 amdgpu_register_bad_pages_mca_notifier(adev);
2180 #endif
2181         return 0;
2182
2183 free:
2184         kfree((*data)->bps);
2185         kfree(*data);
2186         con->eh_data = NULL;
2187 out:
2188         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2189
2190         /*
2191          * Except for the error-threshold-exceeded case, other failure
2192          * cases in this function do not fail the amdgpu driver init.
2193          */
2194         if (!exc_err_limit)
2195                 ret = 0;
2196         else
2197                 ret = -EINVAL;
2198
2199         return ret;
2200 }
2201
2202 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2203 {
2204         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2205         struct ras_err_handler_data *data = con->eh_data;
2206
2207         /* if recovery_init failed to init it, fini is useless */
2208         if (!data)
2209                 return 0;
2210
2211         cancel_work_sync(&con->recovery_work);
2212
2213         mutex_lock(&con->recovery_lock);
2214         con->eh_data = NULL;
2215         kfree(data->bps);
2216         kfree(data);
2217         mutex_unlock(&con->recovery_lock);
2218
2219         return 0;
2220 }
2221 /* recovery end */
2222
2223 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2224 {
2225         return adev->asic_type == CHIP_VEGA10 ||
2226                 adev->asic_type == CHIP_VEGA20 ||
2227                 adev->asic_type == CHIP_ARCTURUS ||
2228                 adev->asic_type == CHIP_ALDEBARAN ||
2229                 adev->asic_type == CHIP_SIENNA_CICHLID;
2230 }
2231
2232 /*
2233  * this is a workaround for the vega20 workstation sku:
2234  * force-enable gfx ras and ignore the vbios gfx ras flag,
2235  * since GC EDC cannot be written
2236  */
2237 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2238 {
2239         struct atom_context *ctx = adev->mode_info.atom_context;
2240
2241         if (!ctx)
2242                 return;
2243
2244         if (strnstr(ctx->vbios_version, "D16406",
2245                     sizeof(ctx->vbios_version)) ||
2246                 strnstr(ctx->vbios_version, "D36002",
2247                         sizeof(ctx->vbios_version)))
2248                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2249 }
2250
2251 /*
2252  * check the hardware's ras ability, which will be saved in hw_supported.
2253  * if the hardware does not support ras, we can skip some ras
2254  * initialization and forbid some ras operations from IP blocks.
2255  * if software itself, say a boot parameter, limits the ras ability, we
2256  * still need to allow the IP to do some limited operations, like
2257  * disable. in such a case, we have to initialize ras as normal, but
2258  * need to check in each function whether the operation is allowed.
2259  */
2260 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2261 {
2262         adev->ras_hw_enabled = adev->ras_enabled = 0;
2263
2264         if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
2265             !amdgpu_ras_asic_supported(adev))
2266                 return;
2267
2268         if (!adev->gmc.xgmi.connected_to_cpu) {
2269                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2270                         dev_info(adev->dev, "MEM ECC is active.\n");
2271                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2272                                                    1 << AMDGPU_RAS_BLOCK__DF);
2273                 } else {
2274                         dev_info(adev->dev, "MEM ECC is not present.\n");
2275                 }
2276
2277                 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2278                         dev_info(adev->dev, "SRAM ECC is active.\n");
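                        /* enable ras for every block except UMC and DF,
                         * whose bits were already decided by the MEM ECC
                         * check above
                         */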
2279                         adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2280                                                     1 << AMDGPU_RAS_BLOCK__DF);
2281
2282                         if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0))
2283                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2284                                                 1 << AMDGPU_RAS_BLOCK__JPEG);
2285                         else
2286                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2287                                                 1 << AMDGPU_RAS_BLOCK__JPEG);
2288                 } else {
2289                         dev_info(adev->dev, "SRAM ECC is not present.\n");
2290                 }
2291         } else {
2292                 /* the driver only manages the ras features of a few IP
2293                  * blocks when the GPU is connected to the CPU through XGMI */
2294                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2295                                            1 << AMDGPU_RAS_BLOCK__SDMA |
2296                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
2297         }
2298
2299         amdgpu_ras_get_quirks(adev);
2300
2301         /* hw_supported needs to be aligned with RAS block mask. */
2302         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2303
2304         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2305                 adev->ras_hw_enabled & amdgpu_ras_mask;
2306 }
2307
2308 static void amdgpu_ras_counte_dw(struct work_struct *work)
2309 {
2310         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2311                                               ras_counte_delay_work.work);
2312         struct amdgpu_device *adev = con->adev;
2313         struct drm_device *dev = adev_to_drm(adev);
2314         unsigned long ce_count, ue_count;
2315         int res;
2316
2317         res = pm_runtime_get_sync(dev->dev);
2318         if (res < 0)
2319                 goto Out;
2320
2321         /* Cache new values.
2322          */
2323         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
2324                 atomic_set(&con->ras_ce_count, ce_count);
2325                 atomic_set(&con->ras_ue_count, ue_count);
2326         }
2327
2328         pm_runtime_mark_last_busy(dev->dev);
2329 Out:
2330         pm_runtime_put_autosuspend(dev->dev);
2331 }
2332
2333 int amdgpu_ras_init(struct amdgpu_device *adev)
2334 {
2335         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2336         int r;
2337         bool df_poison, umc_poison;
2338
2339         if (con)
2340                 return 0;
2341
2342         con = kzalloc(sizeof(struct amdgpu_ras) +
2343                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2344                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2345                         GFP_KERNEL);
2346         if (!con)
2347                 return -ENOMEM;
2348
2349         con->adev = adev;
2350         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2351         atomic_set(&con->ras_ce_count, 0);
2352         atomic_set(&con->ras_ue_count, 0);
2353
2354         con->objs = (struct ras_manager *)(con + 1);
2355
2356         amdgpu_ras_set_context(adev, con);
2357
2358         amdgpu_ras_check_supported(adev);
2359
2360         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2361                 /* set the gfx block ras context feature for VEGA20 Gaming,
2362                  * so a ras disable cmd is sent to the ras ta during late init.
2363                  */
2364                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2365                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2366
2367                         return 0;
2368                 }
2369
2370                 r = 0;
2371                 goto release_con;
2372         }
2373
2374         con->update_channel_flag = false;
2375         con->features = 0;
2376         INIT_LIST_HEAD(&con->head);
2377         /* Might need to get this flag from vbios. */
2378         con->flags = RAS_DEFAULT_FLAGS;
2379
2380         /* initialize nbio ras function ahead of any other
2381          * ras functions so hardware fatal error interrupt
2382          * can be enabled as early as possible */
2383         switch (adev->asic_type) {
2384         case CHIP_VEGA20:
2385         case CHIP_ARCTURUS:
2386         case CHIP_ALDEBARAN:
2387                 if (!adev->gmc.xgmi.connected_to_cpu) {
2388                         adev->nbio.ras = &nbio_v7_4_ras;
2389                         amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
2390                         adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
2391                 }
2392                 break;
2393         default:
2394                 /* nbio ras is not available */
2395                 break;
2396         }
2397
2398         if (adev->nbio.ras &&
2399             adev->nbio.ras->init_ras_controller_interrupt) {
2400                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2401                 if (r)
2402                         goto release_con;
2403         }
2404
2405         if (adev->nbio.ras &&
2406             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2407                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2408                 if (r)
2409                         goto release_con;
2410         }
2411
2412         /* Init poison supported flag, the default value is false */
2413         if (adev->gmc.xgmi.connected_to_cpu) {
2414                 /* enabled by default when GPU is connected to CPU */
2415                 con->poison_supported = true;
2416         } else if (adev->df.funcs &&
2417                    adev->df.funcs->query_ras_poison_mode &&
2418                    adev->umc.ras &&
2419                    adev->umc.ras->query_ras_poison_mode) {
2421                 df_poison =
2422                         adev->df.funcs->query_ras_poison_mode(adev);
2423                 umc_poison =
2424                         adev->umc.ras->query_ras_poison_mode(adev);
2425                 /* Only when poison is set in both DF and UMC can we support it */
2426                 if (df_poison && umc_poison)
2427                         con->poison_supported = true;
2428                 else if (df_poison != umc_poison)
2429                         dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2430                                         df_poison, umc_poison);
2431         }
2432
2433         if (amdgpu_ras_fs_init(adev)) {
2434                 r = -EINVAL;
2435                 goto release_con;
2436         }
2437
2438         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2439                  "hardware ability[%x] ras_mask[%x]\n",
2440                  adev->ras_hw_enabled, adev->ras_enabled);
2441
2442         return 0;
2443 release_con:
2444         amdgpu_ras_set_context(adev, NULL);
2445         kfree(con);
2446
2447         return r;
2448 }
2449
2450 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2451 {
2452         if (adev->gmc.xgmi.connected_to_cpu)
2453                 return 1;
2454         return 0;
2455 }
2456
2457 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2458                                         struct ras_common_if *ras_block)
2459 {
2460         struct ras_query_if info = {
2461                 .head = *ras_block,
2462         };
2463
2464         if (!amdgpu_persistent_edc_harvesting_supported(adev))
2465                 return 0;
2466
2467         if (amdgpu_ras_query_error_status(adev, &info) != 0)
2468                 DRM_WARN("RAS init harvest failure");
2469
2470         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2471                 DRM_WARN("RAS init harvest reset failure");
2472
2473         return 0;
2474 }
2475
2476 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2477 {
2478        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2479
2480        if (!con)
2481                return false;
2482
2483        return con->poison_supported;
2484 }
2485
2486 /* helper function to handle common stuff in ip late init phase */
2487 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2488                          struct ras_common_if *ras_block)
2489 {
2490         struct amdgpu_ras_block_object *ras_obj = NULL;
2491         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2492         unsigned long ue_count, ce_count;
2493         int r;
2494
2495         /* disable RAS feature per IP block if it is not supported */
2496         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2497                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2498                 return 0;
2499         }
2500
2501         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2502         if (r) {
2503                 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2504                         /* in the resume phase, if we fail to enable ras,
2505                          * clean up all ras fs nodes and disable ras */
2506                         goto cleanup;
2507                 } else
2508                         return r;
2509         }
2510
2511         /* check for errors on warm reset on ASICs with persistent edc support */
2512         amdgpu_persistent_edc_harvesting(adev, ras_block);
2513
2514         /* in resume phase, no need to create ras fs node */
2515         if (adev->in_suspend || amdgpu_in_reset(adev))
2516                 return 0;
2517
2518         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2519         if (ras_obj->ras_cb) {
2520                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2521                 if (r)
2522                         goto cleanup;
2523         }
2524
2525         r = amdgpu_ras_sysfs_create(adev, ras_block);
2526         if (r)
2527                 goto interrupt;
2528
2529         /* Those are the cached values at init.
2530          */
2531         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
2532                 atomic_set(&con->ras_ce_count, ce_count);
2533                 atomic_set(&con->ras_ue_count, ue_count);
2534         }
2535
2536         return 0;
2537
2538 interrupt:
2539         if (ras_obj->ras_cb)
2540                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2541 cleanup:
2542         amdgpu_ras_feature_enable(adev, ras_block, 0);
2543         return r;
2544 }
2545
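/*
 * Sketch (an assumption, not a real IP): a block that needs extra
 * late-init work would typically wrap the common helper above; blocks
 * without a custom hook fall through to the default below.
 */
static int __maybe_unused example_ras_late_init(struct amdgpu_device *adev,
                                                struct ras_common_if *ras_block)
{
        int r;

        r = amdgpu_ras_block_late_init(adev, ras_block);
        if (r)
                return r;

        /* block-specific interrupt enabling would go here */
        return 0;
}
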
2546 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2547                          struct ras_common_if *ras_block)
2548 {
2549         return amdgpu_ras_block_late_init(adev, ras_block);
2550 }
2551
2552 /* helper function to remove ras fs node and interrupt handler */
2553 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2554                           struct ras_common_if *ras_block)
2555 {
2556         struct amdgpu_ras_block_object *ras_obj;
2557
2558         if (!ras_block)
2558                 return;
2559
2560         amdgpu_ras_sysfs_remove(adev, ras_block);
2561
2562         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2563         if (ras_obj->ras_cb)
2564                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2565 }
2566
2567 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2568                           struct ras_common_if *ras_block)
2569 {
2570         return amdgpu_ras_block_late_fini(adev, ras_block);
2571 }
2572
2573 /* do some init work after IP late init, on which it depends;
2574  * it runs in the resume, gpu reset, and boot-up cases.
2575  */
2576 void amdgpu_ras_resume(struct amdgpu_device *adev)
2577 {
2578         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2579         struct ras_manager *obj, *tmp;
2580
2581         if (!adev->ras_enabled || !con) {
2582                 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
2583                 amdgpu_release_ras_context(adev);
2584
2585                 return;
2586         }
2587
2588         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2589                 /* Set up all other IPs which are not implemented. The tricky
2590                  * part is that an IP's actual ras error type should be
2591                  * MULTI_UNCORRECTABLE, but since the driver does not handle
2592                  * it, ERROR_NONE makes sense anyway.
2593                  */
2594                 amdgpu_ras_enable_all_features(adev, 1);
2595
2596                 /* We enable ras on all hw_supported blocks, but the boot
2597                  * parameter might disable some of them, and one or more IPs
2598                  * may not be implemented yet; we disable those on their behalf.
2599                  */
2600                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2601                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2602                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2603                                 /* there should not be any reference. */
2604                                 WARN_ON(alive_obj(obj));
2605                         }
2606                 }
2607         }
2608 }
2609
2610 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2611 {
2612         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2613
2614         if (!adev->ras_enabled || !con)
2615                 return;
2616
2617         amdgpu_ras_disable_all_features(adev, 0);
2618         /* Make sure all ras objects are disabled. */
2619         if (con->features)
2620                 amdgpu_ras_disable_all_features(adev, 1);
2621 }
2622
2623 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2624 {
2625         struct amdgpu_ras_block_list *node, *tmp;
2626         struct amdgpu_ras_block_object *obj;
2627         int r;
2628
2629         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2630                 if (!node->ras_obj) {
2631                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
2632                         continue;
2633                 }
2634
2635                 obj = node->ras_obj;
2636                 if (obj->ras_late_init) {
2637                         r = obj->ras_late_init(adev, &obj->ras_comm);
2638                         if (r) {
2639                                 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2640                                         obj->ras_comm.name, r);
2641                                 return r;
2642                         }
2643                 } else
2644                         amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2645         }
2646
2647         return 0;
2648 }
2649
2650 /* do some fini work before IP fini, on which it depends */
2651 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2652 {
2653         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2654
2655         if (!adev->ras_enabled || !con)
2656                 return 0;
2657
2658         /* Need to disable ras on all IPs here before ip [hw/sw]fini */
2660         amdgpu_ras_disable_all_features(adev, 0);
2661         amdgpu_ras_recovery_fini(adev);
2662         return 0;
2663 }
2664
2665 int amdgpu_ras_fini(struct amdgpu_device *adev)
2666 {
2667         struct amdgpu_ras_block_list *ras_node, *tmp;
2668         struct amdgpu_ras_block_object *obj = NULL;
2669         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2670
2671         if (!adev->ras_enabled || !con)
2672                 return 0;
2673
2674         list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2675                 if (ras_node->ras_obj) {
2676                         obj = ras_node->ras_obj;
2677                         if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2678                             obj->ras_fini)
2679                                 obj->ras_fini(adev, &obj->ras_comm);
2680                         else
2681                                 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2682                 }
2683
2684                 /* Clear ras blocks from ras_list and free ras block list node */
2685                 list_del(&ras_node->node);
2686                 kfree(ras_node);
2687         }
2688
2689         amdgpu_ras_fs_fini(adev);
2690         amdgpu_ras_interrupt_remove_all(adev);
2691
2692         WARN(con->features, "Feature mask is not cleared");
2693
2694         if (con->features)
2695                 amdgpu_ras_disable_all_features(adev, 1);
2696
2697         cancel_delayed_work_sync(&con->ras_counte_delay_work);
2698
2699         amdgpu_ras_set_context(adev, NULL);
2700         kfree(con);
2701
2702         return 0;
2703 }
2704
2705 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2706 {
2707         amdgpu_ras_check_supported(adev);
2708         if (!adev->ras_hw_enabled)
2709                 return;
2710
2711         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2712                 dev_info(adev->dev, "uncorrectable hardware error "
2713                         "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2714
2715                 amdgpu_ras_reset_gpu(adev);
2716         }
2717 }
2718
2719 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2720 {
2721         if (adev->asic_type == CHIP_VEGA20 &&
2722             adev->pm.fw_version <= 0x283400) {
2723                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2724                                 amdgpu_ras_intr_triggered();
2725         }
2726
2727         return false;
2728 }
2729
2730 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2731 {
2732         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2733
2734         if (!con)
2735                 return;
2736
2737         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
2738                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2739                 amdgpu_ras_set_context(adev, NULL);
2740                 kfree(con);
2741         }
2742 }
2743
2744 #ifdef CONFIG_X86_MCE_AMD
2745 static struct amdgpu_device *find_adev(uint32_t node_id)
2746 {
2747         int i;
2748         struct amdgpu_device *adev = NULL;
2749
2750         for (i = 0; i < mce_adev_list.num_gpu; i++) {
2751                 adev = mce_adev_list.devs[i];
2752
2753                 if (adev && adev->gmc.xgmi.connected_to_cpu &&
2754                     adev->gmc.xgmi.physical_node_id == node_id)
2755                         break;
2756                 adev = NULL;
2757         }
2758
2759         return adev;
2760 }
2761
2762 #define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
2763 #define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
2764 #define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
2765 #define GPU_ID_OFFSET           8
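
/*
 * Decoding example (illustrative value): for an MCA_IPID register value
 * m with bits [47:44] = 0x9, GET_MCA_IPID_GPUID(m) = 9 and, after
 * subtracting GPU_ID_OFFSET, the notifier below resolves gpu_id = 1,
 * i.e. the second GPU node. GET_UMC_INST() takes bits [23:21] and
 * GET_CHAN_INDEX() builds a 3-bit index from bits [13:12] plus bit 20.
 */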
2766
2767 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
2768                                     unsigned long val, void *data)
2769 {
2770         struct mce *m = (struct mce *)data;
2771         struct amdgpu_device *adev = NULL;
2772         uint32_t gpu_id = 0;
2773         uint32_t umc_inst = 0;
2774         uint32_t ch_inst, channel_index = 0;
2775         struct ras_err_data err_data = {0, 0, 0, NULL};
2776         struct eeprom_table_record err_rec;
2777         uint64_t retired_page;
2778
2779         /*
2780          * If the error was generated in UMC_V2, which belongs to GPU UMCs,
2781          * and the error occurred in DramECC (Extended error code = 0), then
2782          * process the error; otherwise bail out.
2783          */
2784         if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
2785                     (XEC(m->status, 0x3f) == 0x0)))
2786                 return NOTIFY_DONE;
2787
2788         /*
2789          * If it is a correctable error, return.
2790          */
2791         if (mce_is_correctable(m))
2792                 return NOTIFY_OK;
2793
2794         /*
2795          * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
2796          */
2797         gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
2798
2799         adev = find_adev(gpu_id);
2800         if (!adev) {
2801                 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
2802                                                                 gpu_id);
2803                 return NOTIFY_DONE;
2804         }
2805
2806         /*
2807          * If it is an uncorrectable error, find out the UMC instance and
2808          * channel index.
2809          */
2810         umc_inst = GET_UMC_INST(m->ipid);
2811         ch_inst = GET_CHAN_INDEX(m->ipid);
2812
2813         dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
2814                              umc_inst, ch_inst);
2815
2816         /*
2817          * Translate UMC channel address to Physical address
2818          */
2819         channel_index =
2820                 adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
2821                                           + ch_inst];
2822
2823         retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
2824                         ADDR_OF_256B_BLOCK(channel_index) |
2825                         OFFSET_IN_256B_BLOCK(m->addr);
2826
2827         memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
2828         err_data.err_addr = &err_rec;
2829         amdgpu_umc_fill_error_record(&err_data, m->addr,
2830                         retired_page, channel_index, umc_inst);
2831
2832         if (amdgpu_bad_page_threshold != 0) {
2833                 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
2834                                                 err_data.err_addr_cnt);
2835                 amdgpu_ras_save_bad_pages(adev);
2836         }
2837
2838         return NOTIFY_OK;
2839 }
2840
2841 static struct notifier_block amdgpu_bad_page_nb = {
2842         .notifier_call  = amdgpu_bad_page_notifier,
2843         .priority       = MCE_PRIO_UC,
2844 };
2845
2846 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
2847 {
2848         /*
2849          * Add the adev to the mce_adev_list.
2850          * During mode2 reset, amdgpu device is temporarily
2851          * removed from the mgpu_info list which can cause
2852          * page retirement to fail.
2853          * Use this list instead of mgpu_info to find the amdgpu
2854          * device on which the UMC error was reported.
2855          */
2856         mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
2857
2858         /*
2859          * Register the x86 notifier only once
2860          * with MCE subsystem.
2861          */
2862         if (!notifier_registered) {
2863                 mce_register_decode_chain(&amdgpu_bad_page_nb);
2864                 notifier_registered = true;
2865         }
2866 }
2867 #endif
2868
2869 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
2870 {
2871         if (!adev)
2872                 return NULL;
2873
2874         return adev->psp.ras_context.ras;
2875 }
2876
2877 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
2878 {
2879         if (!adev)
2880                 return -EINVAL;
2881
2882         adev->psp.ras_context.ras = ras_con;
2883         return 0;
2884 }
2885
2886 /* check if ras is supported on block, say, sdma, gfx */
2887 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
2888                 unsigned int block)
2889 {
2890         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2891
2892         if (block >= AMDGPU_RAS_BLOCK_COUNT)
2893                 return 0;
2894         return ras && (adev->ras_enabled & (1 << block));
2895 }
2896
2897 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
2898 {
2899         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2900
2901         if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
2902                 schedule_work(&ras->recovery_work);
2903         return 0;
2904 }
2905
2907 /* Register each ip ras block into amdgpu ras */
2908 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
2909                 struct amdgpu_ras_block_object *ras_block_obj)
2910 {
2911         struct amdgpu_ras_block_list *ras_node;
2912
2913         if (!adev || !ras_block_obj)
2913                 return -EINVAL;
2914
2915         if (!amdgpu_ras_asic_supported(adev))
2916                 return 0;
2917
2918         ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
2919         if (!ras_node)
2920                 return -ENOMEM;
2921
2922         INIT_LIST_HEAD(&ras_node->node);
2923         ras_node->ras_obj = ras_block_obj;
2924         list_add_tail(&ras_node->node, &adev->ras_list);
2925
2926         return 0;
2927 }
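
/*
 * Registration sketch (an assumption, not a real IP): a minimal block
 * object an IP might register during early init. The example_* names
 * and the chosen block are hypothetical; example_hw_ops refers to the
 * sketch earlier in this file.
 */
static struct amdgpu_ras_block_object example_ras_block = {
        .ras_comm = {
                .name = "example",
                .block = AMDGPU_RAS_BLOCK__SDMA,
                .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
        },
        .hw_ops = &example_hw_ops,
};

static int __maybe_unused example_register(struct amdgpu_device *adev)
{
        /* adds a node to adev->ras_list; ras late init picks it up */
        return amdgpu_ras_register_ras_block(adev, &example_ras_block);
}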