/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
        "none",
        "parity",
        "single_correctable",
        "multi_uncorrectable",
        "poison",
};

const char *ras_block_string[] = {
        "umc",
        "sdma",
        "gfx",
        "mmhub",
        "athub",
        "pcie_bif",
        "hdp",
        "xgmi_wafl",
        "df",
        "smn",
        "sem",
        "mp0",
        "mp1",
        "fuse",
        "mca",
        "vcn",
        "jpeg",
};

const char *ras_mca_block_string[] = {
        "mca_mp0",
        "mca_mp1",
        "mca_mpio",
        "mca_iohc",
};

struct amdgpu_ras_block_list {
        /* ras block link */
        struct list_head node;

        struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
        if (!ras_block)
                return "NULL";

        if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
                return "OUT OF RANGE";

        if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
                return ras_mca_block_string[ras_block->sub_block_index];

        return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
        (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)

enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
        AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
        struct amdgpu_device *devs[MAX_GPU_INSTANCE];
        int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
        if (adev && amdgpu_ras_get_context(adev))
                amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
        if (adev && amdgpu_ras_get_context(adev))
                return amdgpu_ras_get_context(adev)->error_query_ready;

        return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
        struct ras_err_data err_data = {0, 0, 0, NULL};
        struct eeprom_table_record err_rec;

        if ((address >= adev->gmc.mc_vram_size) ||
            (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                dev_warn(adev->dev,
                         "RAS WARN: input address 0x%llx is invalid.\n",
                         address);
                return -EINVAL;
        }

        if (amdgpu_ras_check_bad_page(adev, address)) {
                dev_warn(adev->dev,
                         "RAS WARN: 0x%llx has already been marked as bad page!\n",
                         address);
                return 0;
        }

        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
        amdgpu_umc_fill_error_record(&err_data, address,
                        (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);

        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
                                         err_data.err_addr_cnt);
                amdgpu_ras_save_bad_pages(adev, NULL);
        }

        dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
        dev_warn(adev->dev, "Clear EEPROM:\n");
        dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

        return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
        struct ras_query_if info = {
                .head = obj->head,
        };
        ssize_t s;
        char val[128];

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
        if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
            obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
        if (*pos >= s)
                return 0;

        s -= *pos;
        s = min_t(u64, s, size);

        if (copy_to_user(buf, &val[*pos], s))
                return -EINVAL;

        *pos += s;

        return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ras_debugfs_read,
        .write = NULL,
        .llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
                *block_id = i;
                if (strcmp(name, ras_block_string[i]) == 0)
                        return 0;
        }
        return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                const char __user *buf, size_t size,
                loff_t *pos, struct ras_debug_if *data)
{
        ssize_t s = min_t(u64, 64, size);
        char str[65];
        char block_name[33];
        char err[9] = "ue";
        int op = -1;
        int block_id;
        uint32_t sub_block;
        u64 address, value;

        if (*pos)
                return -EINVAL;
        *pos = size;

        memset(str, 0, sizeof(str));
        memset(data, 0, sizeof(*data));

        if (copy_from_user(str, buf, s))
                return -EINVAL;

        if (sscanf(str, "disable %32s", block_name) == 1)
                op = 0;
        else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
        else if (strstr(str, "retire_page") != NULL)
                op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;

        if (op != -1) {
                if (op == 3) {
                        if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
                            sscanf(str, "%*s %llu", &address) != 1)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;

                        return 0;
                }

                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;

                data->head.block = block_id;
                /* only ue and ce errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
                else
                        return -EINVAL;

                data->op = op;

                if (op == 2) {
                        if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
                                   &sub_block, &address, &value) != 3 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu",
                                   &sub_block, &address, &value) != 3)
                                return -EINVAL;
                        data->head.sub_block_index = sub_block;
                        data->inject.address = address;
                        data->inject.value = value;
                }
        } else {
                if (size < sizeof(*data))
                        return -EINVAL;

                if (copy_from_user(data, buf, sizeof(*data)))
                        return -EINVAL;
        }

        return 0;
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head, they are address, value.
 * As their names indicate, inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has four kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 * - 3: reserve one vram page as bad (retire_page). Only the page
 *   address in ::inject is used.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
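 *
 * A minimal sketch (error handling omitted; it assumes struct
 * ras_debug_if and the AMDGPU_RAS_* enum values have been copied
 * from amdgpu_ras.h into the program):
 *
 * .. code-block:: c
 *
 *      int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *      struct ras_debug_if data = { 0 };
 *
 *      data.op = 1;            // 1 == enable
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *      write(fd, &data, sizeof(data));
 *      close(fd);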
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "inject  <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "retire_page <address>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where <N> is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 * "retire_page" requires only the page address.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *      see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *      ue is multi-uncorrectable
 *      ce is single-correctable
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers; the leading 0x is optional.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
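 *
 * For example, on card 0 with the umc block supported,
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/features
 *      cat /sys/class/drm/card0/device/ras/umc_err_count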
 *
 * .. note::
 *      Operations are only allowed on blocks which are supported.
 *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *      to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
                                             const char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
        int ret = 0;

        if (!amdgpu_ras_get_error_query_ready(adev)) {
                dev_warn(adev->dev, "RAS WARN: error injection "
                                "currently inaccessible\n");
                return size;
        }

        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
        if (ret)
                return ret;

        if (data.op == 3) {
                ret = amdgpu_reserve_page_direct(adev, data.inject.address);
                if (!ret)
                        return size;
                else
                        return ret;
        }

        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;

        switch (data.op) {
        case 0:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
                break;
        case 1:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
                if ((data.inject.address >= adev->gmc.mc_vram_size) ||
                    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                        dev_warn(adev->dev, "RAS WARN: input address "
                                        "0x%llx is invalid.",
                                        data.inject.address);
                        ret = -EINVAL;
                        break;
                }

                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
                        dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
                                 "already been marked as bad!\n",
                                 data.inject.address);
                        break;
                }

                /* data.inject.address is offset instead of absolute gpu address */
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
                                               const char __user *buf,
                                               size_t size, loff_t *pos)
{
        struct amdgpu_device *adev =
                (struct amdgpu_device *)file_inode(f)->i_private;
        int ret;

        ret = amdgpu_ras_eeprom_reset_table(
                &(amdgpu_ras_get_context(adev)->eeprom_control));

        if (!ret) {
                /* Something was written to EEPROM.
                 */
                amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
                return size;
        } else {
                return ret;
        }
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_ctrl_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_eeprom_write,
        .llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *      ue: 0
 *      ce: 1
 *
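 * A count file for a supported block can be read with, e.g.,
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count
 *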
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
            obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                          "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
        if (obj && (--obj->use == 0))
                list_del(&obj->node);
        if (obj && (obj->use < 0))
                DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                return NULL;

        if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                        return NULL;

                obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
        } else
                obj = &con->objs[head->block];

        /* already exists. return obj? */
        if (alive_obj(obj))
                return NULL;

        obj->head = *head;
        obj->adev = adev;
        list_add(&obj->node, &con->head);
        get_obj(obj);

        return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        int i;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head) {
                if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                        return NULL;

                if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                        if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                                return NULL;

                        obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
                } else
                        obj = &con->objs[head->block];

                if (alive_obj(obj))
                        return obj;
        } else {
                for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                        obj = &con->objs[i];
                        if (alive_obj(obj))
                                return obj;
                }
        }

        return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
{
        return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, int enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        /* If hardware does not support ras, then do not create obj.
         * But if hardware supports ras, we can create the obj.
         * The ras framework checks con->hw_supported to see if it needs
         * to do the corresponding initialization.
         * The IP checks con->support to see if it needs to disable ras.
         */
        if (!amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        if (enable) {
                if (!obj) {
                        obj = amdgpu_ras_create_obj(adev, head);
                        if (!obj)
                                return -EINVAL;
                } else {
                        /* In case the obj was created somewhere else */
                        get_obj(obj);
                }
                con->features |= BIT(head->block);
        } else {
                if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
                        con->features &= ~BIT(head->block);
                        put_obj(obj);
                }
        }

        return 0;
}

static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        if (amdgpu_ras_is_feature_allowed(adev, head) ||
                amdgpu_ras_is_poison_mode_supported(adev))
                return 1;
        else
                return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        union ta_ras_cmd_input *info;
        int ret = 0;

        if (!con)
                return -EINVAL;

        if (head->block == AMDGPU_RAS_BLOCK__GFX) {
                info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                if (!enable) {
                        info->disable_features = (struct ta_ras_disable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                } else {
                        info->enable_features = (struct ta_ras_enable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                }
        }

        /* Do not enable if it is not allowed. */
        if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
                goto out;

        /* Only enable ras feature operation handle on host side */
        if (head->block == AMDGPU_RAS_BLOCK__GFX &&
                !amdgpu_sriov_vf(adev) &&
                !amdgpu_ras_intr_triggered()) {
                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
                                enable ? "enable":"disable",
                                get_ras_block_str(head),
                                amdgpu_ras_is_poison_mode_supported(adev), ret);
                        goto out;
                }
        }

        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);
out:
        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                kfree(info);
        return ret;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret;

        if (!con)
                return -EINVAL;

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                if (enable) {
                        /* There is no harm in issuing a ras TA cmd regardless of
                         * the current ras state.
                         * If current state == target state, it will do nothing.
                         * But sometimes it requests the driver to reset and repost
                         * with error code -EAGAIN.
                         */
                        ret = amdgpu_ras_feature_enable(adev, head, 1);
                        /* With an old ras TA, we might fail to enable ras.
                         * Log it and just set up the object.
                         * TODO: remove this WA in the future.
                         */
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
                                        dev_info(adev->dev,
                                                "RAS INFO: %s setup object\n",
                                                get_ras_block_str(head));
                        }
                } else {
                        /* setup the object then issue a ras TA disable cmd. */
                        ret = __amdgpu_ras_feature_enable(adev, head, 1);
                        if (ret)
                                return ret;

                        /* gfx block ras disable cmd must be sent to ras-ta */
                        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features |= BIT(head->block);

                        ret = amdgpu_ras_feature_enable(adev, head, 0);

                        /* clean gfx block ras features flag */
                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);

        return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                /* bypass psp,
                 * i.e. just release the obj and corresponding flags
                 */
                if (bypass) {
                        if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                }
        }

        return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int i;
        const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = i,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };

                if (i == AMDGPU_RAS_BLOCK__MCA)
                        continue;

                if (bypass) {
                        /*
                         * bypass psp; vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = AMDGPU_RAS_BLOCK__MCA,
                        .type = default_ras_type,
                        .sub_block_index = i,
                };

                if (bypass) {
                        /*
                         * bypass psp; vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
                enum amdgpu_ras_block block)
{
        if (!block_obj)
                return -EINVAL;

        if (block_obj->ras_comm.block == block)
                return 0;

        return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
                                        enum amdgpu_ras_block block, uint32_t sub_block_index)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;

        if (block >= AMDGPU_RAS_BLOCK__LAST)
                return NULL;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                obj = node->ras_obj;
                if (obj->ras_block_match) {
                        if (obj->ras_block_match(obj, block, sub_block_index) == 0)
                                return obj;
                } else {
                        if (amdgpu_ras_block_match_default(obj, block) == 0)
                                return obj;
                }
        }

        return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int ret = 0;

        /*
         * choose the right query method according to
         * whether the smu supports querying error information
         */
        ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_count)
                        adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_address)
                        adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
}

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                                  struct ras_query_if *info)
{
        struct amdgpu_ras_block_object *block_obj = NULL;
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data = {0, 0, 0, NULL};

        if (!obj)
                return -EINVAL;

        if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
                amdgpu_ras_get_ecc_info(adev, &err_data);
        } else {
                block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
                if (!block_obj || !block_obj->hw_ops) {
                        dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                                     get_ras_block_str(&info->head));
                        return -EINVAL;
                }

                if (block_obj->hw_ops->query_ras_error_count)
                        block_obj->hw_ops->query_ras_error_count(adev, &err_data);

                if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
                    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
                    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
                        if (block_obj->hw_ops->query_ras_error_status)
                                block_obj->hw_ops->query_ras_error_status(adev);
                }
        }

        obj->err_data.ue_count += err_data.ue_count;
        obj->err_data.ce_count += err_data.ce_count;

        info->ue_count = obj->err_data.ue_count;
        info->ce_count = obj->err_data.ce_count;

        if (err_data.ce_count) {
                if (adev->smuio.funcs &&
                    adev->smuio.funcs->get_socket_id &&
                    adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                        "%ld correctable hardware errors "
                                        "detected in %s block, no user "
                                        "action is needed.\n",
                                        adev->smuio.funcs->get_socket_id(adev),
                                        adev->smuio.funcs->get_die_id(adev),
                                        obj->err_data.ce_count,
                                        get_ras_block_str(&info->head));
                } else {
                        dev_info(adev->dev, "%ld correctable hardware errors "
                                        "detected in %s block, no user "
                                        "action is needed.\n",
                                        obj->err_data.ce_count,
                                        get_ras_block_str(&info->head));
                }
        }
        if (err_data.ue_count) {
                if (adev->smuio.funcs &&
                    adev->smuio.funcs->get_socket_id &&
                    adev->smuio.funcs->get_die_id) {
                        dev_info(adev->dev, "socket: %d, die: %d "
                                        "%ld uncorrectable hardware errors "
                                        "detected in %s block\n",
                                        adev->smuio.funcs->get_socket_id(adev),
                                        adev->smuio.funcs->get_die_id(adev),
                                        obj->err_data.ue_count,
                                        get_ras_block_str(&info->head));
                } else {
                        dev_info(adev->dev, "%ld uncorrectable hardware errors "
                                        "detected in %s block\n",
                                        obj->err_data.ue_count,
                                        get_ras_block_str(&info->head));
                }
        }

        return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
                enum amdgpu_ras_block block)
{
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

        if (!amdgpu_ras_is_supported(adev, block))
                return -EINVAL;

        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                             ras_block_str(block));
                return -EINVAL;
        }

        if (block_obj->hw_ops->reset_ras_error_count)
                block_obj->hw_ops->reset_ras_error_count(adev);

        if ((block == AMDGPU_RAS_BLOCK__GFX) ||
            (block == AMDGPU_RAS_BLOCK__MMHUB)) {
                if (block_obj->hw_ops->reset_ras_error_status)
                        block_obj->hw_ops->reset_ras_error_status(adev);
        }

        return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
                struct ras_inject_if *info)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ta_ras_trigger_error_input block_info = {
                .block_id =  amdgpu_ras_block_to_ta(info->head.block),
                .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
                .sub_block_index = info->head.sub_block_index,
                .address = info->address,
                .value = info->value,
        };
        int ret = -EINVAL;
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
                                                        info->head.block,
                                                        info->head.sub_block_index);

        /* injection on a guest isn't allowed, return success directly */
        if (amdgpu_sriov_vf(adev))
                return 0;

        if (!obj)
                return -EINVAL;

        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                             get_ras_block_str(&info->head));
                return -EINVAL;
        }

        /* Calculate XGMI relative offset */
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
                block_info.address =
                        amdgpu_xgmi_get_relative_phy_addr(adev,
                                                          block_info.address);
        }

        if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
                if (block_obj->hw_ops->ras_error_inject)
                        ret = block_obj->hw_ops->ras_error_inject(adev, info);
        } else {
                /* If the block defines its own ras_error_inject (e.g. xgmi), use it */
                if (block_obj->hw_ops->ras_error_inject)
                        ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
                else /* otherwise fall back to the default psp-based injection */
                        ret = psp_ras_trigger_error(&adev->psp, &block_info);
        }

        if (ret)
                dev_err(adev->dev, "ras inject %s failed %d\n",
                        get_ras_block_str(&info->head), ret);

        return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on query success or if there is nothing to do; otherwise return
 * an error on failure.
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
                                               unsigned long *ce_count,
                                               unsigned long *ue_count,
                                               struct ras_query_if *query_info)
{
        int ret;

        if (!query_info)
                /* do nothing if query_info is not specified */
                return 0;

        ret = amdgpu_ras_query_error_status(adev, query_info);
        if (ret)
                return ret;

        *ce_count += query_info->ce_count;
        *ue_count += query_info->ue_count;

        /* some hardware/IP supports read to clear;
         * no need to explicitly reset the err status after the query call */
        if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
            adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
                        dev_warn(adev->dev,
                                 "Failed to reset error counter and error status\n");
        }

        return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
                                 unsigned long *ce_count,
                                 unsigned long *ue_count,
                                 struct ras_query_if *query_info)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        unsigned long ce, ue;
        int ret = 0;

        if (!adev->ras_enabled || !con)
                return -EOPNOTSUPP;

        /* Don't count since no reporting.
         */
        if (!ce_count && !ue_count)
                return 0;

        ce = 0;
        ue = 0;
        if (!query_info) {
                /* query all the ip blocks that support ras query interface */
                list_for_each_entry(obj, &con->head, node) {
                        struct ras_query_if info = {
                                .head = obj->head,
                        };

                        ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
                }
        } else {
                /* query specific ip block */
                ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
        }

        if (ret)
                return ret;

        if (ce_count)
                *ce_count = ce;

        if (ue_count)
                *ue_count = ue;

        return 0;
}
/* query/inject/cure end */

/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
                struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
        switch (flags) {
        case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
                return "R";
        case AMDGPU_RAS_RETIRE_PAGE_PENDING:
                return "P";
        case AMDGPU_RAS_RETIRE_PAGE_FAULT:
        default:
                return "F";
        }
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *      0x00000001 : 0x00001000 : R
 *      0x00000002 : 0x00001000 : P
 *
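 * The list can be dumped with, e.g.,
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *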
 */

static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
                struct kobject *kobj, struct bin_attribute *attr,
                char *buf, loff_t ppos, size_t count)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, badpages_attr);
        struct amdgpu_device *adev = con->adev;
        const unsigned int element_size =
                sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
        unsigned int start = div64_ul(ppos + element_size - 1, element_size);
        unsigned int end = div64_ul(ppos + count - 1, element_size);
        ssize_t s = 0;
        struct ras_badpage *bps = NULL;
        unsigned int bps_count = 0;

        memset(buf, 0, count);

        if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
                return 0;

        for (; start < end && start < bps_count; start++)
                s += scnprintf(&buf[s], element_size + 1,
                                "0x%08x : 0x%08x : %1s\n",
                                bps[start].bp,
                                bps[start].size,
                                amdgpu_ras_badpage_flags_str(bps[start].flags));

        kfree(bps);

        return s;
}

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct amdgpu_ras *con =
                container_of(attr, struct amdgpu_ras, features_attr);

        return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        sysfs_remove_file_from_group(&adev->dev->kobj,
                                &con->badpages_attr.attr,
                                RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct attribute *attrs[] = {
                &con->features_attr.attr,
                NULL
        };
        struct attribute_group group = {
                .name = RAS_FS_NAME,
                .attrs = attrs,
        };

        sysfs_remove_group(&adev->dev->kobj, &group);

        return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        if (!obj || obj->attr_inuse)
                return -EINVAL;

        get_obj(obj);

        snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
                "%s_err_count", head->name);

        obj->sysfs_attr = (struct device_attribute){
                .attr = {
                        .name = obj->fs_data.sysfs_name,
                        .mode = S_IRUGO,
                },
                .show = amdgpu_ras_sysfs_read,
        };
        sysfs_attr_init(&obj->sysfs_attr.attr);

        if (sysfs_add_file_to_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME)) {
                put_obj(obj);
                return -EINVAL;
        }

        obj->attr_inuse = 1;

        return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        if (!obj || !obj->attr_inuse)
                return -EINVAL;

        sysfs_remove_file_from_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME);
        obj->attr_inuse = 0;
        put_obj(obj);

        return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                amdgpu_ras_sysfs_remove(adev, &obj->head);
        }

        if (amdgpu_bad_page_threshold != 0)
                amdgpu_ras_sysfs_remove_bad_page_node(adev);

        amdgpu_ras_sysfs_remove_feature_node(adev);

        return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover.  However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo true > .../ras/auto_reboot
 *
 */
1440 /* debugfs begin */
1441 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1442 {
1443         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1444         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1445         struct dentry     *dir;
1446
1447         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1448         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1449                             &amdgpu_ras_debugfs_ctrl_ops);
1450         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1451                             &amdgpu_ras_debugfs_eeprom_ops);
1452         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1453                            &con->bad_page_cnt_threshold);
1454         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1455         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1456         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1457                             &amdgpu_ras_debugfs_eeprom_size_ops);
1458         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1459                                                        S_IRUGO, dir, adev,
1460                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1461         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1462
1463         /*
1464          * After an uncorrectable error happens, GPU recovery is usually
1465          * scheduled. But there is a known problem where GPU recovery fails
1466          * to bring the GPU back, so the interface below gives the user a
1467          * direct way to reboot the system automatically when an
1468          * ERREVENT_ATHUB_INTERRUPT is generated. In that case the normal
1469          * GPU recovery routine is never called.
1470          */
1471         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1472
1473         /*
1474          * The user can set this so that the hardware error count registers
1475          * of the RAS IPs are not cleared during ras recovery.
1476          */
1477         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1478                             &con->disable_ras_err_cnt_harvest);
1479         return dir;
1480 }
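
/*
 * Example (illustrative; the dri index depends on the device): the control
 * node above populates the ras debugfs directory with, among others:
 *
 *      ls /sys/kernel/debug/dri/0/ras/
 *      auto_reboot  bad_page_cnt_threshold  disable_ras_err_cnt_harvest
 *      ras_ctrl  ras_eeprom_reset  ras_eeprom_size  ras_eeprom_table
 *      ras_enabled  ras_hw_enabled
 */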
1481
1482 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1483                                       struct ras_fs_if *head,
1484                                       struct dentry *dir)
1485 {
1486         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1487
1488         if (!obj || !dir)
1489                 return;
1490
1491         get_obj(obj);
1492
1493         memcpy(obj->fs_data.debugfs_name,
1494                         head->debugfs_name,
1495                         sizeof(obj->fs_data.debugfs_name));
1496
1497         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1498                             obj, &amdgpu_ras_debugfs_ops);
1499 }
1500
1501 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1502 {
1503         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1504         struct dentry *dir;
1505         struct ras_manager *obj;
1506         struct ras_fs_if fs_info;
1507
1508         /*
1509          * This function is not called in the resume path, so there is
1510          * no need to check the suspend and gpu reset status.
1511          */
1512         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1513                 return;
1514
1515         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1516
1517         list_for_each_entry(obj, &con->head, node) {
1518                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1519                         (obj->attr_inuse == 1)) {
1520                         snprintf(fs_info.debugfs_name, sizeof(fs_info.debugfs_name),
1521                                         "%s_err_inject", get_ras_block_str(&obj->head));
1522                         fs_info.head = obj->head;
1523                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1524                 }
1525         }
1526 }
1527
1528 /* debugfs end */
1529
1530 /* ras fs */
1531 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1532                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1533 static DEVICE_ATTR(features, S_IRUGO,
1534                 amdgpu_ras_sysfs_features_read, NULL);
1535 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1536 {
1537         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1538         struct attribute_group group = {
1539                 .name = RAS_FS_NAME,
1540         };
1541         struct attribute *attrs[] = {
1542                 &con->features_attr.attr,
1543                 NULL
1544         };
1545         struct bin_attribute *bin_attrs[] = {
1546                 NULL,
1547                 NULL,
1548         };
1549         int r;
1550
1551         /* add features entry */
1552         con->features_attr = dev_attr_features;
1553         group.attrs = attrs;
1554         sysfs_attr_init(attrs[0]);
1555
1556         if (amdgpu_bad_page_threshold != 0) {
1557                 /* add bad_page_features entry */
1558                 bin_attr_gpu_vram_bad_pages.private = NULL;
1559                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1560                 bin_attrs[0] = &con->badpages_attr;
1561                 group.bin_attrs = bin_attrs;
1562                 sysfs_bin_attr_init(bin_attrs[0]);
1563         }
1564
1565         r = sysfs_create_group(&adev->dev->kobj, &group);
1566         if (r)
1567                 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1568
1569         return 0;
1570 }
1571
1572 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1573 {
1574         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1575         struct ras_manager *con_obj, *ip_obj, *tmp;
1576
1577         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1578                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1579                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1580                         if (ip_obj)
1581                                 put_obj(ip_obj);
1582                 }
1583         }
1584
1585         amdgpu_ras_sysfs_remove_all(adev);
1586         return 0;
1587 }
1588 /* ras fs end */
1589
1590 /* ih begin */
1591
1592 /* For the hardware that cannot enable bif ring for both ras_controller_irq
1593  * and ras_err_event_athub_irq ih cookies, the driver has to poll the
1594  * status register to check whether the interrupt is triggered, and
1595  * properly ack the interrupt if it is there.
1596  */
1597 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1598 {
1599         /* Fatal error events are handled on host side */
1600         if (amdgpu_sriov_vf(adev) ||
1601                 !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
1602                 return;
1603
1604         if (adev->nbio.ras &&
1605             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1606                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1607
1608         if (adev->nbio.ras &&
1609             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1610                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1611 }
1612
1613 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1614                                 struct amdgpu_iv_entry *entry)
1615 {
1616         bool poison_stat = false;
1617         struct amdgpu_device *adev = obj->adev;
1618         struct amdgpu_ras_block_object *block_obj =
1619                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1620
1621         if (!block_obj)
1622                 return;
1623
1624         /* both query_poison_status and handle_poison_consumption are optional,
1625          * but at least one of them should be implemented if the poison
1626          * consumption handler is needed
1627          */
1628         if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
1629                 poison_stat = block_obj->hw_ops->query_poison_status(adev);
1630                 if (!poison_stat) {
1631                         /* Not poison consumption interrupt, no need to handle it */
1632                         dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1633                                         block_obj->ras_comm.name);
1634
1635                         return;
1636                 }
1637         }
1638
1639         if (!adev->gmc.xgmi.connected_to_cpu)
1640                 amdgpu_umc_poison_handler(adev, false);
1641
1642         if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
1643                 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1644
1645         /* gpu reset is the fallback for failed and default cases */
1646         if (poison_stat) {
1647                 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1648                                 block_obj->ras_comm.name);
1649                 amdgpu_ras_reset_gpu(adev);
1650         } else {
1651                 amdgpu_gfx_poison_consumption_handler(adev, entry);
1652         }
1653 }
1654
1655 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1656                                 struct amdgpu_iv_entry *entry)
1657 {
1658         dev_info(obj->adev->dev,
1659                 "Poison is created, no user action is needed.\n");
1660 }
1661
1662 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1663                                 struct amdgpu_iv_entry *entry)
1664 {
1665         struct ras_ih_data *data = &obj->ih_data;
1666         struct ras_err_data err_data = {0, 0, 0, NULL};
1667         int ret;
1668
1669         if (!data->cb)
1670                 return;
1671
1672         /* Let the IP handle its data; we may need to get the output
1673          * from the callback to update the error type/count, etc.
1674          */
1675         ret = data->cb(obj->adev, &err_data, entry);
1676         /* A UE will trigger an interrupt, and in that case
1677          * we need to do a reset to recover the whole system.
1678          * But we leave that recovery to the IP; here we just
1679          * dispatch the error.
1680          */
1681         if (ret == AMDGPU_RAS_SUCCESS) {
1682                 /* these counts could be left as 0 if
1683                  * some blocks do not count error numbers
1684                  */
1685                 obj->err_data.ue_count += err_data.ue_count;
1686                 obj->err_data.ce_count += err_data.ce_count;
1687         }
1688 }
1689
1690 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1691 {
1692         struct ras_ih_data *data = &obj->ih_data;
1693         struct amdgpu_iv_entry entry;
1694
1695         while (data->rptr != data->wptr) {
1696                 rmb();
1697                 memcpy(&entry, &data->ring[data->rptr],
1698                                 data->element_size);
1699
1700                 wmb();
1701                 data->rptr = (data->aligned_element_size +
1702                                 data->rptr) % data->ring_size;
1703
1704                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1705                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1706                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1707                         else
1708                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1709                 } else {
1710                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1711                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1712                         else
1713                                 dev_warn(obj->adev->dev,
1714                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1715                 }
1716         }
1717 }
1718
1719 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1720 {
1721         struct ras_ih_data *data =
1722                 container_of(work, struct ras_ih_data, ih_work);
1723         struct ras_manager *obj =
1724                 container_of(data, struct ras_manager, ih_data);
1725
1726         amdgpu_ras_interrupt_handler(obj);
1727 }
1728
1729 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1730                 struct ras_dispatch_if *info)
1731 {
1732         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1733         struct ras_ih_data *data;
1734
1735         if (!obj)
1736                 return -EINVAL;
1737         data = &obj->ih_data;
1738         if (data->inuse == 0)
1739                 return 0;
1740
1741         /* Might overflow and clobber entries not yet processed... */
1742         memcpy(&data->ring[data->wptr], info->entry,
1743                         data->element_size);
1744
1745         wmb();
1746         data->wptr = (data->aligned_element_size +
1747                         data->wptr) % data->ring_size;
1748
1749         schedule_work(&data->ih_work);
1750
1751         return 0;
1752 }
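
/*
 * Illustrative sketch (hypothetical names, not code from this file): an IP
 * block's interrupt callback typically forwards an IV entry to the worker
 * above by filling in a ras_dispatch_if:
 *
 *      static int my_block_handle_ecc_irq(struct amdgpu_device *adev,
 *                                         struct ras_common_if *ras_if,
 *                                         struct amdgpu_iv_entry *entry)
 *      {
 *              struct ras_dispatch_if ih_data = {
 *                      .head = *ras_if,
 *                      .entry = entry,
 *              };
 *
 *              return amdgpu_ras_interrupt_dispatch(adev, &ih_data);
 *      }
 */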
1753
1754 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1755                 struct ras_common_if *head)
1756 {
1757         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1758         struct ras_ih_data *data;
1759
1760         if (!obj)
1761                 return -EINVAL;
1762
1763         data = &obj->ih_data;
1764         if (data->inuse == 0)
1765                 return 0;
1766
1767         cancel_work_sync(&data->ih_work);
1768
1769         kfree(data->ring);
1770         memset(data, 0, sizeof(*data));
1771         put_obj(obj);
1772
1773         return 0;
1774 }
1775
1776 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1777                 struct ras_common_if *head)
1778 {
1779         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1780         struct ras_ih_data *data;
1781         struct amdgpu_ras_block_object *ras_obj;
1782
1783         if (!obj) {
1784                 /* in case we register the IH before enabling the ras feature */
1785                 obj = amdgpu_ras_create_obj(adev, head);
1786                 if (!obj)
1787                         return -EINVAL;
1788         } else
1789                 get_obj(obj);
1790
1791         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1792
1793         data = &obj->ih_data;
1794         /* add the callback, etc. */
1795         *data = (struct ras_ih_data) {
1796                 .inuse = 0,
1797                 .cb = ras_obj->ras_cb,
1798                 .element_size = sizeof(struct amdgpu_iv_entry),
1799                 .rptr = 0,
1800                 .wptr = 0,
1801         };
1802
1803         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1804
1805         data->aligned_element_size = ALIGN(data->element_size, 8);
1806         /* the ring can store 64 iv entries. */
1807         data->ring_size = 64 * data->aligned_element_size;
1808         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1809         if (!data->ring) {
1810                 put_obj(obj);
1811                 return -ENOMEM;
1812         }
1813
1814         /* IH is ready */
1815         data->inuse = 1;
1816
1817         return 0;
1818 }
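
/*
 * Illustrative sketch (hypothetical block): a block wires up its interrupt
 * handling by setting ras_cb on its block object and passing the embedded
 * common interface to the helper above:
 *
 *      my_block_obj.ras_cb = my_block_process_err_data_cb;
 *      r = amdgpu_ras_interrupt_add_handler(adev, &my_block_obj.ras_comm);
 *
 * Note that the helper recovers the block object with container_of(), so
 * the ras_common_if passed in must be the ras_comm member embedded in an
 * amdgpu_ras_block_object.
 */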
1819
1820 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1821 {
1822         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1823         struct ras_manager *obj, *tmp;
1824
1825         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1826                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1827         }
1828
1829         return 0;
1830 }
1831 /* ih end */
1832
1833 /* traverse all IPs except NBIO to query error counters */
1834 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1835 {
1836         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1837         struct ras_manager *obj;
1838
1839         if (!adev->ras_enabled || !con)
1840                 return;
1841
1842         list_for_each_entry(obj, &con->head, node) {
1843                 struct ras_query_if info = {
1844                         .head = obj->head,
1845                 };
1846
1847                 /*
1848                  * The PCIE_BIF IP has its own isr for the ras controller
1849                  * interrupt, and the specific ras counter query is done
1850                  * in that isr. So skip this block in the common sync
1851                  * flood interrupt isr path.
1852                  */
1853                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1854                         continue;
1855
1856                 /*
1857                  * This is a workaround for aldebaran: skip sending the
1858                  * message to smu to get the ecc_info table, since smu
1859                  * currently fails to handle that request.
1860                  * Remove this once smu handles the ecc_info table.
1861                  */
1862                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1863                         (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1864                         continue;
1865
1866                 amdgpu_ras_query_error_status(adev, &info);
1867
1868                 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1869                     adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1870                     adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1871                         if (amdgpu_ras_reset_error_status(adev, info.head.block))
1872                                 dev_warn(adev->dev, "Failed to reset error counter and error status");
1873                 }
1874         }
1875 }
1876
1877 /* Parse RdRspStatus and WrRspStatus */
1878 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1879                                           struct ras_query_if *info)
1880 {
1881         struct amdgpu_ras_block_object *block_obj;
1882         /*
1883          * Only two blocks need to query the read/write
1884          * RspStatus in the current state
1885          */
1886         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1887                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1888                 return;
1889
1890         block_obj = amdgpu_ras_get_ras_block(adev,
1891                                         info->head.block,
1892                                         info->head.sub_block_index);
1893
1894         if (!block_obj || !block_obj->hw_ops) {
1895                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1896                              get_ras_block_str(&info->head));
1897                 return;
1898         }
1899
1900         if (block_obj->hw_ops->query_ras_error_status)
1901                 block_obj->hw_ops->query_ras_error_status(adev);
1902 }
1904
1905 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1906 {
1907         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1908         struct ras_manager *obj;
1909
1910         if (!adev->ras_enabled || !con)
1911                 return;
1912
1913         list_for_each_entry(obj, &con->head, node) {
1914                 struct ras_query_if info = {
1915                         .head = obj->head,
1916                 };
1917
1918                 amdgpu_ras_error_status_query(adev, &info);
1919         }
1920 }
1921
1922 /* recovery begin */
1923
1924 /* return 0 on success.
1925  * the caller needs to free bps.
1926  */
1927 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1928                 struct ras_badpage **bps, unsigned int *count)
1929 {
1930         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1931         struct ras_err_handler_data *data;
1932         int i = 0;
1933         int ret = 0, status;
1934
1935         if (!con || !con->eh_data || !bps || !count)
1936                 return -EINVAL;
1937
1938         mutex_lock(&con->recovery_lock);
1939         data = con->eh_data;
1940         if (!data || data->count == 0) {
1941                 *bps = NULL;
1942                 ret = -EINVAL;
1943                 goto out;
1944         }
1945
1946         *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
1947         if (!*bps) {
1948                 ret = -ENOMEM;
1949                 goto out;
1950         }
1951
1952         for (; i < data->count; i++) {
1953                 (*bps)[i] = (struct ras_badpage){
1954                         .bp = data->bps[i].retired_page,
1955                         .size = AMDGPU_GPU_PAGE_SIZE,
1956                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1957                 };
1958                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1959                                 data->bps[i].retired_page);
1960                 if (status == -EBUSY)
1961                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1962                 else if (status == -ENOENT)
1963                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1964         }
1965
1966         *count = data->count;
1967 out:
1968         mutex_unlock(&con->recovery_lock);
1969         return ret;
1970 }
1971
1972 static void amdgpu_ras_do_recovery(struct work_struct *work)
1973 {
1974         struct amdgpu_ras *ras =
1975                 container_of(work, struct amdgpu_ras, recovery_work);
1976         struct amdgpu_device *remote_adev = NULL;
1977         struct amdgpu_device *adev = ras->adev;
1978         struct list_head device_list, *device_list_handle =  NULL;
1979
1980         if (!ras->disable_ras_err_cnt_harvest) {
1981                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1982
1983                 /* Build list of devices to query RAS related errors */
1984                 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1985                         device_list_handle = &hive->device_list;
1986                 } else {
1987                         INIT_LIST_HEAD(&device_list);
1988                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
1989                         device_list_handle = &device_list;
1990                 }
1991
1992                 list_for_each_entry(remote_adev,
1993                                 device_list_handle, gmc.xgmi.head) {
1994                         amdgpu_ras_query_err_status(remote_adev);
1995                         amdgpu_ras_log_on_err_counter(remote_adev);
1996                 }
1997
1998                 amdgpu_put_xgmi_hive(hive);
1999         }
2000
2001         if (amdgpu_device_should_recover_gpu(ras->adev)) {
2002                 struct amdgpu_reset_context reset_context;
2003                 memset(&reset_context, 0, sizeof(reset_context));
2004
2005                 reset_context.method = AMD_RESET_METHOD_NONE;
2006                 reset_context.reset_req_dev = adev;
2007
2008                 /* Perform full reset in fatal error mode */
2009                 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2010                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2011                 else
2012                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2013
2014                 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2015         }
2016         atomic_set(&ras->in_recovery, 0);
2017 }
2018
2019 /* alloc/realloc bps array */
2020 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2021                 struct ras_err_handler_data *data, int pages)
2022 {
2023         unsigned int old_space = data->count + data->space_left;
2024         unsigned int new_space = old_space + pages;
2025         unsigned int align_space = ALIGN(new_space, 512);
2026         void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
2027
2028         if (!bps)
2029                 return -ENOMEM;
2031
2032         if (data->bps) {
2033                 memcpy(bps, data->bps,
2034                                 data->count * sizeof(*data->bps));
2035                 kfree(data->bps);
2036         }
2037
2038         data->bps = bps;
2039         data->space_left += align_space - old_space;
2040         return 0;
2041 }
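
/*
 * Worked example of the growth arithmetic above (illustrative numbers):
 * with count == 500, space_left == 12 and pages == 256, old_space is 512,
 * new_space is 768 and align_space rounds up to 1024, so space_left grows
 * by 512 extra slots.
 */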
2042
2043 /* it deals with vram only. */
2044 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2045                 struct eeprom_table_record *bps, int pages)
2046 {
2047         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2048         struct ras_err_handler_data *data;
2049         int ret = 0;
2050         uint32_t i;
2051
2052         if (!con || !con->eh_data || !bps || pages <= 0)
2053                 return 0;
2054
2055         mutex_lock(&con->recovery_lock);
2056         data = con->eh_data;
2057         if (!data)
2058                 goto out;
2059
2060         for (i = 0; i < pages; i++) {
2061                 if (amdgpu_ras_check_bad_page_unlock(con,
2062                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2063                         continue;
2064
2065                 if (!data->space_left &&
2066                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2067                         ret = -ENOMEM;
2068                         goto out;
2069                 }
2070
2071                 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2072                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2073                         AMDGPU_GPU_PAGE_SIZE);
2074
2075                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2076                 data->count++;
2077                 data->space_left--;
2078         }
2079 out:
2080         mutex_unlock(&con->recovery_lock);
2081
2082         return ret;
2083 }
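
/*
 * Note on the address arithmetic above: retired_page holds a GPU page
 * frame number, so shifting it left by AMDGPU_GPU_PAGE_SHIFT yields the
 * byte address, and AMDGPU_GPU_PAGE_SIZE bytes (one GPU page) are
 * reserved per bad page record.
 */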
2084
2085 /*
2086  * write the error record array to eeprom; recovery_lock is taken
2087  * internally while the new record count is computed
2088  * new_cnt: new added UE count, excluding reserved bad pages, can be NULL
2089  */
2090 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2091                 unsigned long *new_cnt)
2092 {
2093         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2094         struct ras_err_handler_data *data;
2095         struct amdgpu_ras_eeprom_control *control;
2096         int save_count;
2097
2098         if (!con || !con->eh_data) {
2099                 if (new_cnt)
2100                         *new_cnt = 0;
2101
2102                 return 0;
2103         }
2104
2105         mutex_lock(&con->recovery_lock);
2106         control = &con->eeprom_control;
2107         data = con->eh_data;
2108         save_count = data->count - control->ras_num_recs;
2109         mutex_unlock(&con->recovery_lock);
2110
2111         if (new_cnt)
2112                 *new_cnt = save_count / adev->umc.retire_unit;
2113
2114         /* only new entries are saved */
2115         if (save_count > 0) {
2116                 if (amdgpu_ras_eeprom_append(control,
2117                                              &data->bps[control->ras_num_recs],
2118                                              save_count)) {
2119                         dev_err(adev->dev, "Failed to save EEPROM table data!");
2120                         return -EIO;
2121                 }
2122
2123                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2124         }
2125
2126         return 0;
2127 }
2128
2129 /*
2130  * read error record array in eeprom and reserve enough space for
2131  * storing new bad pages
2132  */
2133 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2134 {
2135         struct amdgpu_ras_eeprom_control *control =
2136                 &adev->psp.ras_context.ras->eeprom_control;
2137         struct eeprom_table_record *bps;
2138         int ret;
2139
2140         /* no bad page record, skip eeprom access */
2141         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2142                 return 0;
2143
2144         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2145         if (!bps)
2146                 return -ENOMEM;
2147
2148         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2149         if (ret)
2150                 dev_err(adev->dev, "Failed to load EEPROM table records!");
2151         else
2152                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2153
2154         kfree(bps);
2155         return ret;
2156 }
2157
2158 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2159                                 uint64_t addr)
2160 {
2161         struct ras_err_handler_data *data = con->eh_data;
2162         int i;
2163
2164         addr >>= AMDGPU_GPU_PAGE_SHIFT;
2165         for (i = 0; i < data->count; i++)
2166                 if (addr == data->bps[i].retired_page)
2167                         return true;
2168
2169         return false;
2170 }
2171
2172 /*
2173  * check if an address belongs to a bad page
2174  *
2175  * Note: this check is only for umc block
2176  */
2177 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2178                                 uint64_t addr)
2179 {
2180         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2181         bool ret = false;
2182
2183         if (!con || !con->eh_data)
2184                 return ret;
2185
2186         mutex_lock(&con->recovery_lock);
2187         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2188         mutex_unlock(&con->recovery_lock);
2189         return ret;
2190 }
2191
2192 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2193                                           uint32_t max_count)
2194 {
2195         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2196
2197         /*
2198          * Justification of value bad_page_cnt_threshold in ras structure
2199          *
2200          * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2201          * in eeprom, or amdgpu_bad_page_threshold == -2; this gives two
2202          * scenarios:
2203          *
2204          * Bad page retirement enablement:
2205          *    - If amdgpu_bad_page_threshold = -2,
2206          *      bad_page_cnt_threshold = typical value by formula.
2207          *
2208          *    - When the value from user is 0 < amdgpu_bad_page_threshold <
2209          *      max record length in eeprom, use it directly.
2210          *
2211          * Bad page retirement disablement:
2212          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2213          *      functionality is disabled, and bad_page_cnt_threshold will
2214          *      take no effect.
2215          */
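        /*
         * Worked example of the formula branch below (illustrative, and
         * assuming RAS_BAD_PAGE_COVER allows roughly one bad page per
         * 100 MiB of VRAM): a 32 GiB board gives 32768 MiB / 100 MiB ~= 327,
         * so bad_page_cnt_threshold becomes min(327, max_count).
         */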
2216
2217         if (amdgpu_bad_page_threshold < 0) {
2218                 u64 val = adev->gmc.mc_vram_size;
2219
2220                 do_div(val, RAS_BAD_PAGE_COVER);
2221                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2222                                                   max_count);
2223         } else {
2224                 con->bad_page_cnt_threshold = min_t(int, max_count,
2225                                                     amdgpu_bad_page_threshold);
2226         }
2227 }
2228
2229 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2230 {
2231         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2232         struct ras_err_handler_data **data;
2233         u32  max_eeprom_records_count = 0;
2234         bool exc_err_limit = false;
2235         int ret;
2236
2237         if (!con || amdgpu_sriov_vf(adev))
2238                 return 0;
2239
2240         /* Allow access to RAS EEPROM via debugfs, when the ASIC
2241          * supports RAS and debugfs is enabled, but when
2242          * adev->ras_enabled is unset, i.e. when "ras_enable"
2243          * module parameter is set to 0.
2244          */
2245         con->adev = adev;
2246
2247         if (!adev->ras_enabled)
2248                 return 0;
2249
2250         data = &con->eh_data;
2251         *data = kzalloc(sizeof(**data), GFP_KERNEL);
2252         if (!*data) {
2253                 ret = -ENOMEM;
2254                 goto out;
2255         }
2256
2257         mutex_init(&con->recovery_lock);
2258         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2259         atomic_set(&con->in_recovery, 0);
2260         con->eeprom_control.bad_channel_bitmap = 0;
2261
2262         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
2263         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2264
2265         /* Todo: During testing the SMU might fail to read the eeprom
2266          * through I2C when the GPU is pending an XGMI reset during probe
2267          * time (mostly after the second bus reset); skip it for now.
2268          */
2269         if (adev->gmc.xgmi.pending_reset)
2270                 return 0;
2271         ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2272         /*
2273          * This call fails when exc_err_limit is true or
2274          * ret != 0.
2275          */
2276         if (exc_err_limit || ret)
2277                 goto free;
2278
2279         if (con->eeprom_control.ras_num_recs) {
2280                 ret = amdgpu_ras_load_bad_pages(adev);
2281                 if (ret)
2282                         goto free;
2283
2284                 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2285
2286                 if (con->update_channel_flag == true) {
2287                         amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2288                         con->update_channel_flag = false;
2289                 }
2290         }
2291
2292 #ifdef CONFIG_X86_MCE_AMD
2293         if ((adev->asic_type == CHIP_ALDEBARAN) &&
2294             (adev->gmc.xgmi.connected_to_cpu))
2295                 amdgpu_register_bad_pages_mca_notifier(adev);
2296 #endif
2297         return 0;
2298
2299 free:
2300         kfree((*data)->bps);
2301         kfree(*data);
2302         con->eh_data = NULL;
2303 out:
2304         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2305
2306         /*
2307          * Except for the error threshold exceeding case, other failure
2308          * cases in this function would not fail the amdgpu driver init.
2309          */
2310         if (!exc_err_limit)
2311                 ret = 0;
2312         else
2313                 ret = -EINVAL;
2314
2315         return ret;
2316 }
2317
2318 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2319 {
2320         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2321         struct ras_err_handler_data *data = con->eh_data;
2322
2323         /* if recovery_init failed to init it, fini is useless */
2324         if (!data)
2325                 return 0;
2326
2327         cancel_work_sync(&con->recovery_work);
2328
2329         mutex_lock(&con->recovery_lock);
2330         con->eh_data = NULL;
2331         kfree(data->bps);
2332         kfree(data);
2333         mutex_unlock(&con->recovery_lock);
2334
2335         return 0;
2336 }
2337 /* recovery end */
2338
2339 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2340 {
2341         if (amdgpu_sriov_vf(adev)) {
2342                 switch (adev->ip_versions[MP0_HWIP][0]) {
2343                 case IP_VERSION(13, 0, 2):
2344                         return true;
2345                 default:
2346                         return false;
2347                 }
2348         }
2349
2350         if (adev->asic_type == CHIP_IP_DISCOVERY) {
2351                 switch (adev->ip_versions[MP0_HWIP][0]) {
2352                 case IP_VERSION(13, 0, 0):
2353                 case IP_VERSION(13, 0, 10):
2354                         return true;
2355                 default:
2356                         return false;
2357                 }
2358         }
2359
2360         return adev->asic_type == CHIP_VEGA10 ||
2361                 adev->asic_type == CHIP_VEGA20 ||
2362                 adev->asic_type == CHIP_ARCTURUS ||
2363                 adev->asic_type == CHIP_ALDEBARAN ||
2364                 adev->asic_type == CHIP_SIENNA_CICHLID;
2365 }
2366
2367 /*
2368  * this is a workaround for the vega20 workstation sku:
2369  * force enable gfx ras and ignore the vbios gfx ras flag,
2370  * since GC EDC can not be written
2371  */
2372 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2373 {
2374         struct atom_context *ctx = adev->mode_info.atom_context;
2375
2376         if (!ctx)
2377                 return;
2378
2379         if (strnstr(ctx->vbios_version, "D16406",
2380                     sizeof(ctx->vbios_version)) ||
2381                 strnstr(ctx->vbios_version, "D36002",
2382                         sizeof(ctx->vbios_version)))
2383                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2384 }
2385
2386 /*
2387  * check the hardware's ras ability, which is saved in hw_supported.
2388  * if the hardware does not support ras, we can skip some ras
2389  * initialization and forbid some ras operations from the IPs.
2390  * if software itself, say a boot parameter, limits the ras ability, we
2391  * still need to allow the IPs some limited operations, like disable. In
2392  * that case we have to initialize ras as normal, but check in each
2393  * function whether the operation is allowed.
2394  */
2395 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2396 {
2397         adev->ras_hw_enabled = adev->ras_enabled = 0;
2398
2399         if (!adev->is_atom_fw ||
2400             !amdgpu_ras_asic_supported(adev))
2401                 return;
2402
2403         if (!adev->gmc.xgmi.connected_to_cpu) {
2404                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2405                         dev_info(adev->dev, "MEM ECC is active.\n");
2406                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2407                                                    1 << AMDGPU_RAS_BLOCK__DF);
2408                 } else {
2409                         dev_info(adev->dev, "MEM ECC is not present.\n");
2410                 }
2411
2412                 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2413                         dev_info(adev->dev, "SRAM ECC is active.\n");
2414                         if (!amdgpu_sriov_vf(adev))
2415                                 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2416                                                             1 << AMDGPU_RAS_BLOCK__DF);
2417                         else
2418                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2419                                                                 1 << AMDGPU_RAS_BLOCK__SDMA |
2420                                                                 1 << AMDGPU_RAS_BLOCK__GFX);
2421
2422                         /* VCN/JPEG RAS can be supported on both bare metal and
2423                          * SRIOV environments
2424                          */
2425                         if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2426                             adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2427                                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2428                                                         1 << AMDGPU_RAS_BLOCK__JPEG);
2429                         else
2430                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2431                                                         1 << AMDGPU_RAS_BLOCK__JPEG);
2432
2433                         /*
2434                          * XGMI RAS is not supported if xgmi num physical nodes
2435                          * is zero
2436                          */
2437                         if (!adev->gmc.xgmi.num_physical_nodes)
2438                                 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
2439                 } else {
2440                         dev_info(adev->dev, "SRAM ECC is not present.\n");
2441                 }
2442         } else {
2443                 /* the driver only manages the RAS feature of a few IP blocks
2444                  * when the GPU is connected to the CPU through XGMI */
2445                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2446                                            1 << AMDGPU_RAS_BLOCK__SDMA |
2447                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
2448         }
2449
2450         amdgpu_ras_get_quirks(adev);
2451
2452         /* hw_supported needs to be aligned with RAS block mask. */
2453         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2454
2455         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2456                 adev->ras_hw_enabled & amdgpu_ras_mask;
2457 }
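
/*
 * Illustrative example of the mask arithmetic above: with UMC (bit 0),
 * SDMA (bit 1) and GFX (bit 2) reported by hardware, ras_hw_enabled is
 * 0x7. Booting with ras_mask=0x5 (amdgpu_ras_mask) then leaves
 * ras_enabled as 0x5 (UMC and GFX only), while ras_enable=0
 * (amdgpu_ras_enable) forces ras_enabled to 0.
 */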
2458
2459 static void amdgpu_ras_counte_dw(struct work_struct *work)
2460 {
2461         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2462                                               ras_counte_delay_work.work);
2463         struct amdgpu_device *adev = con->adev;
2464         struct drm_device *dev = adev_to_drm(adev);
2465         unsigned long ce_count, ue_count;
2466         int res;
2467
2468         res = pm_runtime_get_sync(dev->dev);
2469         if (res < 0)
2470                 goto Out;
2471
2472         /* Cache new values.
2473          */
2474         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
2475                 atomic_set(&con->ras_ce_count, ce_count);
2476                 atomic_set(&con->ras_ue_count, ue_count);
2477         }
2478
2479         pm_runtime_mark_last_busy(dev->dev);
2480 Out:
2481         pm_runtime_put_autosuspend(dev->dev);
2482 }
2483
2484 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
2485 {
2486         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2487         bool df_poison, umc_poison;
2488
2489         /* poison setting is useless on SRIOV guest */
2490         if (amdgpu_sriov_vf(adev) || !con)
2491                 return;
2492
2493         /* Init poison supported flag, the default value is false */
2494         if (adev->gmc.xgmi.connected_to_cpu) {
2495                 /* enabled by default when GPU is connected to CPU */
2496                 con->poison_supported = true;
2497         } else if (adev->df.funcs &&
2498             adev->df.funcs->query_ras_poison_mode &&
2499             adev->umc.ras &&
2500             adev->umc.ras->query_ras_poison_mode) {
2501                 df_poison =
2502                         adev->df.funcs->query_ras_poison_mode(adev);
2503                 umc_poison =
2504                         adev->umc.ras->query_ras_poison_mode(adev);
2505
2506                 /* Only if poison mode is set in both DF and UMC can we support it */
2507                 if (df_poison && umc_poison)
2508                         con->poison_supported = true;
2509                 else if (df_poison != umc_poison)
2510                         dev_warn(adev->dev,
2511                                 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
2512                                 df_poison, umc_poison);
2513         }
2514 }
2515
2516 int amdgpu_ras_init(struct amdgpu_device *adev)
2517 {
2518         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2519         int r;
2520
2521         if (con)
2522                 return 0;
2523
2524         con = kmalloc(sizeof(struct amdgpu_ras) +
2525                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
2526                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
2527                         GFP_KERNEL|__GFP_ZERO);
2528         if (!con)
2529                 return -ENOMEM;
2530
2531         con->adev = adev;
2532         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
2533         atomic_set(&con->ras_ce_count, 0);
2534         atomic_set(&con->ras_ue_count, 0);
2535
2536         con->objs = (struct ras_manager *)(con + 1);
2537
2538         amdgpu_ras_set_context(adev, con);
2539
2540         amdgpu_ras_check_supported(adev);
2541
2542         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
2543                 /* set the gfx block ras context feature for VEGA20 Gaming;
2544                  * send a ras disable cmd to the ras ta during ras late init.
2545                  */
2546                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
2547                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
2548
2549                         return 0;
2550                 }
2551
2552                 r = 0;
2553                 goto release_con;
2554         }
2555
2556         con->update_channel_flag = false;
2557         con->features = 0;
2558         INIT_LIST_HEAD(&con->head);
2559         /* Might need to get this flag from vbios. */
2560         con->flags = RAS_DEFAULT_FLAGS;
2561
2562         /* initialize nbio ras function ahead of any other
2563          * ras functions so hardware fatal error interrupt
2564          * can be enabled as early as possible */
2565         switch (adev->ip_versions[NBIO_HWIP][0]) {
2566         case IP_VERSION(7, 4, 0):
2567         case IP_VERSION(7, 4, 1):
2568         case IP_VERSION(7, 4, 4):
2569                 if (!adev->gmc.xgmi.connected_to_cpu)
2570                         adev->nbio.ras = &nbio_v7_4_ras;
2571                 break;
2572         case IP_VERSION(4, 3, 0):
2573                 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
2574                         /* unlike other generations of nbio ras,
2575                          * nbio v4_3 only supports the fatal error interrupt
2576                          * to inform software that DF is frozen due to a
2577                          * system fatal error event. The driver should not
2578                          * enable nbio ras in such a case. Instead,
2579                          * check DF RAS. */
2580                         adev->nbio.ras = &nbio_v4_3_ras;
2581                 break;
2582         default:
2583                 /* nbio ras is not available */
2584                 break;
2585         }
2586
2587         /* nbio ras block needs to be enabled ahead of other ras blocks
2588          * to handle fatal error */
2589         r = amdgpu_nbio_ras_sw_init(adev);
2590         if (r)
2591                 goto release_con;
2592
2593         if (adev->nbio.ras &&
2594             adev->nbio.ras->init_ras_controller_interrupt) {
2595                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
2596                 if (r)
2597                         goto release_con;
2598         }
2599
2600         if (adev->nbio.ras &&
2601             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
2602                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
2603                 if (r)
2604                         goto release_con;
2605         }
2606
2607         amdgpu_ras_query_poison_mode(adev);
2608
2609         if (amdgpu_ras_fs_init(adev)) {
2610                 r = -EINVAL;
2611                 goto release_con;
2612         }
2613
2614         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
2615                  "hardware ability[%x] ras_mask[%x]\n",
2616                  adev->ras_hw_enabled, adev->ras_enabled);
2617
2618         return 0;
2619 release_con:
2620         amdgpu_ras_set_context(adev, NULL);
2621         kfree(con);
2622
2623         return r;
2624 }
2625
2626 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
2627 {
2628         if (adev->gmc.xgmi.connected_to_cpu)
2629                 return 1;
2630         return 0;
2631 }
2632
2633 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
2634                                         struct ras_common_if *ras_block)
2635 {
2636         struct ras_query_if info = {
2637                 .head = *ras_block,
2638         };
2639
2640         if (!amdgpu_persistent_edc_harvesting_supported(adev))
2641                 return 0;
2642
2643         if (amdgpu_ras_query_error_status(adev, &info) != 0)
2644                 DRM_WARN("RAS init harvest failure");
2645
2646         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
2647                 DRM_WARN("RAS init harvest reset failure");
2648
2649         return 0;
2650 }
2651
2652 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
2653 {
2654         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2655
2656         if (!con)
2657                 return false;
2658
2659         return con->poison_supported;
2660 }
2661
2662 /* helper function to handle common stuff in ip late init phase */
2663 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
2664                          struct ras_common_if *ras_block)
2665 {
2666         struct amdgpu_ras_block_object *ras_obj = NULL;
2667         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2668         struct ras_query_if *query_info;
2669         unsigned long ue_count, ce_count;
2670         int r;
2671
2672         /* disable RAS feature per IP block if it is not supported */
2673         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
2674                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
2675                 return 0;
2676         }
2677
2678         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
2679         if (r) {
2680                 if (adev->in_suspend || amdgpu_in_reset(adev)) {
2681                         /* in the resume phase, if we fail to enable ras,
2682                          * clean up all ras fs nodes and disable ras */
2683                         goto cleanup;
2684                 } else
2685                         return r;
2686         }
2687
2688         /* check for errors on warm reset edc persistent supported ASICs */
2689         amdgpu_persistent_edc_harvesting(adev, ras_block);
2690
2691         /* in resume phase, no need to create ras fs node */
2692         if (adev->in_suspend || amdgpu_in_reset(adev))
2693                 return 0;
2694
2695         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2696         if (ras_obj->ras_cb || (ras_obj->hw_ops &&
2697             (ras_obj->hw_ops->query_poison_status ||
2698             ras_obj->hw_ops->handle_poison_consumption))) {
2699                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
2700                 if (r)
2701                         goto cleanup;
2702         }
2703
2704         r = amdgpu_ras_sysfs_create(adev, ras_block);
2705         if (r)
2706                 goto interrupt;
2707
2708         /* Those are the cached values at init.
2709          */
2710         query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
2711         if (!query_info)
2712                 return -ENOMEM;
2713         memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
2714
2715         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
2716                 atomic_set(&con->ras_ce_count, ce_count);
2717                 atomic_set(&con->ras_ue_count, ue_count);
2718         }
2719
2720         kfree(query_info);
2721         return 0;
2722
2723 interrupt:
2724         if (ras_obj->ras_cb)
2725                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2726 cleanup:
2727         amdgpu_ras_feature_enable(adev, ras_block, 0);
2728         return r;
2729 }
2730
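/*
 * Illustrative sketch (hypothetical block): an IP block embeds a
 * ras_common_if in its amdgpu_ras_block_object and can point ras_late_init
 * at the helper above, either directly or through a thin wrapper:
 *
 *      struct amdgpu_ras_block_object my_block_ras = {
 *              .ras_comm = {
 *                      .block = AMDGPU_RAS_BLOCK__GFX,
 *                      .name  = "gfx",
 *              },
 *              .ras_late_init = amdgpu_ras_block_late_init,
 *      };
 *
 * Blocks that leave ras_late_init unset fall back to the default wrapper
 * below via amdgpu_ras_late_init().
 */
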
2731 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
2732                          struct ras_common_if *ras_block)
2733 {
2734         return amdgpu_ras_block_late_init(adev, ras_block);
2735 }
2736
2737 /* helper function to remove ras fs node and interrupt handler */
2738 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
2739                           struct ras_common_if *ras_block)
2740 {
2741         struct amdgpu_ras_block_object *ras_obj;
2742         if (!ras_block)
2743                 return;
2744
2745         amdgpu_ras_sysfs_remove(adev, ras_block);
2746
2747         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
2748         if (ras_obj->ras_cb)
2749                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
2750 }
2751
2752 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
2753                           struct ras_common_if *ras_block)
2754 {
2755         return amdgpu_ras_block_late_fini(adev, ras_block);
2756 }
2757
2758 /* do some init work after IP late init, as a dependence.
2759  * it runs in the resume/gpu reset/boot-up cases.
2760  */
2761 void amdgpu_ras_resume(struct amdgpu_device *adev)
2762 {
2763         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2764         struct ras_manager *obj, *tmp;
2765
2766         if (!adev->ras_enabled || !con) {
2767                 /* clean up the ras context for VEGA20 Gaming after sending the disable cmd */
2768                 amdgpu_release_ras_context(adev);
2769
2770                 return;
2771         }
2772
2773         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
2774                 /* Set up all other IPs which are not implemented. There is a
2775                  * tricky point here: the IP's actual ras error type should
2776                  * be MULTI_UNCORRECTABLE, but as the driver does not handle
2777                  * it, ERROR_NONE makes sense anyway.
2778                  */
2779                 amdgpu_ras_enable_all_features(adev, 1);
2780
2781                 /* We enable ras on all hw_supported blocks, but a boot
2782                  * parameter might disable some of them, and one or more
2783                  * IPs may not be implemented yet. So we disable those on behalf.
2784                  */
2785                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
2786                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
2787                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
2788                                 /* there should not be any reference. */
2789                                 WARN_ON(alive_obj(obj));
2790                         }
2791                 }
2792         }
2793 }
2794
2795 void amdgpu_ras_suspend(struct amdgpu_device *adev)
2796 {
2797         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2798
2799         if (!adev->ras_enabled || !con)
2800                 return;
2801
2802         amdgpu_ras_disable_all_features(adev, 0);
2803         /* Make sure all ras objects are disabled. */
2804         if (con->features)
2805                 amdgpu_ras_disable_all_features(adev, 1);
2806 }
2807
2808 int amdgpu_ras_late_init(struct amdgpu_device *adev)
2809 {
2810         struct amdgpu_ras_block_list *node, *tmp;
2811         struct amdgpu_ras_block_object *obj;
2812         int r;
2813
2814         /* Guest side doesn't need to init ras features */
2815         if (amdgpu_sriov_vf(adev))
2816                 return 0;
2817
2818         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
2819                 if (!node->ras_obj) {
2820                         dev_warn(adev->dev, "Abnormal ras list node.\n");
2821                         continue;
2822                 }
2823
2824                 obj = node->ras_obj;
2825                 if (obj->ras_late_init) {
2826                         r = obj->ras_late_init(adev, &obj->ras_comm);
2827                         if (r) {
2828                                 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
2829                                         obj->ras_comm.name, r);
2830                                 return r;
2831                         }
2832                 } else
2833                         amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
2834         }
2835
2836         return 0;
2837 }
2838
2839 /* do some fini work before IP fini as dependence */
2840 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
2841 {
2842         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2843
2844         if (!adev->ras_enabled || !con)
2845                 return 0;
2846
2847
2848         /* Need to disable ras on all IPs here before ip [hw/sw]fini */
2849         if (con->features)
2850                 amdgpu_ras_disable_all_features(adev, 0);
2851         amdgpu_ras_recovery_fini(adev);
2852         return 0;
2853 }
2854
2855 int amdgpu_ras_fini(struct amdgpu_device *adev)
2856 {
2857         struct amdgpu_ras_block_list *ras_node, *tmp;
2858         struct amdgpu_ras_block_object *obj = NULL;
2859         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2860
2861         if (!adev->ras_enabled || !con)
2862                 return 0;
2863
2864         list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
2865                 if (ras_node->ras_obj) {
2866                         obj = ras_node->ras_obj;
2867                         if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
2868                             obj->ras_fini)
2869                                 obj->ras_fini(adev, &obj->ras_comm);
2870                         else
2871                                 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
2872                 }
2873
2874                 /* Remove the ras block from ras_list and free the list node */
2875                 list_del(&ras_node->node);
2876                 kfree(ras_node);
2877         }
2878
2879         amdgpu_ras_fs_fini(adev);
2880         amdgpu_ras_interrupt_remove_all(adev);
2881
2882         WARN(con->features, "Feature mask is not cleared");
2883
2884         if (con->features)
2885                 amdgpu_ras_disable_all_features(adev, 1);
2886
2887         cancel_delayed_work_sync(&con->ras_counte_delay_work);
2888
2889         amdgpu_ras_set_context(adev, NULL);
2890         kfree(con);
2891
2892         return 0;
2893 }
2894
2895 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
2896 {
2897         amdgpu_ras_check_supported(adev);
2898         if (!adev->ras_hw_enabled)
2899                 return;
2900
2901         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
2902                 dev_info(adev->dev, "uncorrectable hardware error "
2903                         "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2904
2905                 amdgpu_ras_reset_gpu(adev);
2906         }
2907 }
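/*
 * A hedged sketch of how an interrupt source might funnel a fatal
 * error vector into this handler; the handler name is hypothetical
 * (real callers live in the NBIO code). The atomic_cmpxchg() on
 * amdgpu_ras_in_intr above guarantees that only the first caller
 * schedules the GPU reset.
 */
static int example_fatal_err_process(struct amdgpu_device *adev,
                                     struct amdgpu_irq_src *source,
                                     struct amdgpu_iv_entry *entry)
{
        /* Delegate to the common ISR, which performs its own support
         * check and single-shot reset guard. */
        amdgpu_ras_global_ras_isr(adev);
        return 0;
}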
2908
2909 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
2910 {
2911         if (adev->asic_type == CHIP_VEGA20 &&
2912             adev->pm.fw_version <= 0x283400) {
2913                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
2914                                 amdgpu_ras_intr_triggered();
2915         }
2916
2917         return false;
2918 }
2919
2920 void amdgpu_release_ras_context(struct amdgpu_device *adev)
2921 {
2922         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2923
2924         if (!con)
2925                 return;
2926
2927         if (!adev->ras_enabled && (con->features & BIT(AMDGPU_RAS_BLOCK__GFX))) {
2928                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
2929                 amdgpu_ras_set_context(adev, NULL);
2930                 kfree(con);
2931         }
2932 }
2933
2934 #ifdef CONFIG_X86_MCE_AMD
2935 static struct amdgpu_device *find_adev(uint32_t node_id)
2936 {
2937         int i;
2938         struct amdgpu_device *adev = NULL;
2939
2940         for (i = 0; i < mce_adev_list.num_gpu; i++) {
2941                 adev = mce_adev_list.devs[i];
2942
2943                 if (adev && adev->gmc.xgmi.connected_to_cpu &&
2944                     adev->gmc.xgmi.physical_node_id == node_id)
2945                         break;
2946                 adev = NULL;
2947         }
2948
2949         return adev;
2950 }
2951
2952 #define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
2953 #define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
2954 #define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
2955 #define GPU_ID_OFFSET           8
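/*
 * To make the bit layout concrete, a self-test style sketch decoding
 * a hypothetical MCA_IPID value (not one captured from hardware) with
 * the macros above. Note that GET_CHAN_INDEX() splices bits [13:12]
 * with bit 20 promoted to bit 2.
 */
static void __maybe_unused example_decode_mca_ipid(void)
{
        /* Hypothetical IPID: GPUID field 0x9, UMC instance 3,
         * channel bits: bit 20 set, bits [13:12] = 2. */
        u64 ipid = (0x9ULL << 44) | (0x3ULL << 21) |
                   (0x1ULL << 20) | (0x2ULL << 12);

        WARN_ON(GET_MCA_IPID_GPUID(ipid) - GPU_ID_OFFSET != 1); /* node 1 */
        WARN_ON(GET_UMC_INST(ipid) != 3);
        WARN_ON(GET_CHAN_INDEX(ipid) != 6);                     /* 0x4 | 0x2 */
}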
2956
2957 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
2958                                     unsigned long val, void *data)
2959 {
2960         struct mce *m = (struct mce *)data;
2961         struct amdgpu_device *adev = NULL;
2962         uint32_t gpu_id = 0;
2963         uint32_t umc_inst = 0, ch_inst = 0;
2964
2965         /*
2966          * Only process the error if it was generated in UMC_V2, which
2967          * belongs to the GPU UMCs, and the error occurred in DramECC
2968          * (Extended error code = 0); otherwise bail out.
2969          */
2970         if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
2971                     (XEC(m->status, 0x3f) == 0x0)))
2972                 return NOTIFY_DONE;
2973
2974         /*
2975          * If the error is correctable, there is nothing to do; return.
2976          */
2977         if (mce_is_correctable(m))
2978                 return NOTIFY_OK;
2979
2980         /*
2981          * The GPU id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
2982          */
2983         gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
2984
2985         adev = find_adev(gpu_id);
2986         if (!adev) {
2987                 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
2988                                                                 gpu_id);
2989                 return NOTIFY_DONE;
2990         }
2991
2992         /*
2993          * The error is uncorrectable, so extract the UMC instance and
2994          * channel index from the MCA_IPID register.
2995          */
2996         umc_inst = GET_UMC_INST(m->ipid);
2997         ch_inst = GET_CHAN_INDEX(m->ipid);
2998
2999         dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d\n",
3000                              umc_inst, ch_inst);
3001
3002         if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3003                 return NOTIFY_OK;
3004
3005         return NOTIFY_DONE;
3006 }
3007
3008 static struct notifier_block amdgpu_bad_page_nb = {
3009         .notifier_call  = amdgpu_bad_page_notifier,
3010         .priority       = MCE_PRIO_UC,
3011 };
3012
3013 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
3014 {
3015         /*
3016          * Add the adev to the mce_adev_list.
3017          * During mode2 reset, the amdgpu device is temporarily
3018          * removed from the mgpu_info list, which can cause
3019          * page retirement to fail.
3020          * Use this list instead of mgpu_info to find the amdgpu
3021          * device on which the UMC error was reported.
3022          */
3023         mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
3024
3025         /*
3026          * Register the x86 notifier only once
3027          * with MCE subsystem.
3028          */
3029         if (!notifier_registered) {
3030                 mce_register_decode_chain(&amdgpu_bad_page_nb);
3031                 notifier_registered = true;
3032         }
3033 }
3034 #endif
3035
3036 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
3037 {
3038         if (!adev)
3039                 return NULL;
3040
3041         return adev->psp.ras_context.ras;
3042 }
3043
3044 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
3045 {
3046         if (!adev)
3047                 return -EINVAL;
3048
3049         adev->psp.ras_context.ras = ras_con;
3050         return 0;
3051 }
3052
3053 /* Check whether RAS is supported on a given block, e.g. sdma or gfx */
3054 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
3055                 unsigned int block)
3056 {
3057         int ret = 0;
3058         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3059
3060         if (block >= AMDGPU_RAS_BLOCK_COUNT)
3061                 return 0;
3062
3063         ret = ras && (adev->ras_enabled & (1 << block));
3064
3065         /* Special case: on ASICs with memory ECC enabled but SRAM ECC
3066          * disabled, a block may not be flagged in .ras_enabled. Even
3067          * so, if the ASIC supports poison mode and the block has a RAS
3068          * configuration registered, the block can be considered to
3069          * support the RAS function.
3070          */
3071         if (!ret &&
3072             amdgpu_ras_is_poison_mode_supported(adev) &&
3073             amdgpu_ras_get_ras_block(adev, block, 0))
3074                 ret = 1;
3075
3076         return ret;
3077 }
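/*
 * A hedged usage sketch gating an error-count query on block support;
 * the function name is hypothetical, while ras_query_if and
 * amdgpu_ras_query_error_status() are the query interface this file
 * provides.
 */
static void example_query_umc_errors(struct amdgpu_device *adev)
{
        struct ras_query_if info = {
                .head = { .block = AMDGPU_RAS_BLOCK__UMC },
        };

        /* Skip the query entirely when the block lacks RAS support. */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
                return;

        if (!amdgpu_ras_query_error_status(adev, &info))
                dev_info(adev->dev, "umc: ce %lu, ue %lu\n",
                         info.ce_count, info.ue_count);
}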
3078
3079 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
3080 {
3081         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3082
3083         if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
3084                 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3085         return 0;
3086 }
3087
3088
3089 /* Register each ip ras block into amdgpu ras */
3090 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
3091                 struct amdgpu_ras_block_object *ras_block_obj)
3092 {
3093         struct amdgpu_ras_block_list *ras_node;
3094         if (!adev || !ras_block_obj)
3095                 return -EINVAL;
3096
3097         ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
3098         if (!ras_node)
3099                 return -ENOMEM;
3100
3101         INIT_LIST_HEAD(&ras_node->node);
3102         ras_node->ras_obj = ras_block_obj;
3103         list_add_tail(&ras_node->node, &adev->ras_list);
3104
3105         return 0;
3106 }
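/*
 * Finally, a hedged registration sketch assuming a hypothetical IP
 * block object (real blocks such as gfx or umc populate considerably
 * more fields, including hw_ops). The list node allocated above is
 * freed again in amdgpu_ras_fini().
 */
static struct amdgpu_ras_block_object example_ras_block = {
        .ras_comm = {
                .name = "example",
                .block = AMDGPU_RAS_BLOCK__GFX, /* placeholder block id */
        },
        .ras_late_init = example_block_ras_late_init, /* sketched earlier */
};

static int example_ip_sw_init(struct amdgpu_device *adev)
{
        int r;

        /* Hypothetical call site: real blocks register during sw init. */
        r = amdgpu_ras_register_ras_block(adev, &example_ras_block);
        if (r)
                dev_err(adev->dev,
                        "failed to register example ras block: %d\n", r);
        return r;
}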