[linux.git] drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
        "none",
        "parity",
        "single_correctable",
        "multi_uncorrectable",
        "poison",
};

const char *ras_block_string[] = {
        "umc",
        "sdma",
        "gfx",
        "mmhub",
        "athub",
        "pcie_bif",
        "hdp",
        "xgmi_wafl",
        "df",
        "smn",
        "sem",
        "mp0",
        "mp1",
        "fuse",
        "mca",
        "vcn",
        "jpeg",
        "ih",
        "mpio",
};

const char *ras_mca_block_string[] = {
        "mca_mp0",
        "mca_mp1",
        "mca_mpio",
        "mca_iohc",
};

struct amdgpu_ras_block_list {
        /* ras block link */
        struct list_head node;

        struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
        if (!ras_block)
                return "NULL";

        if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
            ras_block->block >= ARRAY_SIZE(ras_block_string))
                return "OUT OF RANGE";

        if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
                return ras_mca_block_string[ras_block->sub_block_index];

        return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
        (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])
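
/* ffs() is 1-based for a non-zero argument, so ras_err_str(BIT(0)) resolves
 * to ras_error_string[1] ("parity"); a zero mask (ffs() == 0) resolves to
 * ras_error_string[0] ("none").
 */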

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
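/* For illustration: at that rate, a board with 16 GiB of VRAM would be
 * expected to accumulate roughly 16 GiB / 100 MiB ~= 163 bad pages over its
 * lifetime (the 16 GiB figure is an arbitrary example, not a value taken
 * from this driver).
 */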

#define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms

#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms

#define MAX_FLUSH_RETIRE_DWORK_TIMES  100

enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
        AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
        struct amdgpu_device *devs[MAX_GPU_INSTANCE];
        int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
        if (adev && amdgpu_ras_get_context(adev))
                amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
        if (adev && amdgpu_ras_get_context(adev))
                return amdgpu_ras_get_context(adev)->error_query_ready;

        return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
        struct ras_err_data err_data;
        struct eeprom_table_record err_rec;
        int ret;

        if ((address >= adev->gmc.mc_vram_size) ||
            (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                dev_warn(adev->dev,
                         "RAS WARN: input address 0x%llx is invalid.\n",
                         address);
                return -EINVAL;
        }

        if (amdgpu_ras_check_bad_page(adev, address)) {
                dev_warn(adev->dev,
                         "RAS WARN: 0x%llx has already been marked as bad page!\n",
                         address);
                return 0;
        }

        ret = amdgpu_ras_error_data_init(&err_data);
        if (ret)
                return ret;

        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
        amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
                                         err_data.err_addr_cnt);
                amdgpu_ras_save_bad_pages(adev, NULL);
        }

        amdgpu_ras_error_data_fini(&err_data);

        dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
        dev_warn(adev->dev, "Clear EEPROM:\n");
        dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

        return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
        struct ras_query_if info = {
                .head = obj->head,
        };
        ssize_t s;
        char val[128];

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
        if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
        if (*pos >= s)
                return 0;

        s -= *pos;
        s = min_t(u64, s, size);

        if (copy_to_user(buf, &val[*pos], s))
                return -EINVAL;

        *pos += s;

        return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ras_debugfs_read,
        .write = NULL,
        .llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
                *block_id = i;
                if (strcmp(name, ras_block_string[i]) == 0)
                        return 0;
        }
        return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                const char __user *buf, size_t size,
                loff_t *pos, struct ras_debug_if *data)
{
        ssize_t s = min_t(u64, 64, size);
        char str[65];
        char block_name[33];
        char err[9] = "ue";
        int op = -1;
        int block_id;
        uint32_t sub_block;
        u64 address, value;
        /* default value is 0 if the mask is not set by user */
        u32 instance_mask = 0;

        if (*pos)
                return -EINVAL;
        *pos = size;

        memset(str, 0, sizeof(str));
        memset(data, 0, sizeof(*data));

        if (copy_from_user(str, buf, s))
                return -EINVAL;

        if (sscanf(str, "disable %32s", block_name) == 1)
                op = 0;
        else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
        else if (strstr(str, "retire_page") != NULL)
                op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;

        if (op != -1) {
                if (op == 3) {
                        if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
                            sscanf(str, "%*s %llu", &address) != 1)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;

                        return 0;
                }

                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;

                data->head.block = block_id;
                /* only ue, ce and poison errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
                else if (!memcmp("poison", err, 6))
                        data->head.type = AMDGPU_RAS_ERROR__POISON;
                else
                        return -EINVAL;

                data->op = op;

                if (op == 2) {
                        if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
                                   &sub_block, &address, &value, &instance_mask) != 4 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu %u",
                                   &sub_block, &address, &value, &instance_mask) != 4 &&
                            sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
                                   &sub_block, &address, &value) != 3 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu",
                                   &sub_block, &address, &value) != 3)
                                return -EINVAL;
                        data->head.sub_block_index = sub_block;
                        data->inject.address = address;
                        data->inject.value = value;
                        data->inject.instance_mask = instance_mask;
                }
        } else {
                if (size < sizeof(*data))
                        return -EINVAL;

                if (copy_from_user(data, buf, sizeof(*data)))
                        return -EINVAL;
        }

        return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
                                struct ras_debug_if *data)
{
        int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
        uint32_t mask, inst_mask = data->inject.instance_mask;

        /* no need to set instance mask if there is only one instance */
        if (num_xcc <= 1 && inst_mask) {
                data->inject.instance_mask = 0;
                dev_dbg(adev->dev,
                        "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
                        inst_mask);

                return;
        }

        switch (data->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
                mask = GENMASK(num_xcc - 1, 0);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                mask = GENMASK(adev->sdma.num_instances - 1, 0);
                break;
        case AMDGPU_RAS_BLOCK__VCN:
        case AMDGPU_RAS_BLOCK__JPEG:
                mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
                break;
        default:
                mask = inst_mask;
                break;
        }

        /* remove invalid bits in instance mask */
        data->inject.instance_mask &= mask;
        if (inst_mask != data->inject.instance_mask)
                dev_dbg(adev->dev,
                        "Adjust RAS inject mask 0x%x to 0x%x\n",
                        inst_mask, data->inject.instance_mask);
}
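
/* Worked example (illustrative values, not from this file): on a part with
 * 4 GFX XCCs, GENMASK(3, 0) == 0xf, so a user-supplied GFX inject mask of
 * 0xff is trimmed to 0xf and the adjustment is reported via dev_dbg() above.
 */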

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value, and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Takes ::head as its data.
 * - 1: enable RAS on the block. Takes ::head as its data.
 * - 2: inject errors on the block. Takes ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
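 *
 * A minimal userspace sketch of that path (illustrative only: it assumes
 * card 0, copies the struct layout from amdgpu_ras.h, and omits error
 * handling):
 *
 * .. code-block:: c
 *
 *      struct ras_debug_if data = { 0 };
 *      int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *      data.op = 1;                    // 1 == enable RAS on the block
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *      // a write shorter than sizeof(data) is rejected with -EINVAL
 *      write(fd, &data, sizeof(data));
 *      close(fd);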
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *      see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison, where
 *      ue is multi-uncorrectable
 *      ce is single-correctable
 *      poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
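 * The interface also accepts a "retire_page" command (handled by
 * amdgpu_reserve_page_direct() above), which marks a VRAM page as bad
 * without injecting an error, to exercise the page-retirement path:
 *
 * .. code-block:: bash
 *
 *      echo "retire_page <address>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * The address may be decimal or 0x-prefixed hexadecimal. As the command
 * itself warns, this is for testing only and it alters the bad-page records
 * stored in the RAS EEPROM.
 *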
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *      Operations are only allowed on blocks which are supported.
 *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *      to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
                                             const char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
        int ret = 0;

        if (!amdgpu_ras_get_error_query_ready(adev)) {
                dev_warn(adev->dev, "RAS WARN: error injection currently inaccessible\n");
                return size;
        }

        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
        if (ret)
                return ret;

        if (data.op == 3) {
                ret = amdgpu_reserve_page_direct(adev, data.inject.address);
                if (!ret)
                        return size;
                else
                        return ret;
        }

        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;

        switch (data.op) {
        case 0:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
                break;
        case 1:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
                if ((data.inject.address >= adev->gmc.mc_vram_size &&
                    adev->gmc.mc_vram_size) ||
                    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                        dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
                                        data.inject.address);
                        ret = -EINVAL;
                        break;
                }

                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
                        dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
                                 data.inject.address);
                        break;
                }

                amdgpu_ras_instance_mask_check(adev, &data);

                /* data.inject.address is an offset rather than an absolute GPU address */
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which have experienced ECC errors in VRAM.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
                                               const char __user *buf,
                                               size_t size, loff_t *pos)
{
        struct amdgpu_device *adev =
                (struct amdgpu_device *)file_inode(f)->i_private;
        int ret;

        ret = amdgpu_ras_eeprom_reset_table(
                &(amdgpu_ras_get_context(adev)->eeprom_control));

        if (!ret) {
                /* Something was written to EEPROM. */
                amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
                return size;
        } else {
                return ret;
        }
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_ctrl_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_eeprom_write,
        .llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the GPU
 * through /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *      ue: 0
 *      ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
                return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                                "ce", info.ce_count, "de", info.de_count);
        else
                return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                                "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)
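
/* obj->use is a plain reference count: get_obj()/put_obj() calls must stay
 * balanced; put_obj() unlinks the object and releases its error data once
 * the count drops back to zero.
 */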
static inline void put_obj(struct ras_manager *obj)
{
        if (obj && (--obj->use == 0)) {
                list_del(&obj->node);
                amdgpu_ras_error_data_fini(&obj->err_data);
        }

        if (obj && (obj->use < 0))
                DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                return NULL;

        if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                        return NULL;

                obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
        } else
                obj = &con->objs[head->block];

        /* already exists, do not create it again */
        if (alive_obj(obj))
                return NULL;

        if (amdgpu_ras_error_data_init(&obj->err_data))
                return NULL;

        obj->head = *head;
        obj->adev = adev;
        list_add(&obj->node, &con->head);
        get_obj(obj);

        return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        int i;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head) {
                if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                        return NULL;

                if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                        if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                                return NULL;

                        obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
                } else
                        obj = &con->objs[head->block];

                if (alive_obj(obj))
                        return obj;
        } else {
                for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                        obj = &con->objs[i];
                        if (alive_obj(obj))
                                return obj;
                }
        }

        return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
{
        return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, int enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        /* If the hardware does not support ras, then do not create the obj.
         * But if the hardware does support ras, we can create the obj.
         * The ras framework checks con->hw_supported to see if it needs to do
         * the corresponding initialization.
         * The IP checks con->support to see if it needs to disable ras.
         */
        if (!amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        if (enable) {
                if (!obj) {
                        obj = amdgpu_ras_create_obj(adev, head);
                        if (!obj)
                                return -EINVAL;
                } else {
                        /* In case we created the obj somewhere else */
                        get_obj(obj);
                }
                con->features |= BIT(head->block);
        } else {
                if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
                        con->features &= ~BIT(head->block);
                        put_obj(obj);
                }
        }

        return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        union ta_ras_cmd_input *info;
        int ret;

        if (!con)
                return -EINVAL;

        /* For non-gfx IPs, do not enable the ras feature if it is not allowed.
         * For the gfx IP, force the enable/disable ras feature commands to be
         * issued regardless of the feature support status.
         */
        if (head->block != AMDGPU_RAS_BLOCK__GFX &&
            !amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        /* Only enable gfx ras feature from host side */
        if (head->block == AMDGPU_RAS_BLOCK__GFX &&
            !amdgpu_sriov_vf(adev) &&
            !amdgpu_ras_intr_triggered()) {
                info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                if (!enable) {
                        info->disable_features = (struct ta_ras_disable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                } else {
                        info->enable_features = (struct ta_ras_enable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                }

                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
                                enable ? "enable":"disable",
                                get_ras_block_str(head),
                                amdgpu_ras_is_poison_mode_supported(adev), ret);
                        kfree(info);
                        return ret;
                }

                kfree(info);
        }

        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);

        return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret;

        if (!con)
                return -EINVAL;

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                if (enable) {
                        /* There is no harm in issuing a ras TA cmd regardless of
                         * the current ras state.
                         * If current state == target state, it will do nothing.
                         * But sometimes it requests the driver to reset and repost
                         * with error code -EAGAIN.
                         */
                        ret = amdgpu_ras_feature_enable(adev, head, 1);
                        /* With an old ras TA, we might fail to enable ras.
                         * Log it and just set up the object.
                         * TODO: remove this WA in the future.
                         */
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
                                        dev_info(adev->dev,
                                                "RAS INFO: %s setup object\n",
                                                get_ras_block_str(head));
                        }
                } else {
                        /* setup the object then issue a ras TA disable cmd. */
                        ret = __amdgpu_ras_feature_enable(adev, head, 1);
                        if (ret)
                                return ret;

                        /* the gfx block ras disable cmd must be sent to the ras-ta */
                        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features |= BIT(head->block);

                        ret = amdgpu_ras_feature_enable(adev, head, 0);

                        /* clean gfx block ras features flag */
                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);

        return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                /* bypass psp, i.e. just release the obj and
                 * the corresponding flags
                 */
                if (bypass) {
                        if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                }
        }

        return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int i;
        const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = i,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };

                if (i == AMDGPU_RAS_BLOCK__MCA)
                        continue;

                if (bypass) {
                        /*
                         * bypass psp; the vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = AMDGPU_RAS_BLOCK__MCA,
                        .type = default_ras_type,
                        .sub_block_index = i,
                };

                if (bypass) {
                        /*
                         * bypass psp; the vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
                enum amdgpu_ras_block block)
{
        if (!block_obj)
                return -EINVAL;

        if (block_obj->ras_comm.block == block)
                return 0;

        return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
                                        enum amdgpu_ras_block block, uint32_t sub_block_index)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;

        if (block >= AMDGPU_RAS_BLOCK__LAST)
                return NULL;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                obj = node->ras_obj;
                if (obj->ras_block_match) {
                        if (obj->ras_block_match(obj, block, sub_block_index) == 0)
                                return obj;
                } else {
                        if (amdgpu_ras_block_match_default(obj, block) == 0)
                                return obj;
                }
        }

        return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int ret = 0;

        /*
         * choose the right query method according to
         * whether the smu supports querying error information
         */
        ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_count)
                        adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_address)
                        adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                                              struct ras_manager *ras_mgr,
                                              struct ras_err_data *err_data,
                                              struct ras_query_context *qctx,
                                              const char *blk_name,
                                              bool is_ue,
                                              bool is_de)
{
        struct amdgpu_smuio_mcm_config_info *mcm_info;
        struct ras_err_node *err_node;
        struct ras_err_info *err_info;
        u64 event_id = qctx->evid.event_id;

        if (is_ue) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        if (err_info->ue_count) {
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld new uncorrectable hardware errors detected in %s block\n",
                                              mcm_info->socket_id,
                                              mcm_info->die_id,
                                              err_info->ue_count,
                                              blk_name);
                        }
                }

                for_each_ras_error(err_node, &ras_mgr->err_data) {
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                      "%lld uncorrectable hardware errors detected in total in %s block\n",
                                      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
                }

        } else {
                if (is_de) {
                        for_each_ras_error(err_node, err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                if (err_info->de_count) {
                                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                                      "%lld new deferred hardware errors detected in %s block\n",
                                                      mcm_info->socket_id,
                                                      mcm_info->die_id,
                                                      err_info->de_count,
                                                      blk_name);
                                }
                        }

                        for_each_ras_error(err_node, &ras_mgr->err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld deferred hardware errors detected in total in %s block\n",
                                              mcm_info->socket_id, mcm_info->die_id,
                                              err_info->de_count, blk_name);
                        }
                } else {
                        for_each_ras_error(err_node, err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                if (err_info->ce_count) {
                                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                                      "%lld new correctable hardware errors detected in %s block\n",
                                                      mcm_info->socket_id,
                                                      mcm_info->die_id,
                                                      err_info->ce_count,
                                                      blk_name);
                                }
                        }

                        for_each_ras_error(err_node, &ras_mgr->err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld correctable hardware errors detected in total in %s block\n",
                                              mcm_info->socket_id, mcm_info->die_id,
                                              err_info->ce_count, blk_name);
                        }
                }
        }
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
        return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
                                             struct ras_query_if *query_if,
                                             struct ras_err_data *err_data,
                                             struct ras_query_context *qctx)
{
        struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
        const char *blk_name = get_ras_block_str(&query_if->head);
        u64 event_id = qctx->evid.event_id;

        if (err_data->ce_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, false, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld correctable hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.ce_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.ce_count,
                                      blk_name);
                }
        }

        if (err_data->ue_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, true, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld uncorrectable hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.ue_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.ue_count,
                                      blk_name);
                }
        }

        if (err_data->de_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, false, true);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld deferred hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.de_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.de_count,
                                      blk_name);
                }
        }
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
        struct ras_err_node *err_node;
        struct ras_err_info *err_info;

        if (err_data_has_source_info(err_data)) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
                        amdgpu_ras_error_statistic_de_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->de_count);
                        amdgpu_ras_error_statistic_ce_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->ce_count);
                        amdgpu_ras_error_statistic_ue_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->ue_count);
                }
        } else {
                /* for the legacy asic path which doesn't have error source info */
                obj->err_data.ue_count += err_data->ue_count;
                obj->err_data.ce_count += err_data->ce_count;
                obj->err_data.de_count += err_data->de_count;
        }
}
1239
1240 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1241 {
1242         struct ras_common_if head;
1243
1244         memset(&head, 0, sizeof(head));
1245         head.block = blk;
1246
1247         return amdgpu_ras_find_obj(adev, &head);
1248 }
1249
1250 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1251                         const struct aca_info *aca_info, void *data)
1252 {
1253         struct ras_manager *obj;
1254
1255         /* in resume phase, no need to create aca fs node */
1256         if (adev->in_suspend || amdgpu_in_reset(adev))
1257                 return 0;
1258
1259         obj = get_ras_manager(adev, blk);
1260         if (!obj)
1261                 return -EINVAL;
1262
1263         return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1264 }
1265
1266 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1267 {
1268         struct ras_manager *obj;
1269
1270         obj = get_ras_manager(adev, blk);
1271         if (!obj)
1272                 return -EINVAL;
1273
1274         amdgpu_aca_remove_handle(&obj->aca_handle);
1275
1276         return 0;
1277 }
1278
1279 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1280                                          enum aca_error_type type, struct ras_err_data *err_data,
1281                                          struct ras_query_context *qctx)
1282 {
1283         struct ras_manager *obj;
1284
1285         obj = get_ras_manager(adev, blk);
1286         if (!obj)
1287                 return -EINVAL;
1288
1289         return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1290 }
1291
1292 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1293                                   struct aca_handle *handle, char *buf, void *data)
1294 {
1295         struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1296         struct ras_query_if info = {
1297                 .head = obj->head,
1298         };
1299
1300         if (!amdgpu_ras_get_error_query_ready(obj->adev))
1301                 return sysfs_emit(buf, "Query currently inaccessible\n");
1302
1303         if (amdgpu_ras_query_error_status(obj->adev, &info))
1304                 return -EINVAL;
1305
1306         return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1307                           "ce", info.ce_count, "de", info.de_count);
1308 }
1309
1310 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1311                                                 struct ras_query_if *info,
1312                                                 struct ras_err_data *err_data,
1313                                                 struct ras_query_context *qctx,
1314                                                 unsigned int error_query_mode)
1315 {
1316         enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1317         struct amdgpu_ras_block_object *block_obj = NULL;
1318         int ret;
1319
1320         if (blk == AMDGPU_RAS_BLOCK_COUNT)
1321                 return -EINVAL;
1322
1323         if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1324                 return -EINVAL;
1325
1326         if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1327                 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1328                         amdgpu_ras_get_ecc_info(adev, err_data);
1329                 } else {
1330                         block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1331                         if (!block_obj || !block_obj->hw_ops) {
1332                                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1333                                              get_ras_block_str(&info->head));
1334                                 return -EINVAL;
1335                         }
1336
1337                         if (block_obj->hw_ops->query_ras_error_count)
1338                                 block_obj->hw_ops->query_ras_error_count(adev, err_data);
1339
1340                         if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1341                             (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1342                             (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1343                                 if (block_obj->hw_ops->query_ras_error_status)
1344                                         block_obj->hw_ops->query_ras_error_status(adev);
1345                         }
1346                 }
1347         } else {
1348                 if (amdgpu_aca_is_enabled(adev)) {
1349                         ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1350                         if (ret)
1351                                 return ret;
1352
1353                         ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1354                         if (ret)
1355                                 return ret;
1356
1357                         ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1358                         if (ret)
1359                                 return ret;
1360                 } else {
1361                         /* FIXME: add code to check return value later */
1362                         amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1363                         amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1364                 }
1365         }
1366
1367         return 0;
1368 }
1369
1370 /* query/inject/cure begin */
1371 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1372                                                     struct ras_query_if *info,
1373                                                     enum ras_event_type type)
1374 {
1375         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1376         struct ras_err_data err_data;
1377         struct ras_query_context qctx;
1378         unsigned int error_query_mode;
1379         int ret;
1380
1381         if (!obj)
1382                 return -EINVAL;
1383
1384         if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1385                 return -EINVAL;
1386
1387         ret = amdgpu_ras_error_data_init(&err_data);
1388         if (ret)
1389                 return ret;
1390
1391         memset(&qctx, 0, sizeof(qctx));
1392         qctx.evid.type = type;
1393         qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1394
1395         if (!down_read_trylock(&adev->reset_domain->sem)) {
1396                 ret = -EIO;
1397                 goto out_fini_err_data;
1398         }
1399
1400         ret = amdgpu_ras_query_error_status_helper(adev, info,
1401                                                    &err_data,
1402                                                    &qctx,
1403                                                    error_query_mode);
1404         up_read(&adev->reset_domain->sem);
1405         if (ret)
1406                 goto out_fini_err_data;
1407
1408         amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1409
1410         info->ue_count = obj->err_data.ue_count;
1411         info->ce_count = obj->err_data.ce_count;
1412         info->de_count = obj->err_data.de_count;
1413
1414         amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1415
1416 out_fini_err_data:
1417         amdgpu_ras_error_data_fini(&err_data);
1418
1419         return ret;
1420 }
1421
1422 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1423 {
1424         return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1425 }
1426
1427 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1428                 enum amdgpu_ras_block block)
1429 {
1430         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1431         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1432         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1433
1434         if (!block_obj || !block_obj->hw_ops) {
1435                 dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
1436                                 ras_block_str(block));
1437                 return -EOPNOTSUPP;
1438         }
1439
1440         if (!amdgpu_ras_is_supported(adev, block) ||
1441             !amdgpu_ras_get_aca_debug_mode(adev))
1442                 return -EOPNOTSUPP;
1443
1444         /* skip ras error reset in gpu reset */
1445         if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1446             ((smu_funcs && smu_funcs->set_debug_mode) ||
1447              (mca_funcs && mca_funcs->mca_set_debug_mode)))
1448                 return -EOPNOTSUPP;
1449
1450         if (block_obj->hw_ops->reset_ras_error_count)
1451                 block_obj->hw_ops->reset_ras_error_count(adev);
1452
1453         return 0;
1454 }
1455
1456 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1457                 enum amdgpu_ras_block block)
1458 {
1459         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1460
1461         if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1462                 return 0;
1463
1464         if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1465             (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1466                 if (block_obj->hw_ops->reset_ras_error_status)
1467                         block_obj->hw_ops->reset_ras_error_status(adev);
1468         }
1469
1470         return 0;
1471 }
1472
1473 /* wrapper of psp_ras_trigger_error */
1474 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1475                 struct ras_inject_if *info)
1476 {
1477         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1478         struct ta_ras_trigger_error_input block_info = {
1479                 .block_id =  amdgpu_ras_block_to_ta(info->head.block),
1480                 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1481                 .sub_block_index = info->head.sub_block_index,
1482                 .address = info->address,
1483                 .value = info->value,
1484         };
1485         int ret = -EINVAL;
1486         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1487                                                         info->head.block,
1488                                                         info->head.sub_block_index);
1489
1490         /* injection on a guest isn't allowed, so return success directly */
1491         if (amdgpu_sriov_vf(adev))
1492                 return 0;
1493
1494         if (!obj)
1495                 return -EINVAL;
1496
1497         if (!block_obj || !block_obj->hw_ops) {
1498                 dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
1499                              get_ras_block_str(&info->head));
1500                 return -EINVAL;
1501         }
1502
1503         /* Calculate XGMI relative offset */
1504         if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1505             info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1506                 block_info.address =
1507                         amdgpu_xgmi_get_relative_phy_addr(adev,
1508                                                           block_info.address);
1509         }
1510
1511         if (block_obj->hw_ops->ras_error_inject) {
1512                 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1513                         ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1514                 else /* A special ras_error_inject is defined (e.g., xgmi) */
1515                         ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1516                                                 info->instance_mask);
1517         } else {
1518                 /* default path */
1519                 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1520         }
1521
1522         if (ret)
1523                 dev_err(adev->dev, "ras inject %s failed %d\n",
1524                         get_ras_block_str(&info->head), ret);
1525
1526         return ret;
1527 }
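
/*
 * Usage sketch (illustrative only, not an in-tree caller): inject a single
 * uncorrectable error into GFX instance 0. The address/value of 0 are
 * placeholder parameters; real injection parameters are IP specific.
 *
 *      struct ras_inject_if inj = {
 *              .head = {
 *                      .block = AMDGPU_RAS_BLOCK__GFX,
 *                      .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *              },
 *              .address = 0,
 *              .value = 0,
 *              .instance_mask = 0x1,
 *      };
 *      int err = amdgpu_ras_error_inject(adev, &inj);
 */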
1528
1529 /**
1530  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1531  * @adev: pointer to AMD GPU device
1532  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1533  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1534  * @query_info: pointer to ras_query_if
1535  *
1536  * Return 0 on query success or when there is nothing to do; otherwise
1537  * return an error on failure.
1538  */
1539 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1540                                                unsigned long *ce_count,
1541                                                unsigned long *ue_count,
1542                                                struct ras_query_if *query_info)
1543 {
1544         int ret;
1545
1546         if (!query_info)
1547                 /* do nothing if query_info is not specified */
1548                 return 0;
1549
1550         ret = amdgpu_ras_query_error_status(adev, query_info);
1551         if (ret)
1552                 return ret;
1553
1554         *ce_count += query_info->ce_count;
1555         *ue_count += query_info->ue_count;
1556
1557         /* some hardware/IPs support read-to-clear, so there is no need to
1558          * explicitly reset the error status after the query call */
1559         if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1560             amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1561                 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1562                         dev_warn(adev->dev,
1563                                  "Failed to reset error counter and error status\n");
1564         }
1565
1566         return 0;
1567 }
1568
1569 /**
1570  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1571  * @adev: pointer to AMD GPU device
1572  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1573  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1574  * errors.
1575  * @query_info: pointer to ras_query_if if the query request is only for a
1576  * specific ip block; if it is NULL, then the query request is for all the
1577  * ip blocks that support querying ras error counters/status
1578  *
1579  * If @ce_count or @ue_count is set, count and return the corresponding
1580  * error counts in those integer pointers. Return 0 if the device
1581  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1582  */
1583 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1584                                  unsigned long *ce_count,
1585                                  unsigned long *ue_count,
1586                                  struct ras_query_if *query_info)
1587 {
1588         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1589         struct ras_manager *obj;
1590         unsigned long ce, ue;
1591         int ret = 0;
1592
1593         if (!adev->ras_enabled || !con)
1594                 return -EOPNOTSUPP;
1595
1596         /* Don't count since there is nothing to report.
1597          */
1598         if (!ce_count && !ue_count)
1599                 return 0;
1600
1601         ce = 0;
1602         ue = 0;
1603         if (!query_info) {
1604                 /* query all the ip blocks that support ras query interface */
1605                 list_for_each_entry(obj, &con->head, node) {
1606                         struct ras_query_if info = {
1607                                 .head = obj->head,
1608                         };
1609
1610                         ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1611                 }
1612         } else {
1613                 /* query specific ip block */
1614                 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1615         }
1616
1617         if (ret)
1618                 return ret;
1619
1620         if (ce_count)
1621                 *ce_count = ce;
1622
1623         if (ue_count)
1624                 *ue_count = ue;
1625
1626         return 0;
1627 }
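
/*
 * Usage sketch: pass a NULL query_info to accumulate the CE/UE totals
 * across every RAS-capable IP block:
 *
 *      unsigned long ce = 0, ue = 0;
 *
 *      if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *              dev_info(adev->dev, "ce %lu, ue %lu\n", ce, ue);
 */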
1628 /* query/inject/cure end */
1629
1630
1631 /* sysfs begin */
1632
1633 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1634                 struct ras_badpage **bps, unsigned int *count);
1635
1636 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1637 {
1638         switch (flags) {
1639         case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1640                 return "R";
1641         case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1642                 return "P";
1643         case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1644         default:
1645                 return "F";
1646         }
1647 }
1648
1649 /**
1650  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1651  *
1652  * It allows the user to read the bad pages of vram on the gpu through
1653  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1654  *
1655  * It outputs multiple lines, and each line stands for one gpu page.
1656  *
1657  * The format of each line is as follows:
1658  * gpu pfn : gpu page size : flags
1659  *
1660  * gpu pfn and gpu page size are printed in hex format.
1661  * flags can be one of the characters below:
1662  *
1663  * R: reserved, this gpu page is reserved and not available for use.
1664  *
1665  * P: pending for reserve, this gpu page is marked as bad and will be
1666  * reserved in the next window of page_reserve.
1667  *
1668  * F: unable to reserve. this gpu page can't be reserved for some reason.
1669  *
1670  * Examples:
1671  *
1672  * .. code-block:: bash
1673  *
1674  *      0x00000001 : 0x00001000 : R
1675  *      0x00000002 : 0x00001000 : P
1676  *
1677  */
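
/*
 * For example, the whole list can be dumped with a plain read (the card
 * index is system dependent):
 *
 *      cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 */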
1678
1679 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1680                 struct kobject *kobj, struct bin_attribute *attr,
1681                 char *buf, loff_t ppos, size_t count)
1682 {
1683         struct amdgpu_ras *con =
1684                 container_of(attr, struct amdgpu_ras, badpages_attr);
1685         struct amdgpu_device *adev = con->adev;
1686         const unsigned int element_size =
1687                 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1688         unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1689         unsigned int end = div64_ul(ppos + count - 1, element_size);
1690         ssize_t s = 0;
1691         struct ras_badpage *bps = NULL;
1692         unsigned int bps_count = 0;
1693
1694         memset(buf, 0, count);
1695
1696         if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1697                 return 0;
1698
1699         for (; start < end && start < bps_count; start++)
1700                 s += scnprintf(&buf[s], element_size + 1,
1701                                 "0x%08x : 0x%08x : %1s\n",
1702                                 bps[start].bp,
1703                                 bps[start].size,
1704                                 amdgpu_ras_badpage_flags_str(bps[start].flags));
1705
1706         kfree(bps);
1707
1708         return s;
1709 }
1710
1711 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1712                 struct device_attribute *attr, char *buf)
1713 {
1714         struct amdgpu_ras *con =
1715                 container_of(attr, struct amdgpu_ras, features_attr);
1716
1717         return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1718 }
1719
1720 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1721                 struct device_attribute *attr, char *buf)
1722 {
1723         struct amdgpu_ras *con =
1724                 container_of(attr, struct amdgpu_ras, version_attr);
1725         return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1726 }
1727
1728 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1729                 struct device_attribute *attr, char *buf)
1730 {
1731         struct amdgpu_ras *con =
1732                 container_of(attr, struct amdgpu_ras, schema_attr);
1733         return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1734 }
1735
1736 static struct {
1737         enum ras_event_type type;
1738         const char *name;
1739 } dump_event[] = {
1740         {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1741         {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1742         {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1743 };
1744
1745 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1746                                                  struct device_attribute *attr, char *buf)
1747 {
1748         struct amdgpu_ras *con =
1749                 container_of(attr, struct amdgpu_ras, event_state_attr);
1750         struct ras_event_manager *event_mgr = con->event_mgr;
1751         struct ras_event_state *event_state;
1752         int i, size = 0;
1753
1754         if (!event_mgr)
1755                 return -EINVAL;
1756
1757         size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1758         for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1759                 event_state = &event_mgr->event_state[dump_event[i].type];
1760                 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1761                                       dump_event[i].name,
1762                                       atomic64_read(&event_state->count),
1763                                       event_state->last_seqno);
1764         }
1765
1766         return (ssize_t)size;
1767 }
1768
1769 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1770 {
1771         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1772
1773         if (adev->dev->kobj.sd)
1774                 sysfs_remove_file_from_group(&adev->dev->kobj,
1775                                 &con->badpages_attr.attr,
1776                                 RAS_FS_NAME);
1777 }
1778
1779 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1780 {
1781         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1782         struct attribute *attrs[] = {
1783                 &con->features_attr.attr,
1784                 &con->version_attr.attr,
1785                 &con->schema_attr.attr,
1786                 &con->event_state_attr.attr,
1787                 NULL
1788         };
1789         struct attribute_group group = {
1790                 .name = RAS_FS_NAME,
1791                 .attrs = attrs,
1792         };
1793
1794         if (adev->dev->kobj.sd)
1795                 sysfs_remove_group(&adev->dev->kobj, &group);
1796
1797         return 0;
1798 }
1799
1800 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1801                 struct ras_common_if *head)
1802 {
1803         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1804
1805         if (amdgpu_aca_is_enabled(adev))
1806                 return 0;
1807
1808         if (!obj || obj->attr_inuse)
1809                 return -EINVAL;
1810
1811         get_obj(obj);
1812
1813         snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1814                 "%s_err_count", head->name);
1815
1816         obj->sysfs_attr = (struct device_attribute){
1817                 .attr = {
1818                         .name = obj->fs_data.sysfs_name,
1819                         .mode = S_IRUGO,
1820                 },
1821                 .show = amdgpu_ras_sysfs_read,
1822         };
1823         sysfs_attr_init(&obj->sysfs_attr.attr);
1824
1825         if (sysfs_add_file_to_group(&adev->dev->kobj,
1826                                 &obj->sysfs_attr.attr,
1827                                 RAS_FS_NAME)) {
1828                 put_obj(obj);
1829                 return -EINVAL;
1830         }
1831
1832         obj->attr_inuse = 1;
1833
1834         return 0;
1835 }
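
/*
 * For example, for a block whose ras_common_if name is "umc", the node
 * created above appears as
 * /sys/class/drm/card[0/1/2...]/device/ras/umc_err_count.
 */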
1836
1837 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1838                 struct ras_common_if *head)
1839 {
1840         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1841
1842         if (amdgpu_aca_is_enabled(adev))
1843                 return 0;
1844
1845         if (!obj || !obj->attr_inuse)
1846                 return -EINVAL;
1847
1848         if (adev->dev->kobj.sd)
1849                 sysfs_remove_file_from_group(&adev->dev->kobj,
1850                                 &obj->sysfs_attr.attr,
1851                                 RAS_FS_NAME);
1852         obj->attr_inuse = 0;
1853         put_obj(obj);
1854
1855         return 0;
1856 }
1857
1858 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1859 {
1860         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1861         struct ras_manager *obj, *tmp;
1862
1863         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1864                 amdgpu_ras_sysfs_remove(adev, &obj->head);
1865         }
1866
1867         if (amdgpu_bad_page_threshold != 0)
1868                 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1869
1870         amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1871
1872         return 0;
1873 }
1874 /* sysfs end */
1875
1876 /**
1877  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1878  *
1879  * Normally when there is an uncorrectable error, the driver will reset
1880  * the GPU to recover.  However, in the event of an unrecoverable error,
1881  * the driver provides an interface to reboot the system automatically
1882  * in that event.
1883  *
1884  * The following file in debugfs provides that interface:
1885  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1886  *
1887  * Usage:
1888  *
1889  * .. code-block:: bash
1890  *
1891  *      echo true > .../ras/auto_reboot
1892  *
1893  */
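
/*
 * The current setting can be read back the same way, e.g.:
 *
 *      cat /sys/kernel/debug/dri/0/ras/auto_reboot
 *
 * which prints Y or N.
 */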
1894 /* debugfs begin */
1895 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1896 {
1897         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1898         struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1899         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1900         struct dentry     *dir;
1901
1902         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1903         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1904                             &amdgpu_ras_debugfs_ctrl_ops);
1905         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1906                             &amdgpu_ras_debugfs_eeprom_ops);
1907         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1908                            &con->bad_page_cnt_threshold);
1909         debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1910         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1911         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1912         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1913                             &amdgpu_ras_debugfs_eeprom_size_ops);
1914         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1915                                                        S_IRUGO, dir, adev,
1916                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1917         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1918
1919         /*
1920          * After an uncorrectable error occurs, GPU recovery is usually
1921          * scheduled. But due to a known problem where GPU recovery may fail
1922          * to bring the GPU back, the interface below provides the user a
1923          * direct way to reboot the system automatically when an
1924          * ERREVENT_ATHUB_INTERRUPT is generated; in that case the normal
1925          * GPU recovery routine will never be called.
1926          */
1927         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1928
1929         /*
1930          * The user can set this so that the hardware error count registers
1931          * of the RAS IPs are not cleaned up during ras recovery.
1932          */
1933         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1934                             &con->disable_ras_err_cnt_harvest);
1935         return dir;
1936 }
1937
1938 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1939                                       struct ras_fs_if *head,
1940                                       struct dentry *dir)
1941 {
1942         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1943
1944         if (!obj || !dir)
1945                 return;
1946
1947         get_obj(obj);
1948
1949         memcpy(obj->fs_data.debugfs_name,
1950                         head->debugfs_name,
1951                         sizeof(obj->fs_data.debugfs_name));
1952
1953         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1954                             obj, &amdgpu_ras_debugfs_ops);
1955 }
1956
1957 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
1958 {
1959         bool ret;
1960
1961         switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1962         case IP_VERSION(13, 0, 6):
1963         case IP_VERSION(13, 0, 14):
1964                 ret = true;
1965                 break;
1966         default:
1967                 ret = false;
1968                 break;
1969         }
1970
1971         return ret;
1972 }
1973
1974 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1975 {
1976         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1977         struct dentry *dir;
1978         struct ras_manager *obj;
1979         struct ras_fs_if fs_info;
1980
1981         /*
1982          * this function won't be called in the resume path, so there is no
1983          * need to check suspend and gpu reset status
1984          */
1985         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1986                 return;
1987
1988         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1989
1990         list_for_each_entry(obj, &con->head, node) {
1991                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1992                         (obj->attr_inuse == 1)) {
1993                         sprintf(fs_info.debugfs_name, "%s_err_inject",
1994                                         get_ras_block_str(&obj->head));
1995                         fs_info.head = obj->head;
1996                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1997                 }
1998         }
1999
2000         if (amdgpu_ras_aca_is_supported(adev)) {
2001                 if (amdgpu_aca_is_enabled(adev))
2002                         amdgpu_aca_smu_debugfs_init(adev, dir);
2003                 else
2004                         amdgpu_mca_smu_debugfs_init(adev, dir);
2005         }
2006 }
2007
2008 /* debugfs end */
2009
2010 /* ras fs */
2011 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2012                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
2013 static DEVICE_ATTR(features, S_IRUGO,
2014                 amdgpu_ras_sysfs_features_read, NULL);
2015 static DEVICE_ATTR(version, 0444,
2016                 amdgpu_ras_sysfs_version_show, NULL);
2017 static DEVICE_ATTR(schema, 0444,
2018                 amdgpu_ras_sysfs_schema_show, NULL);
2019 static DEVICE_ATTR(event_state, 0444,
2020                    amdgpu_ras_sysfs_event_state_show, NULL);
2021 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2022 {
2023         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2024         struct attribute_group group = {
2025                 .name = RAS_FS_NAME,
2026         };
2027         struct attribute *attrs[] = {
2028                 &con->features_attr.attr,
2029                 &con->version_attr.attr,
2030                 &con->schema_attr.attr,
2031                 &con->event_state_attr.attr,
2032                 NULL
2033         };
2034         struct bin_attribute *bin_attrs[] = {
2035                 NULL,
2036                 NULL,
2037         };
2038         int r;
2039
2040         group.attrs = attrs;
2041
2042         /* add features entry */
2043         con->features_attr = dev_attr_features;
2044         sysfs_attr_init(attrs[0]);
2045
2046         /* add version entry */
2047         con->version_attr = dev_attr_version;
2048         sysfs_attr_init(attrs[1]);
2049
2050         /* add schema entry */
2051         con->schema_attr = dev_attr_schema;
2052         sysfs_attr_init(attrs[2]);
2053
2054         /* add event_state entry */
2055         con->event_state_attr = dev_attr_event_state;
2056         sysfs_attr_init(attrs[3]);
2057
2058         if (amdgpu_bad_page_threshold != 0) {
2059                 /* add bad_page_features entry */
2060                 bin_attr_gpu_vram_bad_pages.private = NULL;
2061                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2062                 bin_attrs[0] = &con->badpages_attr;
2063                 group.bin_attrs = bin_attrs;
2064                 sysfs_bin_attr_init(bin_attrs[0]);
2065         }
2066
2067         r = sysfs_create_group(&adev->dev->kobj, &group);
2068         if (r)
2069                 dev_err(adev->dev, "Failed to create RAS sysfs group!\n");
2070
2071         return 0;
2072 }
2073
2074 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2075 {
2076         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2077         struct ras_manager *con_obj, *ip_obj, *tmp;
2078
2079         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2080                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2081                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2082                         if (ip_obj)
2083                                 put_obj(ip_obj);
2084                 }
2085         }
2086
2087         amdgpu_ras_sysfs_remove_all(adev);
2088         return 0;
2089 }
2090 /* ras fs end */
2091
2092 /* ih begin */
2093
2094 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
2095  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2096  * register to check whether the interrupt has triggered or not, and properly
2097  * ack the interrupt if it is there
2098  */
2099 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2100 {
2101         /* Fatal error events are handled on host side */
2102         if (amdgpu_sriov_vf(adev))
2103                 return;
2104
2105         if (adev->nbio.ras &&
2106             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2107                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2108
2109         if (adev->nbio.ras &&
2110             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2111                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2112 }
2113
2114 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2115                                 struct amdgpu_iv_entry *entry)
2116 {
2117         bool poison_stat = false;
2118         struct amdgpu_device *adev = obj->adev;
2119         struct amdgpu_ras_block_object *block_obj =
2120                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2121         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2122         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2123         u64 event_id;
2124         int ret;
2125
2126         if (!block_obj || !con)
2127                 return;
2128
2129         ret = amdgpu_ras_mark_ras_event(adev, type);
2130         if (ret)
2131                 return;
2132
2133         /* both query_poison_status and handle_poison_consumption are optional,
2134          * but at least one of them should be implemented if we need a poison
2135          * consumption handler
2136          */
2137         if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2138                 poison_stat = block_obj->hw_ops->query_poison_status(adev);
2139                 if (!poison_stat) {
2140                         /* Not poison consumption interrupt, no need to handle it */
2141                         dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2142                                         block_obj->ras_comm.name);
2143
2144                         return;
2145                 }
2146         }
2147
2148         amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2149
2150         if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2151                 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2152
2153         /* gpu reset is the fallback for the failed and default cases.
2154          * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2155          */
2156         if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2157                 event_id = amdgpu_ras_acquire_event_id(adev, type);
2158                 RAS_EVENT_LOG(adev, event_id,
2159                               "GPU reset for %s RAS poison consumption is issued!\n",
2160                               block_obj->ras_comm.name);
2161                 amdgpu_ras_reset_gpu(adev);
2162         }
2163
2164         if (!poison_stat)
2165                 amdgpu_gfx_poison_consumption_handler(adev, entry);
2166 }
2167
2168 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2169                                 struct amdgpu_iv_entry *entry)
2170 {
2171         struct amdgpu_device *adev = obj->adev;
2172         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2173         u64 event_id;
2174         int ret;
2175
2176         ret = amdgpu_ras_mark_ras_event(adev, type);
2177         if (ret)
2178                 return;
2179
2180         event_id = amdgpu_ras_acquire_event_id(adev, type);
2181         RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2182
2183         if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2184                 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2185
2186                 atomic_inc(&con->page_retirement_req_cnt);
2187                 atomic_inc(&con->poison_creation_count);
2188
2189                 wake_up(&con->page_retirement_wq);
2190         }
2191 }
2192
2193 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2194                                 struct amdgpu_iv_entry *entry)
2195 {
2196         struct ras_ih_data *data = &obj->ih_data;
2197         struct ras_err_data err_data;
2198         int ret;
2199
2200         if (!data->cb)
2201                 return;
2202
2203         ret = amdgpu_ras_error_data_init(&err_data);
2204         if (ret)
2205                 return;
2206
2207         /* Let the IP handle its data; we may need to get the output
2208          * from the callback to update the error type/count, etc.
2209          */
2210         amdgpu_ras_set_fed(obj->adev, true);
2211         ret = data->cb(obj->adev, &err_data, entry);
2212         /* A ue will trigger an interrupt, and in that case
2213          * we need to do a reset to recover the whole system.
2214          * But leave that recovery to the IP; here we just dispatch
2215          * the error.
2216          */
2217         if (ret == AMDGPU_RAS_SUCCESS) {
2218                 /* these counts could be left as 0 if
2219                  * some blocks do not count error numbers
2220                  */
2221                 obj->err_data.ue_count += err_data.ue_count;
2222                 obj->err_data.ce_count += err_data.ce_count;
2223                 obj->err_data.de_count += err_data.de_count;
2224         }
2225
2226         amdgpu_ras_error_data_fini(&err_data);
2227 }
2228
2229 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2230 {
2231         struct ras_ih_data *data = &obj->ih_data;
2232         struct amdgpu_iv_entry entry;
2233
2234         while (data->rptr != data->wptr) {
2235                 rmb();
2236                 memcpy(&entry, &data->ring[data->rptr],
2237                                 data->element_size);
2238
2239                 wmb();
2240                 data->rptr = (data->aligned_element_size +
2241                                 data->rptr) % data->ring_size;
2242
2243                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2244                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2245                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2246                         else
2247                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2248                 } else {
2249                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2250                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
2251                         else
2252                                 dev_warn(obj->adev->dev,
2253                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
2254                 }
2255         }
2256 }
2257
2258 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2259 {
2260         struct ras_ih_data *data =
2261                 container_of(work, struct ras_ih_data, ih_work);
2262         struct ras_manager *obj =
2263                 container_of(data, struct ras_manager, ih_data);
2264
2265         amdgpu_ras_interrupt_handler(obj);
2266 }
2267
2268 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2269                 struct ras_dispatch_if *info)
2270 {
2271         struct ras_manager *obj;
2272         struct ras_ih_data *data;
2273
2274         obj = amdgpu_ras_find_obj(adev, &info->head);
2275         if (!obj)
2276                 return -EINVAL;
2277
2278         data = &obj->ih_data;
2279
2280         if (data->inuse == 0)
2281                 return 0;
2282
2283         /* Note: the ring is not overflow-safe; wptr may wrap around and overwrite unprocessed entries */
2284         memcpy(&data->ring[data->wptr], info->entry,
2285                         data->element_size);
2286
2287         wmb();
2288         data->wptr = (data->aligned_element_size +
2289                         data->wptr) % data->ring_size;
2290
2291         schedule_work(&data->ih_work);
2292
2293         return 0;
2294 }
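
/*
 * Usage sketch (illustrative): an IP interrupt handler forwards an IV
 * entry to the RAS core roughly like this, where ras_if is the IP's
 * registered ras_common_if:
 *
 *      struct ras_dispatch_if ih_info = {
 *              .head = *ras_if,
 *              .entry = entry,
 *      };
 *      amdgpu_ras_interrupt_dispatch(adev, &ih_info);
 */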
2295
2296 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2297                 struct ras_common_if *head)
2298 {
2299         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2300         struct ras_ih_data *data;
2301
2302         if (!obj)
2303                 return -EINVAL;
2304
2305         data = &obj->ih_data;
2306         if (data->inuse == 0)
2307                 return 0;
2308
2309         cancel_work_sync(&data->ih_work);
2310
2311         kfree(data->ring);
2312         memset(data, 0, sizeof(*data));
2313         put_obj(obj);
2314
2315         return 0;
2316 }
2317
2318 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2319                 struct ras_common_if *head)
2320 {
2321         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2322         struct ras_ih_data *data;
2323         struct amdgpu_ras_block_object *ras_obj;
2324
2325         if (!obj) {
2326                 /* in case we register the IH before enabling the ras feature */
2327                 obj = amdgpu_ras_create_obj(adev, head);
2328                 if (!obj)
2329                         return -EINVAL;
2330         } else
2331                 get_obj(obj);
2332
2333         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2334
2335         data = &obj->ih_data;
2336         /* add the callback, etc. */
2337         *data = (struct ras_ih_data) {
2338                 .inuse = 0,
2339                 .cb = ras_obj->ras_cb,
2340                 .element_size = sizeof(struct amdgpu_iv_entry),
2341                 .rptr = 0,
2342                 .wptr = 0,
2343         };
2344
2345         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2346
2347         data->aligned_element_size = ALIGN(data->element_size, 8);
2348         /* the ring can store 64 iv entries. */
2349         data->ring_size = 64 * data->aligned_element_size;
2350         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2351         if (!data->ring) {
2352                 put_obj(obj);
2353                 return -ENOMEM;
2354         }
2355
2356         /* IH is ready */
2357         data->inuse = 1;
2358
2359         return 0;
2360 }
2361
2362 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2363 {
2364         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2365         struct ras_manager *obj, *tmp;
2366
2367         list_for_each_entry_safe(obj, tmp, &con->head, node) {
2368                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2369         }
2370
2371         return 0;
2372 }
2373 /* ih end */
2374
2375 /* traverse all IPs except NBIO to query the error counters */
2376 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2377 {
2378         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2379         struct ras_manager *obj;
2380
2381         if (!adev->ras_enabled || !con)
2382                 return;
2383
2384         list_for_each_entry(obj, &con->head, node) {
2385                 struct ras_query_if info = {
2386                         .head = obj->head,
2387                 };
2388
2389                 /*
2390                  * The PCIE_BIF IP has a separate isr for the ras controller
2391                  * interrupt, and the block-specific ras counter query is
2392                  * done in that isr. So skip such blocks in the common
2393                  * sync flood interrupt isr path.
2394                  */
2395                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2396                         continue;
2397
2398                 /*
2399                  * this is a workaround for aldebaran: skip sending the msg to
2400                  * smu to get the ecc_info table, because smu currently fails
2401                  * to handle that request.
2402                  * it should be removed once smu can handle the ecc_info table.
2403                  */
2404                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2405                     (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2406                      IP_VERSION(13, 0, 2)))
2407                         continue;
2408
2409                 amdgpu_ras_query_error_status_with_event(adev, &info, type);
2410
2411                 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2412                             IP_VERSION(11, 0, 2) &&
2413                     amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2414                             IP_VERSION(11, 0, 4) &&
2415                     amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2416                             IP_VERSION(13, 0, 0)) {
2417                         if (amdgpu_ras_reset_error_status(adev, info.head.block))
2418                         dev_warn(adev->dev, "Failed to reset error counter and error status\n");
2419                 }
2420         }
2421 }
2422
2423 /* Parse RdRspStatus and WrRspStatus */
2424 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2425                                           struct ras_query_if *info)
2426 {
2427         struct amdgpu_ras_block_object *block_obj;
2428         /*
2429          * Only two blocks need to query the read/write
2430          * RspStatus in the current state
2431          */
2432         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2433                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2434                 return;
2435
2436         block_obj = amdgpu_ras_get_ras_block(adev,
2437                                         info->head.block,
2438                                         info->head.sub_block_index);
2439
2440         if (!block_obj || !block_obj->hw_ops) {
2441                 dev_dbg_once(adev->dev, "%s doesn't configure RAS functions\n",
2442                              get_ras_block_str(&info->head));
2443                 return;
2444         }
2445
2446         if (block_obj->hw_ops->query_ras_error_status)
2447                 block_obj->hw_ops->query_ras_error_status(adev);
2448
2449 }
2450
2451 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2452 {
2453         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2454         struct ras_manager *obj;
2455
2456         if (!adev->ras_enabled || !con)
2457                 return;
2458
2459         list_for_each_entry(obj, &con->head, node) {
2460                 struct ras_query_if info = {
2461                         .head = obj->head,
2462                 };
2463
2464                 amdgpu_ras_error_status_query(adev, &info);
2465         }
2466 }
2467
2468 /* recovery begin */
2469
2470 /* return 0 on success.
2471  * the caller needs to free bps.
2472  */
2473 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2474                 struct ras_badpage **bps, unsigned int *count)
2475 {
2476         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2477         struct ras_err_handler_data *data;
2478         int i = 0;
2479         int ret = 0, status;
2480
2481         if (!con || !con->eh_data || !bps || !count)
2482                 return -EINVAL;
2483
2484         mutex_lock(&con->recovery_lock);
2485         data = con->eh_data;
2486         if (!data || data->count == 0) {
2487                 *bps = NULL;
2488                 ret = -EINVAL;
2489                 goto out;
2490         }
2491
2492         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2493         if (!*bps) {
2494                 ret = -ENOMEM;
2495                 goto out;
2496         }
2497
2498         for (; i < data->count; i++) {
2499                 (*bps)[i] = (struct ras_badpage){
2500                         .bp = data->bps[i].retired_page,
2501                         .size = AMDGPU_GPU_PAGE_SIZE,
2502                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2503                 };
2504                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2505                                 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2506                 if (status == -EBUSY)
2507                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2508                 else if (status == -ENOENT)
2509                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2510         }
2511
2512         *count = data->count;
2513 out:
2514         mutex_unlock(&con->recovery_lock);
2515         return ret;
2516 }
2517
2518 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2519                                    struct amdgpu_hive_info *hive, bool status)
2520 {
2521         struct amdgpu_device *tmp_adev;
2522
2523         if (hive) {
2524                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2525                         amdgpu_ras_set_fed(tmp_adev, status);
2526         } else {
2527                 amdgpu_ras_set_fed(adev, status);
2528         }
2529 }
2530
2531 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2532 {
2533         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2534         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2535         int hive_ras_recovery = 0;
2536
2537         if (hive) {
2538                 hive_ras_recovery = atomic_read(&hive->ras_recovery);
2539                 amdgpu_put_xgmi_hive(hive);
2540         }
2541
2542         if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2543                 return true;
2544
2545         return false;
2546 }
2547
2548 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2549 {
2550         if (amdgpu_ras_intr_triggered())
2551                 return RAS_EVENT_TYPE_FATAL;
2552         else
2553                 return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2554 }
2555
2556 static void amdgpu_ras_do_recovery(struct work_struct *work)
2557 {
2558         struct amdgpu_ras *ras =
2559                 container_of(work, struct amdgpu_ras, recovery_work);
2560         struct amdgpu_device *remote_adev = NULL;
2561         struct amdgpu_device *adev = ras->adev;
2562         struct list_head device_list, *device_list_handle =  NULL;
2563         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2564         enum ras_event_type type;
2565
2566         if (hive) {
2567                 atomic_set(&hive->ras_recovery, 1);
2568
2569                 /* If any device that is part of the hive received a RAS fatal
2570                  * error interrupt, set the fatal error status on all. This
2571                  * condition requires a recovery, and the flag will be cleared
2572                  * as part of that recovery.
2573                  */
2574                 list_for_each_entry(remote_adev, &hive->device_list,
2575                                     gmc.xgmi.head)
2576                         if (amdgpu_ras_get_fed_status(remote_adev)) {
2577                                 amdgpu_ras_set_fed_all(adev, hive, true);
2578                                 break;
2579                         }
2580         }
2581         if (!ras->disable_ras_err_cnt_harvest) {
2582
2583                 /* Build list of devices to query RAS related errors */
2584                 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2585                         device_list_handle = &hive->device_list;
2586                 } else {
2587                         INIT_LIST_HEAD(&device_list);
2588                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
2589                         device_list_handle = &device_list;
2590                 }
2591
2592                 type = amdgpu_ras_get_fatal_error_event(adev);
2593                 list_for_each_entry(remote_adev,
2594                                 device_list_handle, gmc.xgmi.head) {
2595                         amdgpu_ras_query_err_status(remote_adev);
2596                         amdgpu_ras_log_on_err_counter(remote_adev, type);
2597                 }
2598
2599         }
2600
2601         if (amdgpu_device_should_recover_gpu(ras->adev)) {
2602                 struct amdgpu_reset_context reset_context;
2603                 memset(&reset_context, 0, sizeof(reset_context));
2604
2605                 reset_context.method = AMD_RESET_METHOD_NONE;
2606                 reset_context.reset_req_dev = adev;
2607                 reset_context.src = AMDGPU_RESET_SRC_RAS;
2608
2609                 /* Perform full reset in fatal error mode */
2610                 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2611                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2612                 else {
2613                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2614
2615                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2616                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2617                                 reset_context.method = AMD_RESET_METHOD_MODE2;
2618                         }
2619
2620                         /* If a fatal error occurs in poison mode, a mode1 reset
2621                          * is used to recover the gpu.
2622                          */
2623                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2624                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2625                                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2626
2627                                 psp_fatal_error_recovery_quirk(&adev->psp);
2628                         }
2629                 }
2630
2631                 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2632         }
2633         atomic_set(&ras->in_recovery, 0);
2634         if (hive) {
2635                 atomic_set(&hive->ras_recovery, 0);
2636                 amdgpu_put_xgmi_hive(hive);
2637         }
2638 }
2639
2640 /* alloc/realloc bps array */
2641 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2642                 struct ras_err_handler_data *data, int pages)
2643 {
2644         unsigned int old_space = data->count + data->space_left;
2645         unsigned int new_space = old_space + pages;
2646         unsigned int align_space = ALIGN(new_space, 512);
2647         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2648
2649         if (!bps)
2650                 return -ENOMEM;
2652
2653         if (data->bps) {
2654                 memcpy(bps, data->bps,
2655                                 data->count * sizeof(*data->bps));
2656                 kfree(data->bps);
2657         }
2658
2659         data->bps = bps;
2660         data->space_left += align_space - old_space;
2661         return 0;
2662 }
2663
2664 /* it deals with vram only. */
2665 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2666                 struct eeprom_table_record *bps, int pages)
2667 {
2668         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2669         struct ras_err_handler_data *data;
2670         int ret = 0;
2671         uint32_t i;
2672
2673         if (!con || !con->eh_data || !bps || pages <= 0)
2674                 return 0;
2675
2676         mutex_lock(&con->recovery_lock);
2677         data = con->eh_data;
2678         if (!data)
2679                 goto out;
2680
2681         for (i = 0; i < pages; i++) {
2682                 if (amdgpu_ras_check_bad_page_unlock(con,
2683                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2684                         continue;
2685
2686                 if (!data->space_left &&
2687                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2688                         ret = -ENOMEM;
2689                         goto out;
2690                 }
2691
2692                 amdgpu_ras_reserve_page(adev, bps[i].retired_page);
2693
2694                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2695                 data->count++;
2696                 data->space_left--;
2697         }
2698 out:
2699         mutex_unlock(&con->recovery_lock);
2700
2701         return ret;
2702 }
2703
2704 /*
2705  * write the error record array to eeprom; the function should be
2706  * protected by recovery_lock
2707  * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2708  */
2709 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2710                 unsigned long *new_cnt)
2711 {
2712         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2713         struct ras_err_handler_data *data;
2714         struct amdgpu_ras_eeprom_control *control;
2715         int save_count;
2716
2717         if (!con || !con->eh_data) {
2718                 if (new_cnt)
2719                         *new_cnt = 0;
2720
2721                 return 0;
2722         }
2723
2724         mutex_lock(&con->recovery_lock);
2725         control = &con->eeprom_control;
2726         data = con->eh_data;
2727         save_count = data->count - control->ras_num_recs;
2728         mutex_unlock(&con->recovery_lock);
2729
2730         if (new_cnt)
2731                 *new_cnt = save_count / adev->umc.retire_unit;
2732
2733         /* only new entries are saved */
2734         if (save_count > 0) {
2735                 if (amdgpu_ras_eeprom_append(control,
2736                                              &data->bps[control->ras_num_recs],
2737                                              save_count)) {
2738                         dev_err(adev->dev, "Failed to save EEPROM table data!\n");
2739                         return -EIO;
2740                 }
2741
2742                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2743         }
2744
2745         return 0;
2746 }
2747
2748 /*
2749  * read the error record array from eeprom and reserve enough space for
2750  * storing new bad pages
2751  */
2752 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2753 {
2754         struct amdgpu_ras_eeprom_control *control =
2755                 &adev->psp.ras_context.ras->eeprom_control;
2756         struct eeprom_table_record *bps;
2757         int ret;
2758
2759         /* no bad page record, skip eeprom access */
2760         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2761                 return 0;
2762
2763         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2764         if (!bps)
2765                 return -ENOMEM;
2766
2767         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2768         if (ret)
2769                 dev_err(adev->dev, "Failed to load EEPROM table records!\n");
2770         else
2771                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2772
2773         kfree(bps);
2774         return ret;
2775 }
2776
2777 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2778                                 uint64_t addr)
2779 {
2780         struct ras_err_handler_data *data = con->eh_data;
2781         int i;
2782
2783         addr >>= AMDGPU_GPU_PAGE_SHIFT;
2784         for (i = 0; i < data->count; i++)
2785                 if (addr == data->bps[i].retired_page)
2786                         return true;
2787
2788         return false;
2789 }
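     /*
      * Note the unit conversion above: callers pass a byte address,
      * while data->bps[].retired_page stores GPU page frame numbers,
      * hence the shift by AMDGPU_GPU_PAGE_SHIFT before comparing.
      * E.g., assuming a 4 KiB GPU page, byte address 0x2000 matches a
      * retired_page value of 0x2.
      */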
2790
2791 /*
2792  * check if an address belongs to a bad page
2793  *
2794  * Note: this check is only for the umc block
2795  */
2796 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2797                                 uint64_t addr)
2798 {
2799         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2800         bool ret = false;
2801
2802         if (!con || !con->eh_data)
2803                 return ret;
2804
2805         mutex_lock(&con->recovery_lock);
2806         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2807         mutex_unlock(&con->recovery_lock);
2808         return ret;
2809 }
2810
2811 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2812                                           uint32_t max_count)
2813 {
2814         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2815
2816         /*
2817          * Justification of the bad_page_cnt_threshold value in the ras structure
2818          *
2819          * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2820          * in eeprom, or amdgpu_bad_page_threshold == -2; this gives two
2821          * scenarios:
2822          *
2823          * Bad page retirement enabled:
2824          *    - If amdgpu_bad_page_threshold = -2, bad_page_cnt_threshold
2825          *      takes a typical value derived from the formula below.
2826          *
2827          *    - If the user value satisfies 0 < amdgpu_bad_page_threshold <
2828          *      max record length in eeprom, use it directly.
2829          *
2830          * Bad page retirement disabled:
2831          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2832          *      functionality is disabled, and bad_page_cnt_threshold
2833          *      has no effect.
2834          */
2835
2836         if (amdgpu_bad_page_threshold < 0) {
2837                 u64 val = adev->gmc.mc_vram_size;
2838
2839                 do_div(val, RAS_BAD_PAGE_COVER);
2840                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2841                                                   max_count);
2842         } else {
2843                 con->bad_page_cnt_threshold = min_t(int, max_count,
2844                                                     amdgpu_bad_page_threshold);
2845         }
2846 }
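     /*
      * Worked example (illustrative; assumes a hypothetical 100 MiB
      * RAS_BAD_PAGE_COVER on a 32 GiB board): with a negative
      * amdgpu_bad_page_threshold (e.g. -2), val = 32 GiB / 100 MiB =
      * 327, so bad_page_cnt_threshold = min(327, max_count). With a
      * user value such as amdgpu_bad_page_threshold = 100, the
      * threshold is simply min_t(int, max_count, 100).
      */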
2847
2848 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
2849                 enum amdgpu_ras_block block, uint16_t pasid,
2850                 pasid_notify pasid_fn, void *data, uint32_t reset)
2851 {
2852         int ret = 0;
2853         struct ras_poison_msg poison_msg;
2854         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2855
2856         memset(&poison_msg, 0, sizeof(poison_msg));
2857         poison_msg.block = block;
2858         poison_msg.pasid = pasid;
2859         poison_msg.reset = reset;
2860         poison_msg.pasid_fn = pasid_fn;
2861         poison_msg.data = data;
2862
2863         ret = kfifo_put(&con->poison_fifo, poison_msg);
2864         if (!ret) {
2865                 dev_err(adev->dev, "Poison message fifo is full!\n");
2866                 return -ENOSPC;
2867         }
2868
2869         return 0;
2870 }
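     /*
      * Usage sketch (hypothetical caller, not taken from this file): an
      * IP block's poison-consumption path would queue a request and
      * then wake the page retirement thread, roughly:
      *
      *   if (!amdgpu_ras_put_poison_req(adev, AMDGPU_RAS_BLOCK__GFX,
      *                                  pasid, NULL, NULL,
      *                                  AMDGPU_RAS_GPU_RESET_MODE2_RESET)) {
      *           atomic_inc(&con->page_retirement_req_cnt);
      *           wake_up(&con->page_retirement_wq);
      *   }
      */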
2871
2872 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
2873                 struct ras_poison_msg *poison_msg)
2874 {
2875         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2876
2877         return kfifo_get(&con->poison_fifo, poison_msg);
2878 }
2879
2880 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
2881 {
2882         mutex_init(&ecc_log->lock);
2883
2884         INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
2885         ecc_log->de_queried_count = 0;
2886         ecc_log->prev_de_queried_count = 0;
2887 }
2888
2889 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
2890 {
2891         struct radix_tree_iter iter;
2892         void __rcu **slot;
2893         struct ras_ecc_err *ecc_err;
2894
2895         mutex_lock(&ecc_log->lock);
2896         radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2897                 ecc_err = radix_tree_deref_slot(slot);
2898                 kfree(ecc_err->err_pages.pfn);
2899                 kfree(ecc_err);
2900                 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
2901         }
2902         mutex_unlock(&ecc_log->lock);
2903
2904         mutex_destroy(&ecc_log->lock);
2905         ecc_log->de_queried_count = 0;
2906         ecc_log->prev_de_queried_count = 0;
2907 }
2908
2909 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
2910                                 uint32_t delayed_ms)
2911 {
2912         int ret;
2913
2914         mutex_lock(&con->umc_ecc_log.lock);
2915         ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
2916                         UMC_ECC_NEW_DETECTED_TAG);
2917         mutex_unlock(&con->umc_ecc_log.lock);
2918
2919         if (ret)
2920                 schedule_delayed_work(&con->page_retirement_dwork,
2921                         msecs_to_jiffies(delayed_ms));
2922
2923         return ret ? true : false;
2924 }
2925
2926 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
2927 {
2928         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2929                                               page_retirement_dwork.work);
2930         struct amdgpu_device *adev = con->adev;
2931         struct ras_err_data err_data;
2932         unsigned long err_cnt;
2933
2934         /* If gpu reset is ongoing, delay retiring the bad pages */
2935         if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
2936                 amdgpu_ras_schedule_retirement_dwork(con,
2937                                 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
2938                 return;
2939         }
2940
2941         amdgpu_ras_error_data_init(&err_data);
2942
2943         amdgpu_umc_handle_bad_pages(adev, &err_data);
2944         err_cnt = err_data.err_addr_cnt;
2945
2946         amdgpu_ras_error_data_fini(&err_data);
2947
2948         if (err_cnt && amdgpu_ras_is_rma(adev))
2949                 amdgpu_ras_reset_gpu(adev);
2950
2951         amdgpu_ras_schedule_retirement_dwork(con,
2952                         AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
2953 }
2954
2955 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
2956                                 uint32_t poison_creation_count)
2957 {
2958         int ret = 0;
2959         struct ras_ecc_log_info *ecc_log;
2960         struct ras_query_if info;
2961         uint32_t timeout = 0;
2962         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2963         uint64_t de_queried_count;
2964         uint32_t new_detect_count, total_detect_count;
2965         uint32_t need_query_count = poison_creation_count;
2966         bool query_data_timeout = false;
2967         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2968
2969         memset(&info, 0, sizeof(info));
2970         info.head.block = AMDGPU_RAS_BLOCK__UMC;
2971
2972         ecc_log = &ras->umc_ecc_log;
2973         total_detect_count = 0;
2974         do {
2975                 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
2976                 if (ret)
2977                         return ret;
2978
2979                 de_queried_count = ecc_log->de_queried_count;
2980                 if (de_queried_count > ecc_log->prev_de_queried_count) {
2981                         new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
2982                         ecc_log->prev_de_queried_count = de_queried_count;
2983                         timeout = 0;
2984                 } else {
2985                         new_detect_count = 0;
2986                 }
2987
2988                 if (new_detect_count) {
2989                         total_detect_count += new_detect_count;
2990                 } else {
2991                         if (!timeout && need_query_count)
2992                                 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
2993
2994                         if (timeout) {
2995                                 if (!--timeout) {
2996                                         query_data_timeout = true;
2997                                         break;
2998                                 }
2999                                 msleep(1);
3000                         }
3001                 }
3002         } while (total_detect_count < need_query_count);
3003
3004         if (query_data_timeout) {
3005                 dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3006                         (need_query_count - total_detect_count));
3007                 return -ENOENT;
3008         }
3009
3010         if (total_detect_count)
3011                 schedule_delayed_work(&ras->page_retirement_dwork, 0);
3012
3013         return 0;
3014 }
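     /*
      * Timeout behavior above (illustrative trace): if a query round
      * finds no new deferred errors while need_query_count is still
      * outstanding, a countdown of MAX_UMC_POISON_POLLING_TIME_ASYNC
      * iterations is armed, sleeping 1 ms per iteration; any newly
      * detected error re-arms the countdown, so the wait is bounded
      * rather than indefinite.
      */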
3015
3016 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3017 {
3018         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3019         struct ras_poison_msg msg;
3020         int ret;
3021
3022         do {
3023                 ret = kfifo_get(&con->poison_fifo, &msg);
3024         } while (ret);
3025 }
3026
3027 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3028                         uint32_t msg_count, uint32_t *gpu_reset)
3029 {
3030         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3031         uint32_t reset_flags = 0, reset = 0;
3032         struct ras_poison_msg msg;
3033         int ret, i;
3034
3035         kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3036
3037         for (i = 0; i < msg_count; i++) {
3038                 ret = amdgpu_ras_get_poison_req(adev, &msg);
3039                 if (!ret)
3040                         continue;
3041
3042                 if (msg.pasid_fn)
3043                         msg.pasid_fn(adev, msg.pasid, msg.data);
3044
3045                 reset_flags |= msg.reset;
3046         }
3047
3048         /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3049         if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3050                 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3051                         reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3052                 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3053                         reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3054                 else
3055                         reset = reset_flags;
3056
3057                 flush_delayed_work(&con->page_retirement_dwork);
3058
3059                 con->gpu_reset_flags |= reset;
3060                 amdgpu_ras_reset_gpu(adev);
3061
3062                 *gpu_reset = reset;
3063
3064                 /* Wait for gpu recovery to complete */
3065                 flush_work(&con->recovery_work);
3066         }
3067
3068         return 0;
3069 }
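     /*
      * Example of the reset-priority logic above (illustrative): if the
      * fifo held both a MODE1 and a MODE2 request, reset_flags carries
      * both bits and MODE1 wins; only when neither dedicated bit is set
      * are the raw flags forwarded unchanged.
      */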
3070
3071 static int amdgpu_ras_page_retirement_thread(void *param)
3072 {
3073         struct amdgpu_device *adev = (struct amdgpu_device *)param;
3074         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3075         uint32_t poison_creation_count, msg_count;
3076         uint32_t gpu_reset;
3077         int ret;
3078
3079         while (!kthread_should_stop()) {
3080
3081                 wait_event_interruptible(con->page_retirement_wq,
3082                                 kthread_should_stop() ||
3083                                 atomic_read(&con->page_retirement_req_cnt));
3084
3085                 if (kthread_should_stop())
3086                         break;
3087
3088                 gpu_reset = 0;
3089
3090                 do {
3091                         poison_creation_count = atomic_read(&con->poison_creation_count);
3092                         ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3093                         if (ret == -EIO)
3094                                 break;
3095
3096                         if (poison_creation_count) {
3097                                 atomic_sub(poison_creation_count, &con->poison_creation_count);
3098                                 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3099                         }
3100                 } while (atomic_read(&con->poison_creation_count));
3101
3102                 if (ret != -EIO) {
3103                         msg_count = kfifo_len(&con->poison_fifo);
3104                         if (msg_count) {
3105                                 ret = amdgpu_ras_poison_consumption_handler(adev,
3106                                                 msg_count, &gpu_reset);
3107                                 if ((ret != -EIO) &&
3108                                     (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3109                                         atomic_sub(msg_count, &con->page_retirement_req_cnt);
3110                         }
3111                 }
3112
3113                 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3114                         /* a gpu mode-1 reset is ongoing, or a ras mode-1 reset just completed */
3115                         /* Clear poison creation request */
3116                         atomic_set(&con->poison_creation_count, 0);
3117
3118                         /* Clear poison fifo */
3119                         amdgpu_ras_clear_poison_fifo(adev);
3120
3121                         /* Clear all poison requests */
3122                         atomic_set(&con->page_retirement_req_cnt, 0);
3123
3124                         if (ret == -EIO) {
3125                                 /* Wait for mode-1 reset to complete */
3126                                 down_read(&adev->reset_domain->sem);
3127                                 up_read(&adev->reset_domain->sem);
3128                         }
3129
3130                         /* Wake up work to save bad pages to eeprom */
3131                         schedule_delayed_work(&con->page_retirement_dwork, 0);
3132                 } else if (gpu_reset) {
3133                         /* the gpu has just completed a mode-2 or other reset */
3134                         /* Clear poison consumption messages cached in fifo */
3135                         msg_count = kfifo_len(&con->poison_fifo);
3136                         if (msg_count) {
3137                                 amdgpu_ras_clear_poison_fifo(adev);
3138                                 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3139                         }
3140
3141                         /* Wake up work to save bad pages to eeprom */
3142                         schedule_delayed_work(&con->page_retirement_dwork, 0);
3143                 }
3144         }
3145
3146         return 0;
3147 }
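     /*
      * Summary of the producer/consumer protocol above (descriptive
      * sketch): producers elsewhere in the driver are expected to bump
      * page_retirement_req_cnt (and poison_creation_count for creation
      * events) and wake page_retirement_wq; this thread drains creation
      * requests first, then consumption messages from poison_fifo, and
      * on -EIO or a mode-1 reset it discards all pending state before
      * kicking the eeprom save work.
      */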
3148
3149 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
3150 {
3151         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3152         struct ras_err_handler_data **data;
3153         u32  max_eeprom_records_count = 0;
3154         int ret;
3155
3156         if (!con || amdgpu_sriov_vf(adev))
3157                 return 0;
3158
3159         /* Allow access to RAS EEPROM via debugfs, when the ASIC
3160          * supports RAS and debugfs is enabled, even when
3161          * adev->ras_enabled is unset, i.e. when the "ras_enable"
3162          * module parameter is set to 0.
3163          */
3164         con->adev = adev;
3165
3166         if (!adev->ras_enabled)
3167                 return 0;
3168
3169         data = &con->eh_data;
3170         *data = kzalloc(sizeof(**data), GFP_KERNEL);
3171         if (!*data) {
3172                 ret = -ENOMEM;
3173                 goto out;
3174         }
3175
3176         mutex_init(&con->recovery_lock);
3177         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3178         atomic_set(&con->in_recovery, 0);
3179         con->eeprom_control.bad_channel_bitmap = 0;
3180
3181         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3182         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3183
3184         /* Todo: during testing, the SMU might fail to read the eeprom
3185          * through I2C when the GPU is pending an XGMI reset at probe time
3186          * (mostly after the second bus reset); skip it for now
3187          */
3188         if (adev->gmc.xgmi.pending_reset)
3189                 return 0;
3190         ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
3191         /*
3192          * Treat the init as failed when is_rma is true or
3193          * ret != 0.
3194          */
3195         if (amdgpu_ras_is_rma(adev) || ret)
3196                 goto free;
3197
3198         if (con->eeprom_control.ras_num_recs) {
3199                 ret = amdgpu_ras_load_bad_pages(adev);
3200                 if (ret)
3201                         goto free;
3202
3203                 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
3204
3205                 if (con->update_channel_flag) {
3206                         amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
3207                         con->update_channel_flag = false;
3208                 }
3209         }
3210
3211         mutex_init(&con->page_rsv_lock);
3212         INIT_KFIFO(con->poison_fifo);
3213         mutex_init(&con->page_retirement_lock);
3214         init_waitqueue_head(&con->page_retirement_wq);
3215         atomic_set(&con->page_retirement_req_cnt, 0);
3216         atomic_set(&con->poison_creation_count, 0);
3217         con->page_retirement_thread =
3218                 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3219         if (IS_ERR(con->page_retirement_thread)) {
3220                 con->page_retirement_thread = NULL;
3221                 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
3222         }
3223
3224         INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3225         amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3226 #ifdef CONFIG_X86_MCE_AMD
3227         if ((adev->asic_type == CHIP_ALDEBARAN) &&
3228             (adev->gmc.xgmi.connected_to_cpu))
3229                 amdgpu_register_bad_pages_mca_notifier(adev);
3230 #endif
3231         return 0;
3232
3233 free:
3234         kfree((*data)->bps);
3235         kfree(*data);
3236         con->eh_data = NULL;
3237 out:
3238         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3239
3240         /*
3241          * Except for the error-threshold-exceeded case, other failure
3242          * cases in this function do not fail amdgpu driver init.
3243          */
3244         if (!amdgpu_ras_is_rma(adev))
3245                 ret = 0;
3246         else
3247                 ret = -EINVAL;
3248
3249         return ret;
3250 }
3251
3252 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3253 {
3254         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3255         struct ras_err_handler_data *data = con->eh_data;
3256         int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3257         bool ret;
3258
3259         /* recovery_init did not allocate it, so there is nothing to fini */
3260         if (!data)
3261                 return 0;
3262
3263         /* Save all cached bad pages to eeprom */
3264         do {
3265                 flush_delayed_work(&con->page_retirement_dwork);
3266                 ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3267         } while (ret && max_flush_timeout--);
3268
3269         if (con->page_retirement_thread)
3270                 kthread_stop(con->page_retirement_thread);
3271
3272         atomic_set(&con->page_retirement_req_cnt, 0);
3273         atomic_set(&con->poison_creation_count, 0);
3274
3275         mutex_destroy(&con->page_rsv_lock);
3276
3277         cancel_work_sync(&con->recovery_work);
3278
3279         cancel_delayed_work_sync(&con->page_retirement_dwork);
3280
3281         amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3282
3283         mutex_lock(&con->recovery_lock);
3284         con->eh_data = NULL;
3285         kfree(data->bps);
3286         kfree(data);
3287         mutex_unlock(&con->recovery_lock);
3288
3289         return 0;
3290 }
3291 /* recovery end */
3292
3293 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3294 {
3295         if (amdgpu_sriov_vf(adev)) {
3296                 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3297                 case IP_VERSION(13, 0, 2):
3298                 case IP_VERSION(13, 0, 6):
3299                 case IP_VERSION(13, 0, 14):
3300                         return true;
3301                 default:
3302                         return false;
3303                 }
3304         }
3305
3306         if (adev->asic_type == CHIP_IP_DISCOVERY) {
3307                 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3308                 case IP_VERSION(13, 0, 0):
3309                 case IP_VERSION(13, 0, 6):
3310                 case IP_VERSION(13, 0, 10):
3311                 case IP_VERSION(13, 0, 14):
3312                         return true;
3313                 default:
3314                         return false;
3315                 }
3316         }
3317
3318         return adev->asic_type == CHIP_VEGA10 ||
3319                 adev->asic_type == CHIP_VEGA20 ||
3320                 adev->asic_type == CHIP_ARCTURUS ||
3321                 adev->asic_type == CHIP_ALDEBARAN ||
3322                 adev->asic_type == CHIP_SIENNA_CICHLID;
3323 }
3324
3325 /*
3326  * This is a workaround for the vega20 workstation sku:
3327  * force-enable gfx ras and ignore the vbios gfx ras flag,
3328  * since GC EDC cannot be written.
3329  */
3330 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3331 {
3332         struct atom_context *ctx = adev->mode_info.atom_context;
3333
3334         if (!ctx)
3335                 return;
3336
3337         if (strnstr(ctx->vbios_pn, "D16406",
3338                     sizeof(ctx->vbios_pn)) ||
3339                 strnstr(ctx->vbios_pn, "D36002",
3340                         sizeof(ctx->vbios_pn)))
3341                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3342 }
3343
3344 /* Query ras capability via the atomfirmware interface */
3345 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3346 {
3347         /* mem_ecc cap */
3348         if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3349                 dev_info(adev->dev, "MEM ECC is active.\n");
3350                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3351                                          1 << AMDGPU_RAS_BLOCK__DF);
3352         } else {
3353                 dev_info(adev->dev, "MEM ECC is not present.\n");
3354         }
3355
3356         /* sram_ecc cap */
3357         if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3358                 dev_info(adev->dev, "SRAM ECC is active.\n");
3359                 if (!amdgpu_sriov_vf(adev))
3360                         adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3361                                                   1 << AMDGPU_RAS_BLOCK__DF);
3362                 else
3363                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3364                                                  1 << AMDGPU_RAS_BLOCK__SDMA |
3365                                                  1 << AMDGPU_RAS_BLOCK__GFX);
3366
3367                 /*
3368                  * VCN/JPEG RAS can be supported on both bare metal and
3369                  * SRIOV environment
3370                  */
3371                 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3372                     amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3373                     amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3374                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3375                                                  1 << AMDGPU_RAS_BLOCK__JPEG);
3376                 else
3377                         adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3378                                                   1 << AMDGPU_RAS_BLOCK__JPEG);
3379
3380                 /*
3381                  * XGMI RAS is not supported if xgmi num physical nodes
3382                  * is zero
3383                  */
3384                 if (!adev->gmc.xgmi.num_physical_nodes)
3385                         adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3386         } else {
3387                 dev_info(adev->dev, "SRAM ECC is not present.\n");
3388         }
3389 }
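     /*
      * Note the bitmask idiom above: on bare metal,
      * ras_hw_enabled |= ~(1 << UMC | 1 << DF) turns on every block
      * except UMC and DF (those are governed by the mem_ecc capability),
      * whereas the SRIOV branch whitelists individual blocks. E.g., if
      * UMC were bit 0 and DF bit 8, ~(BIT(0) | BIT(8)) sets every bit
      * but those two.
      */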
3390
3391 /* Query poison mode from umc/df IP callbacks */
3392 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3393 {
3394         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3395         bool df_poison, umc_poison;
3396
3397         /* poison setting is useless on SRIOV guest */
3398         if (amdgpu_sriov_vf(adev) || !con)
3399                 return;
3400
3401         /* Init poison supported flag, the default value is false */
3402         if (adev->gmc.xgmi.connected_to_cpu ||
3403             adev->gmc.is_app_apu) {
3404                 /* enabled by default when GPU is connected to CPU */
3405                 con->poison_supported = true;
3406         } else if (adev->df.funcs &&
3407             adev->df.funcs->query_ras_poison_mode &&
3408             adev->umc.ras &&
3409             adev->umc.ras->query_ras_poison_mode) {
3410                 df_poison =
3411                         adev->df.funcs->query_ras_poison_mode(adev);
3412                 umc_poison =
3413                         adev->umc.ras->query_ras_poison_mode(adev);
3414
3415                 /* Poison is supported only when both DF and UMC enable it */
3416                 if (df_poison && umc_poison)
3417                         con->poison_supported = true;
3418                 else if (df_poison != umc_poison)
3419                         dev_warn(adev->dev,
3420                                 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3421                                 df_poison, umc_poison);
3422         }
3423 }
3424
3425 /*
3426  * check the hardware's ras ability, which will be saved in hw_supported.
3427  * if the hardware does not support ras, we can skip some ras initialization
3428  * and forbid some ras operations from IP blocks.
3429  * if software itself (say, a boot parameter) limits the ras ability, we
3430  * still need to allow IP blocks some limited operations, like disable. in
3431  * such a case, we have to initialize ras as normal, but check in each
3432  * function whether the operation is allowed.
3433  */
3434 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3435 {
3436         adev->ras_hw_enabled = adev->ras_enabled = 0;
3437
3438         if (!amdgpu_ras_asic_supported(adev))
3439                 return;
3440
3441         /* query ras capability from psp */
3442         if (amdgpu_psp_get_ras_capability(&adev->psp))
3443                 goto init_ras_enabled_flag;
3444
3445         /* query ras capability from bios */
3446         if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3447                 amdgpu_ras_query_ras_capablity_from_vbios(adev);
3448         } else {
3449                 /* the driver only manages a few IP blocks' RAS features
3450                  * when the GPU is connected to the CPU through XGMI */
3451                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3452                                            1 << AMDGPU_RAS_BLOCK__SDMA |
3453                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
3454         }
3455
3456         /* apply asic specific settings (vega20 only for now) */
3457         amdgpu_ras_get_quirks(adev);
3458
3459         /* query poison mode from umc/df ip callback */
3460         amdgpu_ras_query_poison_mode(adev);
3461
3462 init_ras_enabled_flag:
3463         /* hw_supported needs to be aligned with RAS block mask. */
3464         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3465
3466         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3467                 adev->ras_hw_enabled & amdgpu_ras_mask;
3468
3469         /* aca is disabled by default */
3470         adev->aca.is_enabled = false;
3471 }
3472
3473 static void amdgpu_ras_counte_dw(struct work_struct *work)
3474 {
3475         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3476                                               ras_counte_delay_work.work);
3477         struct amdgpu_device *adev = con->adev;
3478         struct drm_device *dev = adev_to_drm(adev);
3479         unsigned long ce_count, ue_count;
3480         int res;
3481
3482         res = pm_runtime_get_sync(dev->dev);
3483         if (res < 0)
3484                 goto Out;
3485
3486         /* Cache new values.
3487          */
3488         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3489                 atomic_set(&con->ras_ce_count, ce_count);
3490                 atomic_set(&con->ras_ue_count, ue_count);
3491         }
3492
3493         pm_runtime_mark_last_busy(dev->dev);
3494 Out:
3495         pm_runtime_put_autosuspend(dev->dev);
3496 }
3497
3498 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3499 {
3500         return  (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3501                         AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3502                         AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3503                         AMDGPU_RAS_ERROR__PARITY;
3504 }
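     /*
      * With the parenthesization above, the schema always advertises
      * parity, single-correctable and multi-uncorrectable errors, and
      * adds the poison bit only on poison-capable configurations, e.g.
      * POISON | SINGLE_CORRECTABLE | MULTI_UNCORRECTABLE | PARITY on a
      * poison-capable SOC. Without the parentheses, operator precedence
      * folds the always-on bits into the ": 0" arm of the ternary.
      */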
3505
3506 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3507 {
3508         struct ras_event_state *event_state;
3509         int i;
3510
3511         memset(mgr, 0, sizeof(*mgr));
3512         atomic64_set(&mgr->seqno, 0);
3513
3514         for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3515                 event_state = &mgr->event_state[i];
3516                 event_state->last_seqno = RAS_EVENT_INVALID_ID;
3517                 atomic64_set(&event_state->count, 0);
3518         }
3519 }
3520
3521 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3522 {
3523         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3524         struct amdgpu_hive_info *hive;
3525
3526         if (!ras)
3527                 return;
3528
3529         hive = amdgpu_get_xgmi_hive(adev);
3530         ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3531
3532         /* init event manager with node 0 on xgmi system */
3533         if (!amdgpu_in_reset(adev)) {
3534                 if (!hive || adev->gmc.xgmi.node_id == 0)
3535                         ras_event_mgr_init(ras->event_mgr);
3536         }
3537
3538         if (hive)
3539                 amdgpu_put_xgmi_hive(hive);
3540 }
3541
3542 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3543 {
3544         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3545
3546         if (!con || (adev->flags & AMD_IS_APU))
3547                 return;
3548
3549         switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3550         case IP_VERSION(13, 0, 2):
3551         case IP_VERSION(13, 0, 6):
3552         case IP_VERSION(13, 0, 14):
3553                 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3554                 break;
3555         default:
3556                 break;
3557         }
3558 }
3559
3560 int amdgpu_ras_init(struct amdgpu_device *adev)
3561 {
3562         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3563         int r;
3564
3565         if (con)
3566                 return 0;
3567
3568         con = kzalloc(sizeof(*con) +
3569                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3570                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3571                         GFP_KERNEL);
3572         if (!con)
3573                 return -ENOMEM;
3574
3575         con->adev = adev;
3576         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3577         atomic_set(&con->ras_ce_count, 0);
3578         atomic_set(&con->ras_ue_count, 0);
3579
3580         con->objs = (struct ras_manager *)(con + 1);
3581
3582         amdgpu_ras_set_context(adev, con);
3583
3584         amdgpu_ras_check_supported(adev);
3585
3586         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3587                 /* set the gfx block ras context feature for VEGA20 Gaming;
3588                  * send the ras disable cmd to the ras ta during ras late init.
3589                  */
3590                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3591                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3592
3593                         return 0;
3594                 }
3595
3596                 r = 0;
3597                 goto release_con;
3598         }
3599
3600         con->update_channel_flag = false;
3601         con->features = 0;
3602         con->schema = 0;
3603         INIT_LIST_HEAD(&con->head);
3604         /* Might need to get this flag from vbios. */
3605         con->flags = RAS_DEFAULT_FLAGS;
3606
3607         /* initialize the nbio ras function ahead of any other
3608          * ras functions so the hardware fatal error interrupt
3609          * can be enabled as early as possible */
3610         switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3611         case IP_VERSION(7, 4, 0):
3612         case IP_VERSION(7, 4, 1):
3613         case IP_VERSION(7, 4, 4):
3614                 if (!adev->gmc.xgmi.connected_to_cpu)
3615                         adev->nbio.ras = &nbio_v7_4_ras;
3616                 break;
3617         case IP_VERSION(4, 3, 0):
3618                 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3619                         /* unlike other generations of nbio ras,
3620                          * nbio v4_3 only supports the fatal error interrupt,
3621                          * which informs software that DF is frozen due to a
3622                          * system fatal error event. the driver should not
3623                          * enable nbio ras in such a case. Instead,
3624                          * check DF RAS */
3625                         adev->nbio.ras = &nbio_v4_3_ras;
3626                 break;
3627         case IP_VERSION(7, 9, 0):
3628                 if (!adev->gmc.is_app_apu)
3629                         adev->nbio.ras = &nbio_v7_9_ras;
3630                 break;
3631         default:
3632                 /* nbio ras is not available */
3633                 break;
3634         }
3635
3636         /* the nbio ras block needs to be enabled ahead of other ras blocks
3637          * to handle fatal errors */
3638         r = amdgpu_nbio_ras_sw_init(adev);
3639         if (r)
3640                 return r;
3641
3642         if (adev->nbio.ras &&
3643             adev->nbio.ras->init_ras_controller_interrupt) {
3644                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3645                 if (r)
3646                         goto release_con;
3647         }
3648
3649         if (adev->nbio.ras &&
3650             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3651                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3652                 if (r)
3653                         goto release_con;
3654         }
3655
3656         /* Pack socket_id into ras feature mask bits [31:29] */
3657         if (adev->smuio.funcs &&
3658             adev->smuio.funcs->get_socket_id)
3659                 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3660                                         AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3661
3662         /* Get RAS schema for particular SOC */
3663         con->schema = amdgpu_get_ras_schema(adev);
3664
3665         amdgpu_ras_init_reserved_vram_size(adev);
3666
3667         if (amdgpu_ras_fs_init(adev)) {
3668                 r = -EINVAL;
3669                 goto release_con;
3670         }
3671
3672         if (amdgpu_ras_aca_is_supported(adev)) {
3673                 if (amdgpu_aca_is_enabled(adev))
3674                         r = amdgpu_aca_init(adev);
3675                 else
3676                         r = amdgpu_mca_init(adev);
3677                 if (r)
3678                         goto release_con;
3679         }
3680
3681         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3682                  "hardware ability[%x] ras_mask[%x]\n",
3683                  adev->ras_hw_enabled, adev->ras_enabled);
3684
3685         return 0;
3686 release_con:
3687         amdgpu_ras_set_context(adev, NULL);
3688         kfree(con);
3689
3690         return r;
3691 }
3692
3693 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3694 {
3695         if (adev->gmc.xgmi.connected_to_cpu ||
3696             adev->gmc.is_app_apu)
3697                 return 1;
3698         return 0;
3699 }
3700
3701 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3702                                         struct ras_common_if *ras_block)
3703 {
3704         struct ras_query_if info = {
3705                 .head = *ras_block,
3706         };
3707
3708         if (!amdgpu_persistent_edc_harvesting_supported(adev))
3709                 return 0;
3710
3711         if (amdgpu_ras_query_error_status(adev, &info) != 0)
3712                 DRM_WARN("RAS init harvest failure");
3713
3714         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3715                 DRM_WARN("RAS init harvest reset failure");
3716
3717         return 0;
3718 }
3719
3720 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3721 {
3722         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3723
3724         if (!con)
3725                 return false;
3726
3727         return con->poison_supported;
3728 }
3729
3730 /* helper function to handle common stuff in ip late init phase */
3731 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3732                          struct ras_common_if *ras_block)
3733 {
3734         struct amdgpu_ras_block_object *ras_obj = NULL;
3735         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3736         struct ras_query_if *query_info;
3737         unsigned long ue_count, ce_count;
3738         int r;
3739
3740         /* disable RAS feature per IP block if it is not supported */
3741         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3742                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3743                 return 0;
3744         }
3745
3746         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3747         if (r) {
3748                 if (adev->in_suspend || amdgpu_in_reset(adev)) {
3749                         /* in the resume phase, if enabling ras fails,
3750                          * clean up all ras fs nodes and disable ras */
3751                         goto cleanup;
3752                 } else
3753                         return r;
3754         }
3755
3756         /* check for errors on ASICs that support persistent edc across a warm reset */
3757         amdgpu_persistent_edc_harvesting(adev, ras_block);
3758
3759         /* in the resume phase, there is no need to create ras fs nodes */
3760         if (adev->in_suspend || amdgpu_in_reset(adev))
3761                 return 0;
3762
3763         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3764         if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3765             (ras_obj->hw_ops->query_poison_status ||
3766             ras_obj->hw_ops->handle_poison_consumption))) {
3767                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3768                 if (r)
3769                         goto cleanup;
3770         }
3771
3772         if (ras_obj->hw_ops &&
3773             (ras_obj->hw_ops->query_ras_error_count ||
3774              ras_obj->hw_ops->query_ras_error_status)) {
3775                 r = amdgpu_ras_sysfs_create(adev, ras_block);
3776                 if (r)
3777                         goto interrupt;
3778
3779                 /* Those are the cached values at init.
3780                  */
3781                 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3782                 if (!query_info)
3783                         return -ENOMEM;
3784                 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3785
3786                 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3787                         atomic_set(&con->ras_ce_count, ce_count);
3788                         atomic_set(&con->ras_ue_count, ue_count);
3789                 }
3790
3791                 kfree(query_info);
3792         }
3793
3794         return 0;
3795
3796 interrupt:
3797         if (ras_obj->ras_cb)
3798                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3799 cleanup:
3800         amdgpu_ras_feature_enable(adev, ras_block, 0);
3801         return r;
3802 }
3803
3804 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3805                          struct ras_common_if *ras_block)
3806 {
3807         return amdgpu_ras_block_late_init(adev, ras_block);
3808 }
3809
3810 /* helper function to remove ras fs node and interrupt handler */
3811 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3812                           struct ras_common_if *ras_block)
3813 {
3814         struct amdgpu_ras_block_object *ras_obj;
3815         if (!ras_block)
3816                 return;
3817
3818         amdgpu_ras_sysfs_remove(adev, ras_block);
3819
3820         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3821         if (ras_obj->ras_cb)
3822                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3823 }
3824
3825 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3826                           struct ras_common_if *ras_block)
3827 {
3828         return amdgpu_ras_block_late_fini(adev, ras_block);
3829 }
3830
3831 /* do some init work after IP late init, as a dependency,
3832  * and it runs in the resume/gpu reset/boot-up cases.
3833  */
3834 void amdgpu_ras_resume(struct amdgpu_device *adev)
3835 {
3836         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3837         struct ras_manager *obj, *tmp;
3838
3839         if (!adev->ras_enabled || !con) {
3840                 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
3841                 amdgpu_release_ras_context(adev);
3842
3843                 return;
3844         }
3845
3846         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3847                 /* Set up all other IPs which are not implemented. There is a
3848                  * subtlety here: an IP's actual ras error type should be
3849                  * MULTI_UNCORRECTABLE, but since the driver does not handle it,
3850                  * ERROR_NONE makes sense anyway.
3851                  */
3852                 amdgpu_ras_enable_all_features(adev, 1);
3853
3854                 /* We enable ras on all hw_supported blocks, but the boot
3855                  * parameter might disable some of them, and one or more IPs
3856                  * may not be implemented yet; disable those on their behalf.
3857                  */
3858                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
3859                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3860                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
3861                                 /* there should not be any reference. */
3862                                 WARN_ON(alive_obj(obj));
3863                         }
3864                 }
3865         }
3866 }
3867
3868 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3869 {
3870         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3871
3872         if (!adev->ras_enabled || !con)
3873                 return;
3874
3875         amdgpu_ras_disable_all_features(adev, 0);
3876         /* Make sure all ras objects are disabled. */
3877         if (AMDGPU_RAS_GET_FEATURES(con->features))
3878                 amdgpu_ras_disable_all_features(adev, 1);
3879 }
3880
3881 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3882 {
3883         struct amdgpu_ras_block_list *node, *tmp;
3884         struct amdgpu_ras_block_object *obj;
3885         int r;
3886
3887         amdgpu_ras_event_mgr_init(adev);
3888
3889         if (amdgpu_ras_aca_is_supported(adev)) {
3890                 if (amdgpu_in_reset(adev)) {
3891                         if (amdgpu_aca_is_enabled(adev))
3892                                 r = amdgpu_aca_reset(adev);
3893                         else
3894                                 r = amdgpu_mca_reset(adev);
3895                         if (r)
3896                                 return r;
3897                 }
3898
3899                 if (!amdgpu_sriov_vf(adev)) {
3900                         if (amdgpu_aca_is_enabled(adev))
3901                                 amdgpu_ras_set_aca_debug_mode(adev, false);
3902                         else
3903                                 amdgpu_ras_set_mca_debug_mode(adev, false);
3904                 }
3905         }
3906
3907         /* The guest side doesn't need to init ras features */
3908         if (amdgpu_sriov_vf(adev))
3909                 return 0;
3910
3911         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3912                 obj = node->ras_obj;
3913                 if (!obj) {
3914                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3915                         continue;
3916                 }
3917
3918                 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3919                         continue;
3920
3921                 if (obj->ras_late_init) {
3922                         r = obj->ras_late_init(adev, &obj->ras_comm);
3923                         if (r) {
3924                                 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3925                                         obj->ras_comm.name, r);
3926                                 return r;
3927                         }
3928                 } else
3929                         amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3930         }
3931
3932         return 0;
3933 }
3934
3935 /* do some fini work before IP fini, as a dependency */
3936 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
3937 {
3938         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3939
3940         if (!adev->ras_enabled || !con)
3941                 return 0;
3942
3943
3944         /* Need to disable ras on all IPs here before ip [hw/sw]fini */
3945         if (AMDGPU_RAS_GET_FEATURES(con->features))
3946                 amdgpu_ras_disable_all_features(adev, 0);
3947         amdgpu_ras_recovery_fini(adev);
3948         return 0;
3949 }
3950
3951 int amdgpu_ras_fini(struct amdgpu_device *adev)
3952 {
3953         struct amdgpu_ras_block_list *ras_node, *tmp;
3954         struct amdgpu_ras_block_object *obj = NULL;
3955         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3956
3957         if (!adev->ras_enabled || !con)
3958                 return 0;
3959
3960         list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
3961                 if (ras_node->ras_obj) {
3962                         obj = ras_node->ras_obj;
3963                         if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
3964                             obj->ras_fini)
3965                                 obj->ras_fini(adev, &obj->ras_comm);
3966                         else
3967                                 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
3968                 }
3969
3970                 /* Clear ras blocks from ras_list and free ras block list node */
3971                 list_del(&ras_node->node);
3972                 kfree(ras_node);
3973         }
3974
3975         amdgpu_ras_fs_fini(adev);
3976         amdgpu_ras_interrupt_remove_all(adev);
3977
3978         if (amdgpu_ras_aca_is_supported(adev)) {
3979                 if (amdgpu_aca_is_enabled(adev))
3980                         amdgpu_aca_fini(adev);
3981                 else
3982                         amdgpu_mca_fini(adev);
3983         }
3984
3985         WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
3986
3987         if (AMDGPU_RAS_GET_FEATURES(con->features))
3988                 amdgpu_ras_disable_all_features(adev, 0);
3989
3990         cancel_delayed_work_sync(&con->ras_counte_delay_work);
3991
3992         amdgpu_ras_set_context(adev, NULL);
3993         kfree(con);
3994
3995         return 0;
3996 }
3997
3998 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
3999 {
4000         struct amdgpu_ras *ras;
4001
4002         ras = amdgpu_ras_get_context(adev);
4003         if (!ras)
4004                 return false;
4005
4006         return atomic_read(&ras->fed);
4007 }
4008
4009 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4010 {
4011         struct amdgpu_ras *ras;
4012
4013         ras = amdgpu_ras_get_context(adev);
4014         if (ras)
4015                 atomic_set(&ras->fed, !!status);
4016 }
4017
4018 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4019 {
4020         struct amdgpu_ras *ras;
4021
4022         ras = amdgpu_ras_get_context(adev);
4023         if (!ras)
4024                 return NULL;
4025
4026         return ras->event_mgr;
4027 }
4028
4029 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4030                                      const void *caller)
4031 {
4032         struct ras_event_manager *event_mgr;
4033         struct ras_event_state *event_state;
4034         int ret = 0;
4035
4036         if (type >= RAS_EVENT_TYPE_COUNT) {
4037                 ret = -EINVAL;
4038                 goto out;
4039         }
4040
4041         event_mgr = __get_ras_event_mgr(adev);
4042         if (!event_mgr) {
4043                 ret = -EINVAL;
4044                 goto out;
4045         }
4046
4047         event_state = &event_mgr->event_state[type];
4048         event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4049         atomic64_inc(&event_state->count);
4050
4051 out:
4052         if (ret && caller)
4053                 dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
4054                          (int)type, caller, ret);
4055
4056         return ret;
4057 }
4058
4059 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4060 {
4061         struct ras_event_manager *event_mgr;
4062         u64 id;
4063
4064         if (type >= RAS_EVENT_TYPE_COUNT)
4065                 return RAS_EVENT_INVALID_ID;
4066
4067         switch (type) {
4068         case RAS_EVENT_TYPE_FATAL:
4069         case RAS_EVENT_TYPE_POISON_CREATION:
4070         case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4071                 event_mgr = __get_ras_event_mgr(adev);
4072                 if (!event_mgr)
4073                         return RAS_EVENT_INVALID_ID;
4074
4075                 id = event_mgr->event_state[type].last_seqno;
4076                 break;
4077         case RAS_EVENT_TYPE_INVALID:
4078         default:
4079                 id = RAS_EVENT_INVALID_ID;
4080                 break;
4081         }
4082
4083         return id;
4084 }
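     /*
      * Typical pairing (sketch, mirroring the fatal-error path below):
      * mark the event first, then acquire the id used to tag related
      * log lines:
      *
      *   if (!amdgpu_ras_mark_ras_event(adev, RAS_EVENT_TYPE_FATAL))
      *           event_id = amdgpu_ras_acquire_event_id(adev,
      *                                          RAS_EVENT_TYPE_FATAL);
      */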
4085
4086 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4087 {
4088         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4089                 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4090                 enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4091                 u64 event_id;
4092
4093                 if (amdgpu_ras_mark_ras_event(adev, type))
4094                         return;
4095
4096                 event_id = amdgpu_ras_acquire_event_id(adev, type);
4097
4098                 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4099                               "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4100
4101                 amdgpu_ras_set_fed(adev, true);
4102                 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4103                 amdgpu_ras_reset_gpu(adev);
4104         }
4105 }
4106
4107 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4108 {
4109         if (adev->asic_type == CHIP_VEGA20 &&
4110             adev->pm.fw_version <= 0x283400) {
4111                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4112                                 amdgpu_ras_intr_triggered();
4113         }
4114
4115         return false;
4116 }
4117
4118 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4119 {
4120         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4121
4122         if (!con)
4123                 return;
4124
4125         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4126                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4127                 amdgpu_ras_set_context(adev, NULL);
4128                 kfree(con);
4129         }
4130 }
4131
4132 #ifdef CONFIG_X86_MCE_AMD
4133 static struct amdgpu_device *find_adev(uint32_t node_id)
4134 {
4135         int i;
4136         struct amdgpu_device *adev = NULL;
4137
4138         for (i = 0; i < mce_adev_list.num_gpu; i++) {
4139                 adev = mce_adev_list.devs[i];
4140
4141                 if (adev && adev->gmc.xgmi.connected_to_cpu &&
4142                     adev->gmc.xgmi.physical_node_id == node_id)
4143                         break;
4144                 adev = NULL;
4145         }
4146
4147         return adev;
4148 }
4149
4150 #define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
4151 #define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
4152 #define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4153 #define GPU_ID_OFFSET           8
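     /*
      * Field decode example (illustrative): for an MCA_IPID value with
      * bits [47:44] = 0x9, GET_MCA_IPID_GPUID() yields 9, so the
      * reporting node is 9 - GPU_ID_OFFSET = 1. GET_UMC_INST() extracts
      * bits [23:21], and GET_CHAN_INDEX() splices bit [20] (as bit 2)
      * onto bits [13:12] to form a 3-bit channel index.
      */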
4154
4155 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4156                                     unsigned long val, void *data)
4157 {
4158         struct mce *m = (struct mce *)data;
4159         struct amdgpu_device *adev = NULL;
4160         uint32_t gpu_id = 0;
4161         uint32_t umc_inst = 0, ch_inst = 0;
4162
4163         /*
4164          * Only process the error if it was generated in UMC_V2, which
4165          * belongs to GPU UMCs, and occurred in DramECC (Extended error
4166          * code = 0); otherwise bail out.
4167          */
4168         if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4169                     (XEC(m->status, 0x3f) == 0x0)))
4170                 return NOTIFY_DONE;
4171
4172         /*
4173          * If it is a correctable error, return.
4174          */
4175         if (mce_is_correctable(m))
4176                 return NOTIFY_OK;
4177
4178         /*
4179          * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4180          */
4181         gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4182
4183         adev = find_adev(gpu_id);
4184         if (!adev) {
4185                 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4186                                                                 gpu_id);
4187                 return NOTIFY_DONE;
4188         }
4189
4190         /*
4191          * If it is an uncorrectable error, find out the UMC instance
4192          * and channel index.
4193          */
4194         umc_inst = GET_UMC_INST(m->ipid);
4195         ch_inst = GET_CHAN_INDEX(m->ipid);
4196
4197         dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4198                              umc_inst, ch_inst);
4199
4200         if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4201                 return NOTIFY_OK;
4202         else
4203                 return NOTIFY_DONE;
4204 }
4205
4206 static struct notifier_block amdgpu_bad_page_nb = {
4207         .notifier_call  = amdgpu_bad_page_notifier,
4208         .priority       = MCE_PRIO_UC,
4209 };
4210
4211 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4212 {
4213         /*
4214          * Add the adev to mce_adev_list.
4215          * During a mode2 reset, the amdgpu device is temporarily
4216          * removed from the mgpu_info list, which can cause
4217          * page retirement to fail.
4218          * Use this list instead of mgpu_info to find the amdgpu
4219          * device on which the UMC error was reported.
4220          */
4221         mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4222
4223         /*
4224          * Register the x86 notifier only once
4225          * with MCE subsystem.
4226          */
4227         if (!notifier_registered) {
4228                 mce_register_decode_chain(&amdgpu_bad_page_nb);
4229                 notifier_registered = true;
4230         }
4231 }
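
/*
 * Illustrative counterpart (not present in the driver): if the notifier ever
 * needed to be torn down, a hypothetical helper could use the existing MCE
 * decode-chain API as sketched below.
 */
#if 0
static void example_unregister_bad_pages_mca_notifier(void)
{
        if (notifier_registered) {
                mce_unregister_decode_chain(&amdgpu_bad_page_nb);
                notifier_registered = false;
        }
}
#endif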
4232 #endif
4233
4234 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4235 {
4236         if (!adev)
4237                 return NULL;
4238
4239         return adev->psp.ras_context.ras;
4240 }
4241
4242 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4243 {
4244         if (!adev)
4245                 return -EINVAL;
4246
4247         adev->psp.ras_context.ras = ras_con;
4248         return 0;
4249 }
4250
4251 /* check whether RAS is supported on a given block, e.g. sdma or gfx */
4252 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4253                 unsigned int block)
4254 {
4255         int ret = 0;
4256         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4257
4258         if (block >= AMDGPU_RAS_BLOCK_COUNT)
4259                 return 0;
4260
4261         ret = ras && (adev->ras_enabled & (1 << block));
4262
4263         /* Special case: on ASICs with memory ECC enabled but SRAM ECC
4264          * disabled, a ras block may be missing from .ras_enabled.
4265          * Such a block is still considered RAS-capable if the ASIC
4266          * supports poison mode and the block provides a RAS
4267          * configuration.
4268          */
4269         if (!ret &&
4270             (block == AMDGPU_RAS_BLOCK__GFX ||
4271              block == AMDGPU_RAS_BLOCK__SDMA ||
4272              block == AMDGPU_RAS_BLOCK__VCN ||
4273              block == AMDGPU_RAS_BLOCK__JPEG) &&
4274             (amdgpu_ras_mask & (1 << block)) &&
4275             amdgpu_ras_is_poison_mode_supported(adev) &&
4276             amdgpu_ras_get_ras_block(adev, block, 0))
4277                 ret = 1;
4278
4279         return ret;
4280 }
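
/*
 * Usage sketch (illustrative): callers typically gate per-block work on the
 * helper above. The wrapping function is hypothetical; ras_query_if and
 * amdgpu_ras_query_error_status() are defined earlier in this file.
 */
#if 0
static void example_query_gfx_errors(struct amdgpu_device *adev)
{
        struct ras_query_if info = {
                .head = { .block = AMDGPU_RAS_BLOCK__GFX },
        };

        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return;

        amdgpu_ras_query_error_status(adev, &info);
}
#endif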
4281
4282 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4283 {
4284         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4285
4286         /* mode1 reset is the only option once the device is in RMA status */
4287         if (amdgpu_ras_is_rma(adev)) {
4288                 ras->gpu_reset_flags = 0;
4289                 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4290         }
4291
4292         if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
4293                 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4294         return 0;
4295 }
4296
4297 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4298 {
4299         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4300         int ret = 0;
4301
4302         if (con) {
4303                 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4304                 if (!ret)
4305                         con->is_aca_debug_mode = enable;
4306         }
4307
4308         return ret;
4309 }
4310
4311 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4312 {
4313         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4314         int ret = 0;
4315
4316         if (con) {
4317                 if (amdgpu_aca_is_enabled(adev))
4318                         ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4319                 else
4320                         ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4321                 if (!ret)
4322                         con->is_aca_debug_mode = enable;
4323         }
4324
4325         return ret;
4326 }
4327
4328 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4329 {
4330         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4331         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4332         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4333
4334         if (!con)
4335                 return false;
4336
4337         if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4338             (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4339                 return con->is_aca_debug_mode;
4340         else
4341                 return true;
4342 }
4343
4344 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4345                                      unsigned int *error_query_mode)
4346 {
4347         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4348         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4349         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4350
4351         if (!con) {
4352                 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4353                 return false;
4354         }
4355
4356         if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
4357                 *error_query_mode =
4358                         (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4359         else
4360                 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4361
4362         return true;
4363 }
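
/*
 * Usage sketch (illustrative): how a hypothetical caller branches on the
 * reported query mode.
 */
#if 0
static void example_pick_query_path(struct amdgpu_device *adev)
{
        unsigned int mode;

        if (!amdgpu_ras_get_error_query_mode(adev, &mode))
                return;

        if (mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY)
                pr_debug("query error counts via SMU firmware\n");
        else
                pr_debug("query error counts directly from registers\n");
}
#endif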
4364
4365 /* Register each IP's RAS block object with the amdgpu RAS framework */
4366 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4367                 struct amdgpu_ras_block_object *ras_block_obj)
4368 {
4369         struct amdgpu_ras_block_list *ras_node;
4370         if (!adev || !ras_block_obj)
4371                 return -EINVAL;
4372
4373         ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4374         if (!ras_node)
4375                 return -ENOMEM;
4376
4377         INIT_LIST_HEAD(&ras_node->node);
4378         ras_node->ras_obj = ras_block_obj;
4379         list_add_tail(&ras_node->node, &adev->ras_list);
4380
4381         return 0;
4382 }
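
/*
 * Usage sketch (illustrative): an IP block registers its RAS object once
 * during early init. The object and function below are hypothetical; real
 * block objects are defined in the respective IP files (gfx, umc, ...).
 */
#if 0
static struct amdgpu_ras_block_object example_ras_obj;  /* fields elided */

static int example_ip_early_init(struct amdgpu_device *adev)
{
        return amdgpu_ras_register_ras_block(adev, &example_ras_obj);
}
#endif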
4383
4384 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4385 {
4386         if (!err_type_name)
4387                 return;
4388
4389         switch (err_type) {
4390         case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4391                 sprintf(err_type_name, "correctable");
4392                 break;
4393         case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4394                 sprintf(err_type_name, "uncorrectable");
4395                 break;
4396         default:
4397                 sprintf(err_type_name, "unknown");
4398                 break;
4399         }
4400 }
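
/*
 * Usage sketch (illustrative): callers pass a small stack buffer; 16 bytes
 * covers the longest string written above ("uncorrectable" plus NUL).
 */
#if 0
static void example_log_error_type(uint32_t err_type)
{
        char name[16];

        amdgpu_ras_get_error_type_name(err_type, name);
        pr_info("error type: %s\n", name);
}
#endif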
4401
4402 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4403                                          const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4404                                          uint32_t instance,
4405                                          uint32_t *memory_id)
4406 {
4407         uint32_t err_status_lo_data, err_status_lo_offset;
4408
4409         if (!reg_entry)
4410                 return false;
4411
4412         err_status_lo_offset =
4413                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4414                                             reg_entry->seg_lo, reg_entry->reg_lo);
4415         err_status_lo_data = RREG32(err_status_lo_offset);
4416
4417         if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4418             !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4419                 return false;
4420
4421         *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4422
4423         return true;
4424 }
4425
4426 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4427                                        const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4428                                        uint32_t instance,
4429                                        unsigned long *err_cnt)
4430 {
4431         uint32_t err_status_hi_data, err_status_hi_offset;
4432
4433         if (!reg_entry)
4434                 return false;
4435
4436         err_status_hi_offset =
4437                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4438                                             reg_entry->seg_hi, reg_entry->reg_hi);
4439         err_status_hi_data = RREG32(err_status_hi_offset);
4440
4441         if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4442             !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4443                 /* keep the check here in case we need to refer to the result later */
4444                 dev_dbg(adev->dev, "Invalid err_info field\n");
4445
4446         /* read err count */
4447         *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4448
4449         return true;
4450 }
4451
4452 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4453                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
4454                                            uint32_t reg_list_size,
4455                                            const struct amdgpu_ras_memory_id_entry *mem_list,
4456                                            uint32_t mem_list_size,
4457                                            uint32_t instance,
4458                                            uint32_t err_type,
4459                                            unsigned long *err_count)
4460 {
4461         uint32_t memory_id;
4462         unsigned long err_cnt;
4463         char err_type_name[16];
4464         uint32_t i, j;
4465
4466         for (i = 0; i < reg_list_size; i++) {
4467                 /* query memory_id from err_status_lo */
4468                 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4469                                                          instance, &memory_id))
4470                         continue;
4471
4472                 /* query err_cnt from err_status_hi */
4473                 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4474                                                        instance, &err_cnt) ||
4475                     !err_cnt)
4476                         continue;
4477
4478                 *err_count += err_cnt;
4479
4480                 /* log the errors */
4481                 amdgpu_ras_get_error_type_name(err_type, err_type_name);
4482                 if (!mem_list) {
4483                         /* memory_list is not supported */
4484                         dev_info(adev->dev,
4485                                  "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4486                                  err_cnt, err_type_name,
4487                                  reg_list[i].block_name,
4488                                  instance, memory_id);
4489                 } else {
4490                         for (j = 0; j < mem_list_size; j++) {
4491                                 if (memory_id == mem_list[j].memory_id) {
4492                                         dev_info(adev->dev,
4493                                                  "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4494                                                  err_cnt, err_type_name,
4495                                                  reg_list[i].block_name,
4496                                                  instance, mem_list[j].name);
4497                                         break;
4498                                 }
4499                         }
4500                 }
4501         }
4502 }
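
/*
 * Usage sketch (illustrative): a hypothetical IP block defines its register
 * and memory-id tables and walks them per instance. The table entries below
 * are placeholders, not real register descriptions.
 */
#if 0
static const struct amdgpu_ras_err_status_reg_entry example_reg_list[] = {
        { .block_name = "example_block" },      /* hwip/segment/offset elided */
};

static const struct amdgpu_ras_memory_id_entry example_mem_list[] = {
        { .memory_id = 0, .name = "example_mem" },
};

static void example_query_counts(struct amdgpu_device *adev, uint32_t instance)
{
        unsigned long err_count = 0;

        amdgpu_ras_inst_query_ras_error_count(adev,
                                              example_reg_list,
                                              ARRAY_SIZE(example_reg_list),
                                              example_mem_list,
                                              ARRAY_SIZE(example_mem_list),
                                              instance,
                                              AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
                                              &err_count);
}
#endif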
4503
4504 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4505                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
4506                                            uint32_t reg_list_size,
4507                                            uint32_t instance)
4508 {
4509         uint32_t err_status_lo_offset, err_status_hi_offset;
4510         uint32_t i;
4511
4512         for (i = 0; i < reg_list_size; i++) {
4513                 err_status_lo_offset =
4514                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4515                                                     reg_list[i].seg_lo, reg_list[i].reg_lo);
4516                 err_status_hi_offset =
4517                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4518                                                     reg_list[i].seg_hi, reg_list[i].reg_hi);
4519                 WREG32(err_status_lo_offset, 0);
4520                 WREG32(err_status_hi_offset, 0);
4521         }
4522 }
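
/*
 * Usage sketch (illustrative): query and clear are typically paired per
 * instance. This hypothetical loop reuses the placeholder table from the
 * previous sketch.
 */
#if 0
static void example_query_then_clear(struct amdgpu_device *adev,
                                     uint32_t num_instances)
{
        unsigned long err_count = 0;
        uint32_t i;

        for (i = 0; i < num_instances; i++) {
                amdgpu_ras_inst_query_ras_error_count(adev, example_reg_list,
                                        ARRAY_SIZE(example_reg_list),
                                        NULL, 0, i,
                                        AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
                                        &err_count);
                amdgpu_ras_inst_reset_ras_error_count(adev, example_reg_list,
                                        ARRAY_SIZE(example_reg_list), i);
        }
}
#endif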
4523
4524 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4525 {
4526         memset(err_data, 0, sizeof(*err_data));
4527
4528         INIT_LIST_HEAD(&err_data->err_node_list);
4529
4530         return 0;
4531 }
4532
4533 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4534 {
4535         if (!err_node)
4536                 return;
4537
4538         list_del(&err_node->node);
4539         kvfree(err_node);
4540 }
4541
4542 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4543 {
4544         struct ras_err_node *err_node, *tmp;
4545
4546         list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4547                 amdgpu_ras_error_node_release(err_node);
4548 }
4549
4550 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4551                                                              struct amdgpu_smuio_mcm_config_info *mcm_info)
4552 {
4553         struct ras_err_node *err_node;
4554         struct amdgpu_smuio_mcm_config_info *ref_id;
4555
4556         if (!err_data || !mcm_info)
4557                 return NULL;
4558
4559         for_each_ras_error(err_node, err_data) {
4560                 ref_id = &err_node->err_info.mcm_info;
4561
4562                 if (mcm_info->socket_id == ref_id->socket_id &&
4563                     mcm_info->die_id == ref_id->die_id)
4564                         return err_node;
4565         }
4566
4567         return NULL;
4568 }
4569
4570 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4571 {
4572         struct ras_err_node *err_node;
4573
4574         err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4575         if (!err_node)
4576                 return NULL;
4577
4578         INIT_LIST_HEAD(&err_node->node);
4579
4580         return err_node;
4581 }
4582
4583 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4584 {
4585         struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4586         struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4587         struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4588         struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4589
4590         if (unlikely(infoa->socket_id != infob->socket_id))
4591                 return infoa->socket_id - infob->socket_id;
4592         else
4593                 return infoa->die_id - infob->die_id;
4596 }
4597
4598 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4599                                 struct amdgpu_smuio_mcm_config_info *mcm_info)
4600 {
4601         struct ras_err_node *err_node;
4602
4603         err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4604         if (err_node)
4605                 return &err_node->err_info;
4606
4607         err_node = amdgpu_ras_error_node_new();
4608         if (!err_node)
4609                 return NULL;
4610
4611         memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4612
4613         err_data->err_list_count++;
4614         list_add_tail(&err_node->node, &err_data->err_node_list);
4615         list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4616
4617         return &err_node->err_info;
4618 }
4619
4620 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4621                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4622                                         u64 count)
4623 {
4624         struct ras_err_info *err_info;
4625
4626         if (!err_data || !mcm_info)
4627                 return -EINVAL;
4628
4629         if (!count)
4630                 return 0;
4631
4632         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4633         if (!err_info)
4634                 return -EINVAL;
4635
4636         err_info->ue_count += count;
4637         err_data->ue_count += count;
4638
4639         return 0;
4640 }
4641
4642 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4643                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4644                                         u64 count)
4645 {
4646         struct ras_err_info *err_info;
4647
4648         if (!err_data || !mcm_info)
4649                 return -EINVAL;
4650
4651         if (!count)
4652                 return 0;
4653
4654         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4655         if (!err_info)
4656                 return -EINVAL;
4657
4658         err_info->ce_count += count;
4659         err_data->ce_count += count;
4660
4661         return 0;
4662 }
4663
4664 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4665                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4666                                         u64 count)
4667 {
4668         struct ras_err_info *err_info;
4669
4670         if (!err_data || !mcm_info)
4671                 return -EINVAL;
4672
4673         if (!count)
4674                 return 0;
4675
4676         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4677         if (!err_info)
4678                 return -EINVAL;
4679
4680         err_info->de_count += count;
4681         err_data->de_count += count;
4682
4683         return 0;
4684 }
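
/*
 * Usage sketch (illustrative): the typical lifecycle of a ras_err_data object
 * with the statistic helpers above. The counts and ids are made up.
 */
#if 0
static void example_collect_stats(void)
{
        struct ras_err_data err_data;
        struct amdgpu_smuio_mcm_config_info mcm_info = {
                .socket_id = 0,
                .die_id = 1,
        };

        amdgpu_ras_error_data_init(&err_data);

        /* per-die counts accumulate into both err_info and the totals */
        amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, 3);
        amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, 1);

        /* ... consume err_data.ce_count / ue_count / err_node_list ... */

        amdgpu_ras_error_data_fini(&err_data);
}
#endif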
4685
4686 #define mmMP0_SMN_C2PMSG_92     0x1609C
4687 #define mmMP0_SMN_C2PMSG_126    0x160BE
4688 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4689                                                  u32 instance)
4690 {
4691         u32 socket_id, aid_id, hbm_id;
4692         u32 fw_status;
4693         u32 boot_error;
4694         u64 reg_addr;
4695
4696         /* The SMN addressing pattern on other SOCs may differ from the one
4697          * used by aqua_vanjaram. Revisit this code if the pattern changes;
4698          * in that case, replace the aqua_vanjaram-specific implementation
4699          * with a more generic helper. */
4700         reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4701                    aqua_vanjaram_encode_ext_smn_addressing(instance);
4702         fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4703
4704         reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4705                    aqua_vanjaram_encode_ext_smn_addressing(instance);
4706         boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4707
4708         socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4709         aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4710         hbm_id = (AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1;
4711
4712         if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4713                 dev_info(adev->dev,
4714                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
4715                          socket_id, aid_id, hbm_id, fw_status);
4716
4717         if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4718                 dev_info(adev->dev,
4719                          "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
4720                          socket_id, aid_id, fw_status);
4721
4722         if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4723                 dev_info(adev->dev,
4724                          "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
4725                          socket_id, aid_id, fw_status);
4726
4727         if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4728                 dev_info(adev->dev,
4729                          "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
4730                          socket_id, aid_id, fw_status);
4731
4732         if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4733                 dev_info(adev->dev,
4734                          "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
4735                          socket_id, aid_id, fw_status);
4736
4737         if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4738                 dev_info(adev->dev,
4739                          "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
4740                          socket_id, aid_id, fw_status);
4741
4742         if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4743                 dev_info(adev->dev,
4744                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
4745                          socket_id, aid_id, hbm_id, fw_status);
4746
4747         if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4748                 dev_info(adev->dev,
4749                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
4750                          socket_id, aid_id, hbm_id, fw_status);
4751
4752         if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
4753                 dev_info(adev->dev,
4754                          "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
4755                          socket_id, aid_id, fw_status);
4756
4757         if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
4758                 dev_info(adev->dev,
4759                          "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
4760                          socket_id, aid_id, fw_status);
4761 }
4762
4763 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
4764                                            u32 instance)
4765 {
4766         u64 reg_addr;
4767         u32 reg_data;
4768         int retry_loop;
4769
4770         reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4771                    aqua_vanjaram_encode_ext_smn_addressing(instance);
4772
4773         for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4774                 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4775                 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
4776                         return false;
4777                 else
4778                         msleep(1);
4779         }
4780
4781         return true;
4782 }
4783
4784 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4785 {
4786         u32 i;
4787
4788         for (i = 0; i < num_instances; i++) {
4789                 if (amdgpu_ras_boot_error_detected(adev, i))
4790                         amdgpu_ras_boot_time_error_reporting(adev, i);
4791         }
4792 }
4793
4794 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
4795 {
4796         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4797         struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
4798         uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
4799         int ret = 0;
4800
4801         mutex_lock(&con->page_rsv_lock);
4802         ret = amdgpu_vram_mgr_query_page_status(mgr, start);
4803         if (ret == -ENOENT)
4804                 ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
4805         mutex_unlock(&con->page_rsv_lock);
4806
4807         return ret;
4808 }
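
/*
 * Usage sketch (illustrative): reserving a retired page by physical frame
 * number. The pfn below is made up; real callers derive it from a retired
 * ECC address.
 */
#if 0
static void example_reserve_bad_page(struct amdgpu_device *adev)
{
        uint64_t pfn = 0x12345; /* hypothetical bad-page frame number */

        if (amdgpu_ras_reserve_page(adev, pfn))
                dev_warn(adev->dev, "failed to reserve page 0x%llx\n", pfn);
}
#endif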
4809
4810 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
4811                                 const char *fmt, ...)
4812 {
4813         struct va_format vaf;
4814         va_list args;
4815
4816         va_start(args, fmt);
4817         vaf.fmt = fmt;
4818         vaf.va = &args;
4819
4820         if (RAS_EVENT_ID_IS_VALID(event_id))
4821                 dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
4822         else
4823                 dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
4824
4825         va_end(args);
4826 }
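
/*
 * Usage sketch (illustrative): logging with a RAS event id. The id below is
 * made up; valid ids come from the event-id helpers earlier in this file.
 */
#if 0
static void example_event_log(struct amdgpu_device *adev)
{
        u64 event_id = 42;      /* hypothetical, assumed valid */

        /* prints "{42}..." for a valid id, plain text otherwise */
        amdgpu_ras_event_log_print(adev, event_id, "umc error handled\n");
}
#endif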
4827
4828 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
4829 {
4830         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4831
4832         if (!con)
4833                 return false;
4834
4835         return con->is_rma;
4836 }