/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif
static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
        "none",
        "parity",
        "single_correctable",
        "multi_uncorrectable",
        "poison",
};

const char *ras_block_string[] = {
        "umc",
        "sdma",
        "gfx",
        "mmhub",
        "athub",
        "pcie_bif",
        "hdp",
        "xgmi_wafl",
        "df",
        "smn",
        "sem",
        "mp0",
        "mp1",
        "fuse",
        "mca",
        "vcn",
        "jpeg",
        "ih",
        "mpio",
};

const char *ras_mca_block_string[] = {
        "mca_mp0",
        "mca_mp1",
        "mca_mpio",
        "mca_iohc",
};

struct amdgpu_ras_block_list {
        /* ras block link */
        struct list_head node;

        struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
        if (!ras_block)
                return "NULL";

        if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
            ras_block->block >= ARRAY_SIZE(ras_block_string))
                return "OUT OF RANGE";

        if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
                return ras_mca_block_string[ras_block->sub_block_index];

        return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
        (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)

#define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms

#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms

#define MAX_FLUSH_RETIRE_DWORK_TIMES  100

enum amdgpu_ras_retire_page_reservation {
        AMDGPU_RAS_RETIRE_PAGE_RESERVED,
        AMDGPU_RAS_RETIRE_PAGE_PENDING,
        AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
                                uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
                                uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
        struct amdgpu_device *devs[MAX_GPU_INSTANCE];
        int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
        if (adev && amdgpu_ras_get_context(adev))
                amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
        if (adev && amdgpu_ras_get_context(adev))
                return amdgpu_ras_get_context(adev)->error_query_ready;

        return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
        struct ras_err_data err_data;
        struct eeprom_table_record err_rec;
        int ret;

        if ((address >= adev->gmc.mc_vram_size) ||
            (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                dev_warn(adev->dev,
                         "RAS WARN: input address 0x%llx is invalid.\n",
                         address);
                return -EINVAL;
        }

        if (amdgpu_ras_check_bad_page(adev, address)) {
                dev_warn(adev->dev,
                         "RAS WARN: 0x%llx has already been marked as bad page!\n",
                         address);
                return 0;
        }

        ret = amdgpu_ras_error_data_init(&err_data);
        if (ret)
                return ret;

        memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
        err_data.err_addr = &err_rec;
        amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

        if (amdgpu_bad_page_threshold != 0) {
                amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
                                         err_data.err_addr_cnt);
                amdgpu_ras_save_bad_pages(adev, NULL);
        }

        amdgpu_ras_error_data_fini(&err_data);

        dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
        dev_warn(adev->dev, "Clear EEPROM:\n");
        dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

        return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
{
        struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
        struct ras_query_if info = {
                .head = obj->head,
        };
        ssize_t s;
        char val[128];

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
        if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
                        "ue", info.ue_count,
                        "ce", info.ce_count);
        if (*pos >= s)
                return 0;

        s -= *pos;
        s = min_t(u64, s, size);

        if (copy_to_user(buf, &val[*pos], s))
                return -EINVAL;

        *pos += s;

        return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
        .owner = THIS_MODULE,
        .read = amdgpu_ras_debugfs_read,
        .write = NULL,
        .llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
                *block_id = i;
                if (strcmp(name, ras_block_string[i]) == 0)
                        return 0;
        }
        return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                const char __user *buf, size_t size,
                loff_t *pos, struct ras_debug_if *data)
{
        ssize_t s = min_t(u64, 64, size);
        char str[65];
        char block_name[33];
        char err[9] = "ue";
        int op = -1;
        int block_id;
        uint32_t sub_block;
        u64 address, value;
        /* default value is 0 if the mask is not set by user */
        u32 instance_mask = 0;

        if (*pos)
                return -EINVAL;
        *pos = size;

        memset(str, 0, sizeof(str));
        memset(data, 0, sizeof(*data));

        if (copy_from_user(str, buf, s))
                return -EINVAL;

        if (sscanf(str, "disable %32s", block_name) == 1)
                op = 0;
        else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
        else if (strstr(str, "retire_page") != NULL)
                op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;

        if (op != -1) {
                if (op == 3) {
                        if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
                            sscanf(str, "%*s %llu", &address) != 1)
                                return -EINVAL;

                        data->op = op;
                        data->inject.address = address;

                        return 0;
                }

                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;

                data->head.block = block_id;
                /* only ue, ce and poison errors are supported */
                if (!memcmp("ue", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                else if (!memcmp("ce", err, 2))
                        data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
                else if (!memcmp("poison", err, 6))
                        data->head.type = AMDGPU_RAS_ERROR__POISON;
                else
                        return -EINVAL;

                data->op = op;

                if (op == 2) {
                        if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
                                   &sub_block, &address, &value, &instance_mask) != 4 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu %u",
                                   &sub_block, &address, &value, &instance_mask) != 4 &&
                                sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
                                   &sub_block, &address, &value) != 3 &&
                            sscanf(str, "%*s %*s %*s %u %llu %llu",
                                   &sub_block, &address, &value) != 3)
                                return -EINVAL;
                        data->head.sub_block_index = sub_block;
                        data->inject.address = address;
                        data->inject.value = value;
                        data->inject.instance_mask = instance_mask;
                }
        } else {
                if (size < sizeof(*data))
                        return -EINVAL;

                if (copy_from_user(data, buf, sizeof(*data)))
                        return -EINVAL;
        }

        return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
                                struct ras_debug_if *data)
{
        int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
        uint32_t mask, inst_mask = data->inject.instance_mask;

        /* no need to set instance mask if there is only one instance */
        if (num_xcc <= 1 && inst_mask) {
                data->inject.instance_mask = 0;
                dev_dbg(adev->dev,
                        "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
                        inst_mask);

                return;
        }

        switch (data->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
                mask = GENMASK(num_xcc - 1, 0);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                mask = GENMASK(adev->sdma.num_instances - 1, 0);
                break;
        case AMDGPU_RAS_BLOCK__VCN:
        case AMDGPU_RAS_BLOCK__JPEG:
                mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
                break;
        default:
                mask = inst_mask;
                break;
        }

        /* remove invalid bits in instance mask */
        data->inject.instance_mask &= mask;
        if (inst_mask != data->inject.instance_mask)
                dev_dbg(adev->dev,
                        "Adjust RAS inject mask 0x%x to 0x%x\n",
                        inst_mask, data->inject.instance_mask);
}

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head: address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
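 *
 * A minimal sketch (error handling omitted; it assumes the struct and enum
 * definitions have been copied from this driver as described above):
 *
 * .. code-block:: c
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      struct ras_debug_if data = { 0 };
 *      int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *      data.op = 1;                            // 1 == enable RAS on the block
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *      write(fd, &data, sizeof(data));         // the write size must be >= sizeof(data)
 *      close(fd);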
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *      echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card which you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *      see ras_block_string[] for details
 *
 * The error type is one of: ue, ce and poison where,
 *      ue is multi-uncorrectable
 *      ce is single-correctable
 *      poison is poison
 *
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional, and its default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
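 *
 * For instance (card0 is an assumption of the example):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/features
 *      cat /sys/class/drm/card0/device/ras/umc_err_count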
 *
 * .. note::
 *      Operations are only allowed on blocks which are supported.
 *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *      to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
                                             const char __user *buf,
                                             size_t size, loff_t *pos)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
        struct ras_debug_if data;
        int ret = 0;

        if (!amdgpu_ras_get_error_query_ready(adev)) {
                dev_warn(adev->dev, "RAS WARN: error injection "
                                "currently inaccessible\n");
                return size;
        }

        ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
        if (ret)
                return ret;

        if (data.op == 3) {
                ret = amdgpu_reserve_page_direct(adev, data.inject.address);
                if (!ret)
                        return size;
                else
                        return ret;
        }

        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;

        switch (data.op) {
        case 0:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
                break;
        case 1:
                ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
                break;
        case 2:
                if ((data.inject.address >= adev->gmc.mc_vram_size &&
                    adev->gmc.mc_vram_size) ||
                    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
                        dev_warn(adev->dev, "RAS WARN: input address "
                                        "0x%llx is invalid.",
                                        data.inject.address);
                        ret = -EINVAL;
                        break;
                }

                /* umc ce/ue error injection for a bad page is not allowed */
                if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
                    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
                        dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
                                 "already been marked as bad!\n",
                                 data.inject.address);
                        break;
                }

                amdgpu_ras_instance_mask_check(adev, &data);

                /* data.inject.address is offset instead of absolute gpu address */
                ret = amdgpu_ras_error_inject(adev, &data.inject);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in VRAM.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *      echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
                                               const char __user *buf,
                                               size_t size, loff_t *pos)
{
        struct amdgpu_device *adev =
                (struct amdgpu_device *)file_inode(f)->i_private;
        int ret;

        ret = amdgpu_ras_eeprom_reset_table(
                &(amdgpu_ras_get_context(adev)->eeprom_control));

        if (!ret) {
                /* Something was written to EEPROM.
                 */
                amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
                return size;
        } else {
                return ret;
        }
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_ctrl_write,
        .llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
        .owner = THIS_MODULE,
        .read = NULL,
        .write = amdgpu_ras_debugfs_eeprom_write,
        .llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the GPU through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is as follows:
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *      ue: 0
 *      ce: 1
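 *
 * which is, for instance, the output of (card0 is an assumption of the
 * example):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count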
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
            amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
                if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
                        dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
        }

        if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
                return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                                "ce", info.ce_count, "de", info.de_count);
        else
                return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                                "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
        if (obj && (--obj->use == 0)) {
                list_del(&obj->node);
                amdgpu_ras_error_data_fini(&obj->err_data);
        }

        if (obj && (obj->use < 0))
                DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                return NULL;

        if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                        return NULL;

                obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
        } else
                obj = &con->objs[head->block];

        /* already exists, do not create it again */
        if (alive_obj(obj))
                return NULL;

        if (amdgpu_ras_error_data_init(&obj->err_data))
                return NULL;

        obj->head = *head;
        obj->adev = adev;
        list_add(&obj->node, &con->head);
        get_obj(obj);

        return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
        int i;

        if (!adev->ras_enabled || !con)
                return NULL;

        if (head) {
                if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
                        return NULL;

                if (head->block == AMDGPU_RAS_BLOCK__MCA) {
                        if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
                                return NULL;

                        obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
                } else
                        obj = &con->objs[head->block];

                if (alive_obj(obj))
                        return obj;
        } else {
                for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                        obj = &con->objs[i];
                        if (alive_obj(obj))
                                return obj;
                }
        }

        return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                                         struct ras_common_if *head)
{
        return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
                struct ras_common_if *head)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

        return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, int enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

        /* If hardware does not support ras, then do not create obj.
         * But if hardware supports ras, we can create the obj.
         * The ras framework checks con->hw_supported to see if it needs to do
         * the corresponding initialization.
         * The IP checks con->support to see if it needs to disable ras.
         */
        if (!amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        if (enable) {
                if (!obj) {
                        obj = amdgpu_ras_create_obj(adev, head);
                        if (!obj)
                                return -EINVAL;
                } else {
                        /* In case we create obj somewhere else */
                        get_obj(obj);
                }
                con->features |= BIT(head->block);
        } else {
                if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
                        con->features &= ~BIT(head->block);
                        put_obj(obj);
                }
        }

        return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        union ta_ras_cmd_input *info;
        int ret;

        if (!con)
                return -EINVAL;

        /* For non-gfx ip, do not enable ras feature if it is not allowed.
         * For gfx ip, regardless of feature support status, force issuing
         * the enable or disable ras feature commands.
         */
        if (head->block != AMDGPU_RAS_BLOCK__GFX &&
            !amdgpu_ras_is_feature_allowed(adev, head))
                return 0;

        /* Only enable gfx ras feature from host side */
        if (head->block == AMDGPU_RAS_BLOCK__GFX &&
            !amdgpu_sriov_vf(adev) &&
            !amdgpu_ras_intr_triggered()) {
                info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;

                if (!enable) {
                        info->disable_features = (struct ta_ras_disable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                } else {
                        info->enable_features = (struct ta_ras_enable_features_input) {
                                .block_id =  amdgpu_ras_block_to_ta(head->block),
                                .error_type = amdgpu_ras_error_to_ta(head->type),
                        };
                }

                ret = psp_ras_enable_features(&adev->psp, info, enable);
                if (ret) {
                        dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
                                enable ? "enable":"disable",
                                get_ras_block_str(head),
                                amdgpu_ras_is_poison_mode_supported(adev), ret);
                        kfree(info);
                        return ret;
                }

                kfree(info);
        }

        /* setup the obj */
        __amdgpu_ras_feature_enable(adev, head, enable);

        return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                struct ras_common_if *head, bool enable)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int ret;

        if (!con)
                return -EINVAL;

        if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
                if (enable) {
                        /* There is no harm in issuing a ras TA cmd regardless of
                         * the current ras state.
                         * If current state == target state, it will do nothing.
                         * But sometimes it requests the driver to reset and repost
                         * with error code -EAGAIN.
                         */
                        ret = amdgpu_ras_feature_enable(adev, head, 1);
                        /* With an old ras TA, we might fail to enable ras.
                         * Log it and just set up the object.
                         * TODO: remove this WA in the future.
                         */
                        if (ret == -EINVAL) {
                                ret = __amdgpu_ras_feature_enable(adev, head, 1);
                                if (!ret)
                                        dev_info(adev->dev,
                                                "RAS INFO: %s setup object\n",
                                                get_ras_block_str(head));
                        }
                } else {
                        /* setup the object then issue a ras TA disable cmd. */
                        ret = __amdgpu_ras_feature_enable(adev, head, 1);
                        if (ret)
                                return ret;

                        /* gfx block ras disable cmd must be sent to ras-ta */
                        if (head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features |= BIT(head->block);

                        ret = amdgpu_ras_feature_enable(adev, head, 0);

                        /* clean gfx block ras features flag */
                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                con->features &= ~BIT(head->block);
                }
        } else
                ret = amdgpu_ras_feature_enable(adev, head, enable);

        return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj, *tmp;

        list_for_each_entry_safe(obj, tmp, &con->head, node) {
                /* bypass psp.
                 * aka just release the obj and corresponding flags
                 */
                if (bypass) {
                        if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
                                break;
                }
        }

        return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                bool bypass)
{
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        int i;
        const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

        for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = i,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };

                if (i == AMDGPU_RAS_BLOCK__MCA)
                        continue;

                if (bypass) {
                        /*
                         * bypass psp. vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
                struct ras_common_if head = {
                        .block = AMDGPU_RAS_BLOCK__MCA,
                        .type = default_ras_type,
                        .sub_block_index = i,
                };

                if (bypass) {
                        /*
                         * bypass psp. vbios enables ras for us,
                         * so just create the obj
                         */
                        if (__amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                } else {
                        if (amdgpu_ras_feature_enable(adev, &head, 1))
                                break;
                }
        }

        return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
                enum amdgpu_ras_block block)
{
        if (!block_obj)
                return -EINVAL;

        if (block_obj->ras_comm.block == block)
                return 0;

        return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
                                        enum amdgpu_ras_block block, uint32_t sub_block_index)
{
        struct amdgpu_ras_block_list *node, *tmp;
        struct amdgpu_ras_block_object *obj;

        if (block >= AMDGPU_RAS_BLOCK__LAST)
                return NULL;

        list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
                if (!node->ras_obj) {
                        dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
                        continue;
                }

                obj = node->ras_obj;
                if (obj->ras_block_match) {
                        if (obj->ras_block_match(obj, block, sub_block_index) == 0)
                                return obj;
                } else {
                        if (amdgpu_ras_block_match_default(obj, block) == 0)
                                return obj;
                }
        }

        return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        int ret = 0;

        /*
         * choose the right query method according to
         * whether the smu supports querying error information
         */
        ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
        if (ret == -EOPNOTSUPP) {
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
                if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
                    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
                        adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
        } else if (!ret) {
                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_count)
                        adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

                if (adev->umc.ras &&
                        adev->umc.ras->ecc_info_query_ras_error_address)
                        adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
        }
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
                                              struct ras_manager *ras_mgr,
                                              struct ras_err_data *err_data,
                                              struct ras_query_context *qctx,
                                              const char *blk_name,
                                              bool is_ue,
                                              bool is_de)
{
        struct amdgpu_smuio_mcm_config_info *mcm_info;
        struct ras_err_node *err_node;
        struct ras_err_info *err_info;
        u64 event_id = qctx->evid.event_id;

        if (is_ue) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        if (err_info->ue_count) {
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld new uncorrectable hardware errors detected in %s block\n",
                                              mcm_info->socket_id,
                                              mcm_info->die_id,
                                              err_info->ue_count,
                                              blk_name);
                        }
                }

                for_each_ras_error(err_node, &ras_mgr->err_data) {
                        err_info = &err_node->err_info;
                        mcm_info = &err_info->mcm_info;
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                      "%lld uncorrectable hardware errors detected in total in %s block\n",
                                      mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
                }

        } else {
                if (is_de) {
                        for_each_ras_error(err_node, err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                if (err_info->de_count) {
                                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                                      "%lld new deferred hardware errors detected in %s block\n",
                                                      mcm_info->socket_id,
                                                      mcm_info->die_id,
                                                      err_info->de_count,
                                                      blk_name);
                                }
                        }

                        for_each_ras_error(err_node, &ras_mgr->err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld deferred hardware errors detected in total in %s block\n",
                                              mcm_info->socket_id, mcm_info->die_id,
                                              err_info->de_count, blk_name);
                        }
                } else {
                        for_each_ras_error(err_node, err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                if (err_info->ce_count) {
                                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                                      "%lld new correctable hardware errors detected in %s block\n",
                                                      mcm_info->socket_id,
                                                      mcm_info->die_id,
                                                      err_info->ce_count,
                                                      blk_name);
                                }
                        }

                        for_each_ras_error(err_node, &ras_mgr->err_data) {
                                err_info = &err_node->err_info;
                                mcm_info = &err_info->mcm_info;
                                RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
                                              "%lld correctable hardware errors detected in total in %s block\n",
                                              mcm_info->socket_id, mcm_info->die_id,
                                              err_info->ce_count, blk_name);
                        }
                }
        }
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
        return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
                                             struct ras_query_if *query_if,
                                             struct ras_err_data *err_data,
                                             struct ras_query_context *qctx)
{
        struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
        const char *blk_name = get_ras_block_str(&query_if->head);
        u64 event_id = qctx->evid.event_id;

        if (err_data->ce_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, false, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld correctable hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.ce_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.ce_count,
                                      blk_name);
                }
        }

        if (err_data->ue_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, true, false);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld uncorrectable hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.ue_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.ue_count,
                                      blk_name);
                }
        }

        if (err_data->de_count) {
                if (err_data_has_source_info(err_data)) {
                        amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
                                                          blk_name, false, true);
                } else if (!adev->aid_mask &&
                           adev->smuio.funcs &&
                           adev->smuio.funcs->get_socket_id &&
                           adev->smuio.funcs->get_die_id) {
                        RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
                                      "%ld deferred hardware errors "
                                      "detected in %s block\n",
                                      adev->smuio.funcs->get_socket_id(adev),
                                      adev->smuio.funcs->get_die_id(adev),
                                      ras_mgr->err_data.de_count,
                                      blk_name);
                } else {
                        RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
                                      "detected in %s block\n",
                                      ras_mgr->err_data.de_count,
                                      blk_name);
                }
        }
}

static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
                                                  struct ras_query_if *query_if,
                                                  struct ras_err_data *err_data,
                                                  struct ras_query_context *qctx)
{
        unsigned long new_ue, new_ce, new_de;
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
        const char *blk_name = get_ras_block_str(&query_if->head);
        u64 event_id = qctx->evid.event_id;

        new_ce = err_data->ce_count - obj->err_data.ce_count;
        new_ue = err_data->ue_count - obj->err_data.ue_count;
        new_de = err_data->de_count - obj->err_data.de_count;

        if (new_ce) {
                RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
                              "detected in %s block\n",
                              new_ce,
                              blk_name);
        }

        if (new_ue) {
                RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
                              "detected in %s block\n",
                              new_ue,
                              blk_name);
        }

        if (new_de) {
                RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
                              "detected in %s block\n",
                              new_de,
                              blk_name);
        }
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
        struct ras_err_node *err_node;
        struct ras_err_info *err_info;

        if (err_data_has_source_info(err_data)) {
                for_each_ras_error(err_node, err_data) {
                        err_info = &err_node->err_info;
                        amdgpu_ras_error_statistic_de_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->de_count);
                        amdgpu_ras_error_statistic_ce_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->ce_count);
                        amdgpu_ras_error_statistic_ue_count(&obj->err_data,
                                        &err_info->mcm_info, err_info->ue_count);
                }
        } else {
                /* for legacy asic path which doesn't have error source info */
                obj->err_data.ue_count += err_data->ue_count;
                obj->err_data.ce_count += err_data->ce_count;
                obj->err_data.de_count += err_data->de_count;
        }
}

static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
                                                             struct ras_err_data *err_data)
{
        /* Host reports absolute counts */
        obj->err_data.ue_count = err_data->ue_count;
        obj->err_data.ce_count = err_data->ce_count;
        obj->err_data.de_count = err_data->de_count;
}

static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
        struct ras_common_if head;

        memset(&head, 0, sizeof(head));
        head.block = blk;

        return amdgpu_ras_find_obj(adev, &head);
}

int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                        const struct aca_info *aca_info, void *data)
{
        struct ras_manager *obj;

        /* in resume phase, no need to create aca fs node */
        if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
                return 0;

        obj = get_ras_manager(adev, blk);
        if (!obj)
                return -EINVAL;

        return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
}

int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
{
        struct ras_manager *obj;

        obj = get_ras_manager(adev, blk);
        if (!obj)
                return -EINVAL;

        amdgpu_aca_remove_handle(&obj->aca_handle);

        return 0;
}

static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                         enum aca_error_type type, struct ras_err_data *err_data,
                                         struct ras_query_context *qctx)
{
        struct ras_manager *obj;

        obj = get_ras_manager(adev, blk);
        if (!obj)
                return -EINVAL;

        return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
}

ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
                                  struct aca_handle *handle, char *buf, void *data)
{
        struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
        struct ras_query_if info = {
                .head = obj->head,
        };

        if (!amdgpu_ras_get_error_query_ready(obj->adev))
                return sysfs_emit(buf, "Query currently inaccessible\n");

        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;

        return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
                          "ce", info.ce_count, "de", info.de_count);
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
                                                struct ras_query_if *info,
                                                struct ras_err_data *err_data,
                                                struct ras_query_context *qctx,
                                                unsigned int error_query_mode)
{
        enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
        struct amdgpu_ras_block_object *block_obj = NULL;
        int ret;

        if (blk == AMDGPU_RAS_BLOCK_COUNT)
                return -EINVAL;

        if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
                return -EINVAL;

        if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
                return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
        } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
                if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
                        amdgpu_ras_get_ecc_info(adev, err_data);
                } else {
                        block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
                        if (!block_obj || !block_obj->hw_ops) {
                                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
                                             get_ras_block_str(&info->head));
                                return -EINVAL;
                        }

                        if (block_obj->hw_ops->query_ras_error_count)
                                block_obj->hw_ops->query_ras_error_count(adev, err_data);

                        if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
                            (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
                            (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
                                if (block_obj->hw_ops->query_ras_error_status)
                                        block_obj->hw_ops->query_ras_error_status(adev);
                        }
                }
        } else {
                if (amdgpu_aca_is_enabled(adev)) {
                        ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
                        if (ret)
                                return ret;

                        ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
                        if (ret)
                                return ret;

                        ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
                        if (ret)
                                return ret;
                } else {
                        /* FIXME: add code to check return value later */
                        amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
                        amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
                }
        }

        return 0;
}

/* query/inject/cure begin */
static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
                                                    struct ras_query_if *info,
                                                    enum ras_event_type type)
{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data;
        struct ras_query_context qctx;
        unsigned int error_query_mode;
        int ret;

        if (!obj)
                return -EINVAL;
1430
1431         ret = amdgpu_ras_error_data_init(&err_data);
1432         if (ret)
1433                 return ret;
1434
1435         if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1436                 return -EINVAL;
1437
1438         memset(&qctx, 0, sizeof(qctx));
1439         qctx.evid.type = type;
1440         qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1441
1442         if (!down_read_trylock(&adev->reset_domain->sem)) {
1443                 ret = -EIO;
1444                 goto out_fini_err_data;
1445         }
1446
1447         ret = amdgpu_ras_query_error_status_helper(adev, info,
1448                                                    &err_data,
1449                                                    &qctx,
1450                                                    error_query_mode);
1451         up_read(&adev->reset_domain->sem);
1452         if (ret)
1453                 goto out_fini_err_data;
1454
1455         if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1456                 amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1457                 amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1458         } else {
1459                 /* Host provides absolute error counts. First generate the report
1460                  * using the previous VF internal count against the new host count.
1461                  * Then update the VF internal count.
1462                  */
1463                 amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1464                 amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1465         }
1466
1467         info->ue_count = obj->err_data.ue_count;
1468         info->ce_count = obj->err_data.ce_count;
1469         info->de_count = obj->err_data.de_count;
1470
1471 out_fini_err_data:
1472         amdgpu_ras_error_data_fini(&err_data);
1473
1474         return ret;
1475 }
1476
1477 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1478 {
1479         return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1480 }
1481
1482 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1483                 enum amdgpu_ras_block block)
1484 {
1485         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1486         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1487         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1488
1489         if (!block_obj || !block_obj->hw_ops) {
1490                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1491                                 ras_block_str(block));
1492                 return -EOPNOTSUPP;
1493         }
1494
1495         if (!amdgpu_ras_is_supported(adev, block) ||
1496             !amdgpu_ras_get_aca_debug_mode(adev))
1497                 return -EOPNOTSUPP;
1498
1499         /* skip ras error reset in gpu reset */
1500         if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1501             ((smu_funcs && smu_funcs->set_debug_mode) ||
1502              (mca_funcs && mca_funcs->mca_set_debug_mode)))
1503                 return -EOPNOTSUPP;
1504
1505         if (block_obj->hw_ops->reset_ras_error_count)
1506                 block_obj->hw_ops->reset_ras_error_count(adev);
1507
1508         return 0;
1509 }
1510
1511 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1512                 enum amdgpu_ras_block block)
1513 {
1514         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1515
1516         if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1517                 return 0;
1518
1519         if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1520             (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1521                 if (block_obj->hw_ops->reset_ras_error_status)
1522                         block_obj->hw_ops->reset_ras_error_status(adev);
1523         }
1524
1525         return 0;
1526 }
1527
1528 /* wrapper of psp_ras_trigger_error */
1529 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1530                 struct ras_inject_if *info)
1531 {
1532         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1533         struct ta_ras_trigger_error_input block_info = {
1534                 .block_id =  amdgpu_ras_block_to_ta(info->head.block),
1535                 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1536                 .sub_block_index = info->head.sub_block_index,
1537                 .address = info->address,
1538                 .value = info->value,
1539         };
1540         int ret = -EINVAL;
1541         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1542                                                         info->head.block,
1543                                                         info->head.sub_block_index);
1544
1545         /* inject on guest isn't allowed, return success directly */
1546         if (amdgpu_sriov_vf(adev))
1547                 return 0;
1548
1549         if (!obj)
1550                 return -EINVAL;
1551
1552         if (!block_obj || !block_obj->hw_ops)   {
1553                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1554                              get_ras_block_str(&info->head));
1555                 return -EINVAL;
1556         }
1557
1558         /* Calculate XGMI relative offset */
1559         if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1560             info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1561                 block_info.address =
1562                         amdgpu_xgmi_get_relative_phy_addr(adev,
1563                                                           block_info.address);
1564         }
1565
1566         if (block_obj->hw_ops->ras_error_inject) {
1567                 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1568                         ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1569                 else /* Special ras_error_inject is defined (e.g. xgmi) */
1570                         ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1571                                                 info->instance_mask);
1572         } else {
1573                 /* default path */
1574                 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1575         }
1576
1577         if (ret)
1578                 dev_err(adev->dev, "ras inject %s failed %d\n",
1579                         get_ras_block_str(&info->head), ret);
1580
1581         return ret;
1582 }
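
/*
 * A minimal injection sketch (illustrative values only; injection is
 * silently skipped on SR-IOV guests as noted above):
 *
 *      struct ras_inject_if inject = {
 *              .head.block = AMDGPU_RAS_BLOCK__UMC,
 *              .head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *              .address = 0,
 *              .value = 0,
 *      };
 *      int err = amdgpu_ras_error_inject(adev, &inject);
 */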
1583
1584 /**
1585  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1586  * @adev: pointer to AMD GPU device
1587  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1588  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1589  * @query_info: pointer to ras_query_if
1590  *
1591  * Return 0 on query success or if there is nothing to do; otherwise
1592  * return an error on failure.
1593  */
1594 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1595                                                unsigned long *ce_count,
1596                                                unsigned long *ue_count,
1597                                                struct ras_query_if *query_info)
1598 {
1599         int ret;
1600
1601         if (!query_info)
1602                 /* do nothing if query_info is not specified */
1603                 return 0;
1604
1605         ret = amdgpu_ras_query_error_status(adev, query_info);
1606         if (ret)
1607                 return ret;
1608
1609         *ce_count += query_info->ce_count;
1610         *ue_count += query_info->ue_count;
1611
1612         /* some hardware/IPs support read-to-clear, so there is
1613          * no need to explicitly reset the err status after the query call */
1614         if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1615             amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1616                 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1617                         dev_warn(adev->dev,
1618                                  "Failed to reset error counter and error status\n");
1619         }
1620
1621         return 0;
1622 }
1623
1624 /**
1625  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1626  * @adev: pointer to AMD GPU device
1627  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1628  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1629  * errors.
1630  * @query_info: pointer to ras_query_if if the query request is only for a
1631  * specific ip block; if @query_info is NULL, then the query request is for
1632  * all the ip blocks that support querying ras error counters/status
1633  *
1634  * If @ce_count or @ue_count is set, count and return the corresponding
1635  * error counts in those integer pointers. Return 0 if the device
1636  * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
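 *
 * A minimal usage sketch (hypothetical caller; adev is assumed to be a
 * valid amdgpu_device):
 *
 * .. code-block:: c
 *
 *      unsigned long ce = 0, ue = 0;
 *
 *      if (!amdgpu_ras_query_error_count(adev, &ce, &ue, NULL))
 *              dev_info(adev->dev, "ce=%lu ue=%lu\n", ce, ue);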
1637  */
1638 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1639                                  unsigned long *ce_count,
1640                                  unsigned long *ue_count,
1641                                  struct ras_query_if *query_info)
1642 {
1643         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1644         struct ras_manager *obj;
1645         unsigned long ce, ue;
1646         int ret = 0;
1647
1648         if (!adev->ras_enabled || !con)
1649                 return -EOPNOTSUPP;
1650
1651         /* Don't count since the caller requested no reporting. */
1653         if (!ce_count && !ue_count)
1654                 return 0;
1655
1656         ce = 0;
1657         ue = 0;
1658         if (!query_info) {
1659                 /* query all the ip blocks that support ras query interface */
1660                 list_for_each_entry(obj, &con->head, node) {
1661                         struct ras_query_if info = {
1662                                 .head = obj->head,
1663                         };
1664
1665                         ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1666                 }
1667         } else {
1668                 /* query specific ip block */
1669                 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1670         }
1671
1672         if (ret)
1673                 return ret;
1674
1675         if (ce_count)
1676                 *ce_count = ce;
1677
1678         if (ue_count)
1679                 *ue_count = ue;
1680
1681         return 0;
1682 }
1683 /* query/inject/cure end */
1684
1685
1686 /* sysfs begin */
1687
1688 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1689                 struct ras_badpage **bps, unsigned int *count);
1690
1691 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1692 {
1693         switch (flags) {
1694         case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1695                 return "R";
1696         case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1697                 return "P";
1698         case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1699         default:
1700                 return "F";
1701         }
1702 }
1703
1704 /**
1705  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1706  *
1707  * It allows users to read the bad pages of vram on the gpu through
1708  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1709  *
1710  * It outputs multiple lines, and each line stands for one gpu page.
1711  *
1712  * The format of one line is below,
1713  * gpu pfn : gpu page size : flags
1714  *
1715  * gpu pfn and gpu page size are printed in hex format.
1716  * flags can be one of the characters below,
1717  *
1718  * R: reserved, this gpu page is reserved and not available for use.
1719  *
1720  * P: pending for reserve, this gpu page is marked as bad and will be
1721  * reserved in the next window of page_reserve.
1722  *
1723  * F: unable to reserve, this gpu page can't be reserved for some reason.
1724  *
1724  *
1725  * Examples:
1726  *
1727  * .. code-block:: bash
1728  *
1729  *      0x00000001 : 0x00001000 : R
1730  *      0x00000002 : 0x00001000 : P
1731  *
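 * The list can be read directly (assuming the GPU is card0; the card
 * index may differ):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *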
1732  */
1733
1734 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1735                 struct kobject *kobj, struct bin_attribute *attr,
1736                 char *buf, loff_t ppos, size_t count)
1737 {
1738         struct amdgpu_ras *con =
1739                 container_of(attr, struct amdgpu_ras, badpages_attr);
1740         struct amdgpu_device *adev = con->adev;
1741         const unsigned int element_size =
1742                 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1743         unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1744         unsigned int end = div64_ul(ppos + count - 1, element_size);
1745         ssize_t s = 0;
1746         struct ras_badpage *bps = NULL;
1747         unsigned int bps_count = 0;
1748
1749         memset(buf, 0, count);
1750
1751         if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1752                 return 0;
1753
1754         for (; start < end && start < bps_count; start++)
1755                 s += scnprintf(&buf[s], element_size + 1,
1756                                 "0x%08x : 0x%08x : %1s\n",
1757                                 bps[start].bp,
1758                                 bps[start].size,
1759                                 amdgpu_ras_badpage_flags_str(bps[start].flags));
1760
1761         kfree(bps);
1762
1763         return s;
1764 }
1765
1766 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1767                 struct device_attribute *attr, char *buf)
1768 {
1769         struct amdgpu_ras *con =
1770                 container_of(attr, struct amdgpu_ras, features_attr);
1771
1772         return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1773 }
1774
1775 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1776                 struct device_attribute *attr, char *buf)
1777 {
1778         struct amdgpu_ras *con =
1779                 container_of(attr, struct amdgpu_ras, version_attr);
1780         return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1781 }
1782
1783 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1784                 struct device_attribute *attr, char *buf)
1785 {
1786         struct amdgpu_ras *con =
1787                 container_of(attr, struct amdgpu_ras, schema_attr);
1788         return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1789 }
1790
1791 static struct {
1792         enum ras_event_type type;
1793         const char *name;
1794 } dump_event[] = {
1795         {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1796         {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1797         {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1798 };
1799
1800 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1801                                                  struct device_attribute *attr, char *buf)
1802 {
1803         struct amdgpu_ras *con =
1804                 container_of(attr, struct amdgpu_ras, event_state_attr);
1805         struct ras_event_manager *event_mgr = con->event_mgr;
1806         struct ras_event_state *event_state;
1807         int i, size = 0;
1808
1809         if (!event_mgr)
1810                 return -EINVAL;
1811
1812         size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1813         for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1814                 event_state = &event_mgr->event_state[dump_event[i].type];
1815                 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1816                                       dump_event[i].name,
1817                                       atomic64_read(&event_state->count),
1818                                       event_state->last_seqno);
1819         }
1820
1821         return (ssize_t)size;
1822 }
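
/*
 * Example event_state output (illustrative values only):
 *
 *      current seqno: 42
 *      Fatal Error: count:0, last_seqno:0
 *      Poison Creation: count:3, last_seqno:40
 *      Poison Consumption: count:1, last_seqno:41
 */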
1823
1824 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1825 {
1826         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1827
1828         if (adev->dev->kobj.sd)
1829                 sysfs_remove_file_from_group(&adev->dev->kobj,
1830                                 &con->badpages_attr.attr,
1831                                 RAS_FS_NAME);
1832 }
1833
1834 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1835 {
1836         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1837         struct attribute *attrs[] = {
1838                 &con->features_attr.attr,
1839                 &con->version_attr.attr,
1840                 &con->schema_attr.attr,
1841                 &con->event_state_attr.attr,
1842                 NULL
1843         };
1844         struct attribute_group group = {
1845                 .name = RAS_FS_NAME,
1846                 .attrs = attrs,
1847         };
1848
1849         if (adev->dev->kobj.sd)
1850                 sysfs_remove_group(&adev->dev->kobj, &group);
1851
1852         return 0;
1853 }
1854
1855 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1856                 struct ras_common_if *head)
1857 {
1858         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1859
1860         if (amdgpu_aca_is_enabled(adev))
1861                 return 0;
1862
1863         if (!obj || obj->attr_inuse)
1864                 return -EINVAL;
1865
1866         get_obj(obj);
1867
1868         snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1869                 "%s_err_count", head->name);
1870
1871         obj->sysfs_attr = (struct device_attribute){
1872                 .attr = {
1873                         .name = obj->fs_data.sysfs_name,
1874                         .mode = S_IRUGO,
1875                 },
1876                 .show = amdgpu_ras_sysfs_read,
1877         };
1878         sysfs_attr_init(&obj->sysfs_attr.attr);
1879
1880         if (sysfs_add_file_to_group(&adev->dev->kobj,
1881                                 &obj->sysfs_attr.attr,
1882                                 RAS_FS_NAME)) {
1883                 put_obj(obj);
1884                 return -EINVAL;
1885         }
1886
1887         obj->attr_inuse = 1;
1888
1889         return 0;
1890 }
1891
1892 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1893                 struct ras_common_if *head)
1894 {
1895         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1896
1897         if (amdgpu_aca_is_enabled(adev))
1898                 return 0;
1899
1900         if (!obj || !obj->attr_inuse)
1901                 return -EINVAL;
1902
1903         if (adev->dev->kobj.sd)
1904                 sysfs_remove_file_from_group(&adev->dev->kobj,
1905                                 &obj->sysfs_attr.attr,
1906                                 RAS_FS_NAME);
1907         obj->attr_inuse = 0;
1908         put_obj(obj);
1909
1910         return 0;
1911 }
1912
1913 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1914 {
1915         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1916         struct ras_manager *obj, *tmp;
1917
1918         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1919                 amdgpu_ras_sysfs_remove(adev, &obj->head);
1920         }
1921
1922         if (amdgpu_bad_page_threshold != 0)
1923                 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1924
1925         amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1926
1927         return 0;
1928 }
1929 /* sysfs end */
1930
1931 /**
1932  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1933  *
1934  * Normally when there is an uncorrectable error, the driver will reset
1935  * the GPU to recover.  However, for unrecoverable errors the driver
1936  * also provides an interface to reboot the system automatically instead.
1938  *
1939  * The following file in debugfs provides that interface:
1940  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1941  *
1942  * Usage:
1943  *
1944  * .. code-block:: bash
1945  *
1946  *      echo true > .../ras/auto_reboot
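 *
 *      # reading the node back reports the debugfs bool as Y or N
 *      cat .../ras/auto_reboot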
1947  *
1948  */
1949 /* debugfs begin */
1950 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1951 {
1952         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1953         struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1954         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1955         struct dentry     *dir;
1956
1957         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1958         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1959                             &amdgpu_ras_debugfs_ctrl_ops);
1960         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1961                             &amdgpu_ras_debugfs_eeprom_ops);
1962         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1963                            &con->bad_page_cnt_threshold);
1964         debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1965         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1966         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1967         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1968                             &amdgpu_ras_debugfs_eeprom_size_ops);
1969         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1970                                                        S_IRUGO, dir, adev,
1971                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1972         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1973
1974         /*
1975          * After one uncorrectable error happens, GPU recovery will usually
1976          * be scheduled. But due to a known problem where GPU recovery fails
1977          * to bring the GPU back, the interface below provides a direct way
1978          * for the user to reboot the system automatically when an
1979          * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
1980          * routine will then never be called.
1981          */
1982         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1983
1984         /*
1985          * The user can set this so that the hardware error count registers
1986          * of RAS IPs are not cleaned up during ras recovery.
1987          */
1988         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1989                             &con->disable_ras_err_cnt_harvest);
1990         return dir;
1991 }
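
/*
 * For instance, the read-only nodes created above can be inspected from
 * user space (assuming the first DRM device and debugfs mounted at
 * /sys/kernel/debug):
 *
 *      cat /sys/kernel/debug/dri/0/ras/bad_page_cnt_threshold
 *      cat /sys/kernel/debug/dri/0/ras/ras_num_recs
 */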
1992
1993 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1994                                       struct ras_fs_if *head,
1995                                       struct dentry *dir)
1996 {
1997         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1998
1999         if (!obj || !dir)
2000                 return;
2001
2002         get_obj(obj);
2003
2004         memcpy(obj->fs_data.debugfs_name,
2005                         head->debugfs_name,
2006                         sizeof(obj->fs_data.debugfs_name));
2007
2008         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2009                             obj, &amdgpu_ras_debugfs_ops);
2010 }
2011
2012 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2013 {
2014         bool ret;
2015
2016         switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2017         case IP_VERSION(13, 0, 6):
2018         case IP_VERSION(13, 0, 14):
2019                 ret = true;
2020                 break;
2021         default:
2022                 ret = false;
2023                 break;
2024         }
2025
2026         return ret;
2027 }
2028
2029 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2030 {
2031         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2032         struct dentry *dir;
2033         struct ras_manager *obj;
2034         struct ras_fs_if fs_info;
2035
2036         /*
2037          * this won't be called in the resume path, so there is no need to
2038          * check suspend and gpu reset status
2039          */
2040         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2041                 return;
2042
2043         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2044
2045         list_for_each_entry(obj, &con->head, node) {
2046                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2047                         (obj->attr_inuse == 1)) {
2048                         sprintf(fs_info.debugfs_name, "%s_err_inject",
2049                                         get_ras_block_str(&obj->head));
2050                         fs_info.head = obj->head;
2051                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2052                 }
2053         }
2054
2055         if (amdgpu_ras_aca_is_supported(adev)) {
2056                 if (amdgpu_aca_is_enabled(adev))
2057                         amdgpu_aca_smu_debugfs_init(adev, dir);
2058                 else
2059                         amdgpu_mca_smu_debugfs_init(adev, dir);
2060         }
2061 }
2062
2063 /* debugfs end */
2064
2065 /* ras fs */
2066 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2067                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
2068 static DEVICE_ATTR(features, S_IRUGO,
2069                 amdgpu_ras_sysfs_features_read, NULL);
2070 static DEVICE_ATTR(version, 0444,
2071                 amdgpu_ras_sysfs_version_show, NULL);
2072 static DEVICE_ATTR(schema, 0444,
2073                 amdgpu_ras_sysfs_schema_show, NULL);
2074 static DEVICE_ATTR(event_state, 0444,
2075                    amdgpu_ras_sysfs_event_state_show, NULL);
2076 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2077 {
2078         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2079         struct attribute_group group = {
2080                 .name = RAS_FS_NAME,
2081         };
2082         struct attribute *attrs[] = {
2083                 &con->features_attr.attr,
2084                 &con->version_attr.attr,
2085                 &con->schema_attr.attr,
2086                 &con->event_state_attr.attr,
2087                 NULL
2088         };
2089         struct bin_attribute *bin_attrs[] = {
2090                 NULL,
2091                 NULL,
2092         };
2093         int r;
2094
2095         group.attrs = attrs;
2096
2097         /* add features entry */
2098         con->features_attr = dev_attr_features;
2099         sysfs_attr_init(attrs[0]);
2100
2101         /* add version entry */
2102         con->version_attr = dev_attr_version;
2103         sysfs_attr_init(attrs[1]);
2104
2105         /* add schema entry */
2106         con->schema_attr = dev_attr_schema;
2107         sysfs_attr_init(attrs[2]);
2108
2109         /* add event_state entry */
2110         con->event_state_attr = dev_attr_event_state;
2111         sysfs_attr_init(attrs[3]);
2112
2113         if (amdgpu_bad_page_threshold != 0) {
2114                 /* add bad_page_features entry */
2115                 bin_attr_gpu_vram_bad_pages.private = NULL;
2116                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2117                 bin_attrs[0] = &con->badpages_attr;
2118                 group.bin_attrs = bin_attrs;
2119                 sysfs_bin_attr_init(bin_attrs[0]);
2120         }
2121
2122         r = sysfs_create_group(&adev->dev->kobj, &group);
2123         if (r)
2124                 dev_err(adev->dev, "Failed to create RAS sysfs group!\n");
2125
2126         return 0;
2127 }
2128
2129 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2130 {
2131         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2132         struct ras_manager *con_obj, *ip_obj, *tmp;
2133
2134         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2135                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2136                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2137                         if (ip_obj)
2138                                 put_obj(ip_obj);
2139                 }
2140         }
2141
2142         amdgpu_ras_sysfs_remove_all(adev);
2143         return 0;
2144 }
2145 /* ras fs end */
2146
2147 /* ih begin */
2148
2149 /* For hardware that cannot enable the bif ring for both ras_controller_irq
2150  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2151  * register to check whether the interrupt has triggered or not, and properly
2152  * ack the interrupt if it is there
2153  */
2154 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2155 {
2156         /* Fatal error events are handled on host side */
2157         if (amdgpu_sriov_vf(adev))
2158                 return;
2159
2160         if (adev->nbio.ras &&
2161             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2162                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2163
2164         if (adev->nbio.ras &&
2165             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2166                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2167 }
2168
2169 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2170                                 struct amdgpu_iv_entry *entry)
2171 {
2172         bool poison_stat = false;
2173         struct amdgpu_device *adev = obj->adev;
2174         struct amdgpu_ras_block_object *block_obj =
2175                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2176         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2177         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2178         u64 event_id;
2179         int ret;
2180
2181         if (!block_obj || !con)
2182                 return;
2183
2184         ret = amdgpu_ras_mark_ras_event(adev, type);
2185         if (ret)
2186                 return;
2187
2188         /* both query_poison_status and handle_poison_consumption are optional,
2189          * but at least one of them should be implemented if we need a poison
2190          * consumption handler
2191          */
2192         if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2193                 poison_stat = block_obj->hw_ops->query_poison_status(adev);
2194                 if (!poison_stat) {
2195                         /* Not poison consumption interrupt, no need to handle it */
2196                         dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2197                                         block_obj->ras_comm.name);
2198
2199                         return;
2200                 }
2201         }
2202
2203         amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2204
2205         if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2206                 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2207
2208         /* gpu reset is the fallback for the failed and default cases.
2209          * For the RMA case, amdgpu_umc_poison_handler will handle gpu reset.
2210          */
2211         if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2212                 event_id = amdgpu_ras_acquire_event_id(adev, type);
2213                 RAS_EVENT_LOG(adev, event_id,
2214                               "GPU reset for %s RAS poison consumption is issued!\n",
2215                               block_obj->ras_comm.name);
2216                 amdgpu_ras_reset_gpu(adev);
2217         }
2218
2219         if (!poison_stat)
2220                 amdgpu_gfx_poison_consumption_handler(adev, entry);
2221 }
2222
2223 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2224                                 struct amdgpu_iv_entry *entry)
2225 {
2226         struct amdgpu_device *adev = obj->adev;
2227         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2228         u64 event_id;
2229         int ret;
2230
2231         ret = amdgpu_ras_mark_ras_event(adev, type);
2232         if (ret)
2233                 return;
2234
2235         event_id = amdgpu_ras_acquire_event_id(adev, type);
2236         RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2237
2238         if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2239                 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2240
2241                 atomic_inc(&con->page_retirement_req_cnt);
2242                 atomic_inc(&con->poison_creation_count);
2243
2244                 wake_up(&con->page_retirement_wq);
2245         }
2246 }
2247
2248 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2249                                 struct amdgpu_iv_entry *entry)
2250 {
2251         struct ras_ih_data *data = &obj->ih_data;
2252         struct ras_err_data err_data;
2253         int ret;
2254
2255         if (!data->cb)
2256                 return;
2257
2258         ret = amdgpu_ras_error_data_init(&err_data);
2259         if (ret)
2260                 return;
2261
2262         /* Let the IP handle its data; maybe we need to get the output
2263          * from the callback to update the error type/count, etc.
2264          */
2265         amdgpu_ras_set_fed(obj->adev, true);
2266         ret = data->cb(obj->adev, &err_data, entry);
2267         /* a ue will trigger an interrupt, and in that case
2268          * we need to do a reset to recover the whole system.
2269          * But leave it to the IP to do that recovery; here we just
2270          * dispatch the error.
2271          */
2272         if (ret == AMDGPU_RAS_SUCCESS) {
2273                 /* these counts could be left as 0 if
2274                  * some blocks do not count error number
2275                  */
2276                 obj->err_data.ue_count += err_data.ue_count;
2277                 obj->err_data.ce_count += err_data.ce_count;
2278                 obj->err_data.de_count += err_data.de_count;
2279         }
2280
2281         amdgpu_ras_error_data_fini(&err_data);
2282 }
2283
2284 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2285 {
2286         struct ras_ih_data *data = &obj->ih_data;
2287         struct amdgpu_iv_entry entry;
2288
2289         while (data->rptr != data->wptr) {
2290                 rmb();
2291                 memcpy(&entry, &data->ring[data->rptr],
2292                                 data->element_size);
2293
2294                 wmb();
2295                 data->rptr = (data->aligned_element_size +
2296                                 data->rptr) % data->ring_size;
2297
2298                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2299                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2300                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2301                         else
2302                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2303                 } else {
2304                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2305                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
2306                         else
2307                                 dev_warn(obj->adev->dev,
2308                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
2309                 }
2310         }
2311 }
2312
2313 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2314 {
2315         struct ras_ih_data *data =
2316                 container_of(work, struct ras_ih_data, ih_work);
2317         struct ras_manager *obj =
2318                 container_of(data, struct ras_manager, ih_data);
2319
2320         amdgpu_ras_interrupt_handler(obj);
2321 }
2322
2323 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2324                 struct ras_dispatch_if *info)
2325 {
2326         struct ras_manager *obj;
2327         struct ras_ih_data *data;
2328
2329         obj = amdgpu_ras_find_obj(adev, &info->head);
2330         if (!obj)
2331                 return -EINVAL;
2332
2333         data = &obj->ih_data;
2334
2335         if (data->inuse == 0)
2336                 return 0;
2337
2338         /* The ring might overflow... */
2339         memcpy(&data->ring[data->wptr], info->entry,
2340                         data->element_size);
2341
2342         wmb();
2343         data->wptr = (data->aligned_element_size +
2344                         data->wptr) % data->ring_size;
2345
2346         schedule_work(&data->ih_work);
2347
2348         return 0;
2349 }
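
/*
 * A minimal dispatch sketch from an IP's IH handler (hypothetical; real
 * callers fill .head from their own ras_common_if):
 *
 *      struct ras_dispatch_if ih_info = {
 *              .head = *ras_if,        /* the block's ras_common_if */
 *              .entry = entry,
 *      };
 *      amdgpu_ras_interrupt_dispatch(adev, &ih_info);
 */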
2350
2351 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2352                 struct ras_common_if *head)
2353 {
2354         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2355         struct ras_ih_data *data;
2356
2357         if (!obj)
2358                 return -EINVAL;
2359
2360         data = &obj->ih_data;
2361         if (data->inuse == 0)
2362                 return 0;
2363
2364         cancel_work_sync(&data->ih_work);
2365
2366         kfree(data->ring);
2367         memset(data, 0, sizeof(*data));
2368         put_obj(obj);
2369
2370         return 0;
2371 }
2372
2373 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2374                 struct ras_common_if *head)
2375 {
2376         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2377         struct ras_ih_data *data;
2378         struct amdgpu_ras_block_object *ras_obj;
2379
2380         if (!obj) {
2381                 /* in case we register the IH before enabling the ras feature */
2382                 obj = amdgpu_ras_create_obj(adev, head);
2383                 if (!obj)
2384                         return -EINVAL;
2385         } else
2386                 get_obj(obj);
2387
2388         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2389
2390         data = &obj->ih_data;
2391         /* add the callback, etc. */
2392         *data = (struct ras_ih_data) {
2393                 .inuse = 0,
2394                 .cb = ras_obj->ras_cb,
2395                 .element_size = sizeof(struct amdgpu_iv_entry),
2396                 .rptr = 0,
2397                 .wptr = 0,
2398         };
2399
2400         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2401
2402         data->aligned_element_size = ALIGN(data->element_size, 8);
2403         /* the ring can store 64 iv entries. */
2404         data->ring_size = 64 * data->aligned_element_size;
2405         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2406         if (!data->ring) {
2407                 put_obj(obj);
2408                 return -ENOMEM;
2409         }
2410
2411         /* IH is ready */
2412         data->inuse = 1;
2413
2414         return 0;
2415 }
2416
2417 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2418 {
2419         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2420         struct ras_manager *obj, *tmp;
2421
2422         list_for_each_entry_safe(obj, tmp, &con->head, node) {
2423                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2424         }
2425
2426         return 0;
2427 }
2428 /* ih end */
2429
2430 /* traverse all IPs except NBIO to query error counters */
2431 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2432 {
2433         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2434         struct ras_manager *obj;
2435
2436         if (!adev->ras_enabled || !con)
2437                 return;
2438
2439         list_for_each_entry(obj, &con->head, node) {
2440                 struct ras_query_if info = {
2441                         .head = obj->head,
2442                 };
2443
2444                 /*
2445                  * PCIE_BIF IP has a different isr for the ras controller
2446                  * interrupt; the specific ras counter query will be
2447                  * done in that isr. So skip such blocks from the common
2448                  * sync flood interrupt isr.
2449                  */
2450                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2451                         continue;
2452
2453                 /*
2454                  * this is a workaround for aldebaran: skip sending the msg to
2455                  * smu to get the ecc_info table, since smu handling of the
2456                  * ecc_info table request currently fails.
2457                  * it should be removed once smu fixes ecc_info table handling.
2458                  */
2459                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2460                     (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2461                      IP_VERSION(13, 0, 2)))
2462                         continue;
2463
2464                 amdgpu_ras_query_error_status_with_event(adev, &info, type);
2465
2466                 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2467                             IP_VERSION(11, 0, 2) &&
2468                     amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2469                             IP_VERSION(11, 0, 4) &&
2470                     amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2471                             IP_VERSION(13, 0, 0)) {
2472                         if (amdgpu_ras_reset_error_status(adev, info.head.block))
2473                         dev_warn(adev->dev, "Failed to reset error counter and error status\n");
2474                 }
2475         }
2476 }
2477
2478 /* Parse RdRspStatus and WrRspStatus */
2479 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2480                                           struct ras_query_if *info)
2481 {
2482         struct amdgpu_ras_block_object *block_obj;
2483         /*
2484          * Only two blocks need to query the read/write
2485          * RspStatus at the current state
2486          */
2487         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2488                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2489                 return;
2490
2491         block_obj = amdgpu_ras_get_ras_block(adev,
2492                                         info->head.block,
2493                                         info->head.sub_block_index);
2494
2495         if (!block_obj || !block_obj->hw_ops) {
2496                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2497                              get_ras_block_str(&info->head));
2498                 return;
2499         }
2500
2501         if (block_obj->hw_ops->query_ras_error_status)
2502                 block_obj->hw_ops->query_ras_error_status(adev);
2503
2504 }
2505
2506 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2507 {
2508         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2509         struct ras_manager *obj;
2510
2511         if (!adev->ras_enabled || !con)
2512                 return;
2513
2514         list_for_each_entry(obj, &con->head, node) {
2515                 struct ras_query_if info = {
2516                         .head = obj->head,
2517                 };
2518
2519                 amdgpu_ras_error_status_query(adev, &info);
2520         }
2521 }
2522
2523 /* recovery begin */
2524
2525 /* return 0 on success.
2526  * the caller needs to free bps.
2527  */
2528 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2529                 struct ras_badpage **bps, unsigned int *count)
2530 {
2531         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2532         struct ras_err_handler_data *data;
2533         int i = 0;
2534         int ret = 0, status;
2535
2536         if (!con || !con->eh_data || !bps || !count)
2537                 return -EINVAL;
2538
2539         mutex_lock(&con->recovery_lock);
2540         data = con->eh_data;
2541         if (!data || data->count == 0) {
2542                 *bps = NULL;
2543                 ret = -EINVAL;
2544                 goto out;
2545         }
2546
2547         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2548         if (!*bps) {
2549                 ret = -ENOMEM;
2550                 goto out;
2551         }
2552
2553         for (; i < data->count; i++) {
2554                 (*bps)[i] = (struct ras_badpage){
2555                         .bp = data->bps[i].retired_page,
2556                         .size = AMDGPU_GPU_PAGE_SIZE,
2557                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2558                 };
2559                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2560                                 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2561                 if (status == -EBUSY)
2562                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2563                 else if (status == -ENOENT)
2564                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2565         }
2566
2567         *count = data->count;
2568 out:
2569         mutex_unlock(&con->recovery_lock);
2570         return ret;
2571 }
2572
2573 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2574                                    struct amdgpu_hive_info *hive, bool status)
2575 {
2576         struct amdgpu_device *tmp_adev;
2577
2578         if (hive) {
2579                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2580                         amdgpu_ras_set_fed(tmp_adev, status);
2581         } else {
2582                 amdgpu_ras_set_fed(adev, status);
2583         }
2584 }
2585
2586 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2587 {
2588         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2589         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2590         int hive_ras_recovery = 0;
2591
2592         if (hive) {
2593                 hive_ras_recovery = atomic_read(&hive->ras_recovery);
2594                 amdgpu_put_xgmi_hive(hive);
2595         }
2596
2597         if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2598                 return true;
2599
2600         return false;
2601 }
2602
2603 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2604 {
2605         if (amdgpu_ras_intr_triggered())
2606                 return RAS_EVENT_TYPE_FATAL;
2607         else
2608                 return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2609 }
2610
2611 static void amdgpu_ras_do_recovery(struct work_struct *work)
2612 {
2613         struct amdgpu_ras *ras =
2614                 container_of(work, struct amdgpu_ras, recovery_work);
2615         struct amdgpu_device *remote_adev = NULL;
2616         struct amdgpu_device *adev = ras->adev;
2617         struct list_head device_list, *device_list_handle =  NULL;
2618         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2619         enum ras_event_type type;
2620
2621         if (hive) {
2622                 atomic_set(&hive->ras_recovery, 1);
2623
2624                 /* If any device which is part of the hive received a RAS fatal
2625                  * error interrupt, set the fatal error status on all. This
2626                  * condition will need a recovery, and flag will be cleared
2627                  * as part of recovery.
2628                  */
2629                 list_for_each_entry(remote_adev, &hive->device_list,
2630                                     gmc.xgmi.head)
2631                         if (amdgpu_ras_get_fed_status(remote_adev)) {
2632                                 amdgpu_ras_set_fed_all(adev, hive, true);
2633                                 break;
2634                         }
2635         }
2636         if (!ras->disable_ras_err_cnt_harvest) {
2637
2638                 /* Build list of devices to query RAS related errors */
2639                 if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2640                         device_list_handle = &hive->device_list;
2641                 } else {
2642                         INIT_LIST_HEAD(&device_list);
2643                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
2644                         device_list_handle = &device_list;
2645                 }
2646
2647                 type = amdgpu_ras_get_fatal_error_event(adev);
2648                 list_for_each_entry(remote_adev,
2649                                 device_list_handle, gmc.xgmi.head) {
2650                         amdgpu_ras_query_err_status(remote_adev);
2651                         amdgpu_ras_log_on_err_counter(remote_adev, type);
2652                 }
2653
2654         }
2655
2656         if (amdgpu_device_should_recover_gpu(ras->adev)) {
2657                 struct amdgpu_reset_context reset_context;
2658                 memset(&reset_context, 0, sizeof(reset_context));
2659
2660                 reset_context.method = AMD_RESET_METHOD_NONE;
2661                 reset_context.reset_req_dev = adev;
2662                 reset_context.src = AMDGPU_RESET_SRC_RAS;
2663                 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2664
2665                 /* Perform full reset in fatal error mode */
2666                 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2667                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2668                 else {
2669                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2670
2671                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2672                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2673                                 reset_context.method = AMD_RESET_METHOD_MODE2;
2674                         }
2675
2676                         /* If a fatal error occurs in poison mode, mode1 reset is used to
2677                          * recover the gpu.
2678                          */
2679                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2680                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2681                                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2682
2683                                 psp_fatal_error_recovery_quirk(&adev->psp);
2684                         }
2685                 }
2686
2687                 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2688         }
2689         atomic_set(&ras->in_recovery, 0);
2690         if (hive) {
2691                 atomic_set(&hive->ras_recovery, 0);
2692                 amdgpu_put_xgmi_hive(hive);
2693         }
2694 }
2695
2696 /* alloc/realloc bps array */
2697 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2698                 struct ras_err_handler_data *data, int pages)
2699 {
2700         unsigned int old_space = data->count + data->space_left;
2701         unsigned int new_space = old_space + pages;
2702         unsigned int align_space = ALIGN(new_space, 512);
2703         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2704
2705         if (!bps)
2706                 return -ENOMEM;
2708
2709         if (data->bps) {
2710                 memcpy(bps, data->bps,
2711                                 data->count * sizeof(*data->bps));
2712                 kfree(data->bps);
2713         }
2714
2715         data->bps = bps;
2716         data->space_left += align_space - old_space;
2717         return 0;
2718 }
2719
2720 /* it deals with vram only. */
2721 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2722                 struct eeprom_table_record *bps, int pages)
2723 {
2724         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2725         struct ras_err_handler_data *data;
2726         int ret = 0;
2727         uint32_t i;
2728
2729         if (!con || !con->eh_data || !bps || pages <= 0)
2730                 return 0;
2731
2732         mutex_lock(&con->recovery_lock);
2733         data = con->eh_data;
2734         if (!data)
2735                 goto out;
2736
2737         for (i = 0; i < pages; i++) {
2738                 if (amdgpu_ras_check_bad_page_unlock(con,
2739                         bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2740                         continue;
2741
2742                 if (!data->space_left &&
2743                         amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2744                         ret = -ENOMEM;
2745                         goto out;
2746                 }
2747
2748                 amdgpu_ras_reserve_page(adev, bps[i].retired_page);
2749
2750                 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2751                 data->count++;
2752                 data->space_left--;
2753         }
2754 out:
2755         mutex_unlock(&con->recovery_lock);
2756
2757         return ret;
2758 }
2759
2760 /*
2761  * write the error record array to eeprom; the function should be
2762  * protected by recovery_lock
2763  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2764  */
2765 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2766                 unsigned long *new_cnt)
2767 {
2768         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2769         struct ras_err_handler_data *data;
2770         struct amdgpu_ras_eeprom_control *control;
2771         int save_count;
2772
2773         if (!con || !con->eh_data) {
2774                 if (new_cnt)
2775                         *new_cnt = 0;
2776
2777                 return 0;
2778         }
2779
2780         mutex_lock(&con->recovery_lock);
2781         control = &con->eeprom_control;
2782         data = con->eh_data;
2783         save_count = data->count - control->ras_num_recs;
2784         mutex_unlock(&con->recovery_lock);
2785
2786         if (new_cnt)
2787                 *new_cnt = save_count / adev->umc.retire_unit;
2788
2789         /* only new entries are saved */
2790         if (save_count > 0) {
2791                 if (amdgpu_ras_eeprom_append(control,
2792                                              &data->bps[control->ras_num_recs],
2793                                              save_count)) {
2794                         dev_err(adev->dev, "Failed to save EEPROM table data!");
2795                         return -EIO;
2796                 }
2797
2798                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2799         }
2800
2801         return 0;
2802 }
2803
2804 /*
2805  * read error record array in eeprom and reserve enough space for
2806  * storing new bad pages
2807  */
2808 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2809 {
2810         struct amdgpu_ras_eeprom_control *control =
2811                 &adev->psp.ras_context.ras->eeprom_control;
2812         struct eeprom_table_record *bps;
2813         int ret;
2814
2815         /* no bad page record, skip eeprom access */
2816         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2817                 return 0;
2818
2819         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2820         if (!bps)
2821                 return -ENOMEM;
2822
2823         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2824         if (ret)
2825                 dev_err(adev->dev, "Failed to load EEPROM table records!");
2826         else
2827                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2828
2829         kfree(bps);
2830         return ret;
2831 }
2832
2833 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2834                                 uint64_t addr)
2835 {
2836         struct ras_err_handler_data *data = con->eh_data;
2837         int i;
2838
2839         addr >>= AMDGPU_GPU_PAGE_SHIFT;
2840         for (i = 0; i < data->count; i++)
2841                 if (addr == data->bps[i].retired_page)
2842                         return true;
2843
2844         return false;
2845 }
2846
2847 /*
2848  * check whether an address belongs to a bad page
2849  *
2850  * Note: this check is only for the umc block
2851  */
2852 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2853                                 uint64_t addr)
2854 {
2855         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2856         bool ret = false;
2857
2858         if (!con || !con->eh_data)
2859                 return ret;
2860
2861         mutex_lock(&con->recovery_lock);
2862         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2863         mutex_unlock(&con->recovery_lock);
2864         return ret;
2865 }
2866
2867 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2868                                           uint32_t max_count)
2869 {
2870         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2871
2872         /*
2873          * Justification of the bad_page_cnt_threshold value in the ras structure
2874          *
2875          * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
2876          * in eeprom, or amdgpu_bad_page_threshold == -2; this gives two
2877          * scenarios:
2878          *
2879          * Bad page retirement enabled:
2880          *    - If amdgpu_bad_page_threshold = -2, derive
2881          *      bad_page_cnt_threshold from the VRAM size by formula.
2882          *
2883          *    - If the user supplies 0 < amdgpu_bad_page_threshold <
2884          *      max record length in eeprom, use it directly.
2885          *
2886          * Bad page retirement disabled:
2887          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
2888          *      is disabled, and bad_page_cnt_threshold has
2889          *      no effect.
2890          */
2891
2892         if (amdgpu_bad_page_threshold < 0) {
2893                 u64 val = adev->gmc.mc_vram_size;
2894
2895                 do_div(val, RAS_BAD_PAGE_COVER);
2896                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2897                                                   max_count);
2898         } else {
2899                 con->bad_page_cnt_threshold = min_t(int, max_count,
2900                                                     amdgpu_bad_page_threshold);
2901         }
2902 }
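/*
 * A worked example of the default (-2) path, assuming RAS_BAD_PAGE_COVER
 * (defined in amdgpu_ras.h) allows one retired record per fixed chunk of
 * VRAM: with a hypothetical 64 GiB of VRAM and a 100 MiB cover unit,
 * mc_vram_size / RAS_BAD_PAGE_COVER is about 655, which is then clamped
 * to the EEPROM record limit passed in as max_count.
 */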
2903
2904 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
2905                 enum amdgpu_ras_block block, uint16_t pasid,
2906                 pasid_notify pasid_fn, void *data, uint32_t reset)
2907 {
2908         int ret = 0;
2909         struct ras_poison_msg poison_msg;
2910         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2911
2912         memset(&poison_msg, 0, sizeof(poison_msg));
2913         poison_msg.block = block;
2914         poison_msg.pasid = pasid;
2915         poison_msg.reset = reset;
2916         poison_msg.pasid_fn = pasid_fn;
2917         poison_msg.data = data;
2918
2919         ret = kfifo_put(&con->poison_fifo, poison_msg);
2920         if (!ret) {
2921                 dev_err(adev->dev, "Poison message fifo is full!\n");
2922                 return -ENOSPC;
2923         }
2924
2925         return 0;
2926 }
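/*
 * A producer-side sketch (illustrative only, with pasid taken from the
 * faulting process): a poison-consumption handler could queue a request
 * that triggers a mode-2 reset once the retirement thread drains the fifo:
 *
 *     amdgpu_ras_put_poison_req(adev, AMDGPU_RAS_BLOCK__GFX, pasid,
 *                               NULL, NULL,
 *                               AMDGPU_RAS_GPU_RESET_MODE2_RESET);
 */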
2927
2928 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
2929                 struct ras_poison_msg *poison_msg)
2930 {
2931         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2932
2933         return kfifo_get(&con->poison_fifo, poison_msg);
2934 }
2935
2936 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
2937 {
2938         mutex_init(&ecc_log->lock);
2939
2940         INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
2941         ecc_log->de_queried_count = 0;
2942         ecc_log->prev_de_queried_count = 0;
2943 }
2944
2945 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
2946 {
2947         struct radix_tree_iter iter;
2948         void __rcu **slot;
2949         struct ras_ecc_err *ecc_err;
2950
2951         mutex_lock(&ecc_log->lock);
2952         radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
2953                 ecc_err = radix_tree_deref_slot(slot);
2954                 kfree(ecc_err->err_pages.pfn);
2955                 kfree(ecc_err);
2956                 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
2957         }
2958         mutex_unlock(&ecc_log->lock);
2959
2960         mutex_destroy(&ecc_log->lock);
2961         ecc_log->de_queried_count = 0;
2962         ecc_log->prev_de_queried_count = 0;
2963 }
2964
2965 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
2966                                 uint32_t delayed_ms)
2967 {
2968         int ret;
2969
2970         mutex_lock(&con->umc_ecc_log.lock);
2971         ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
2972                         UMC_ECC_NEW_DETECTED_TAG);
2973         mutex_unlock(&con->umc_ecc_log.lock);
2974
2975         if (ret)
2976                 schedule_delayed_work(&con->page_retirement_dwork,
2977                         msecs_to_jiffies(delayed_ms));
2978
2979         return ret ? true : false;
2980 }
2981
2982 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
2983 {
2984         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
2985                                               page_retirement_dwork.work);
2986         struct amdgpu_device *adev = con->adev;
2987         struct ras_err_data err_data;
2988         unsigned long err_cnt;
2989
2990         /* If gpu reset is ongoing, delay retiring the bad pages */
2991         if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
2992                 amdgpu_ras_schedule_retirement_dwork(con,
2993                                 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
2994                 return;
2995         }
2996
2997         amdgpu_ras_error_data_init(&err_data);
2998
2999         amdgpu_umc_handle_bad_pages(adev, &err_data);
3000         err_cnt = err_data.err_addr_cnt;
3001
3002         amdgpu_ras_error_data_fini(&err_data);
3003
3004         if (err_cnt && amdgpu_ras_is_rma(adev))
3005                 amdgpu_ras_reset_gpu(adev);
3006
3007         amdgpu_ras_schedule_retirement_dwork(con,
3008                         AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3009 }
3010
3011 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3012                                 uint32_t poison_creation_count)
3013 {
3014         int ret = 0;
3015         struct ras_ecc_log_info *ecc_log;
3016         struct ras_query_if info;
3017         uint32_t timeout = 0;
3018         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3019         uint64_t de_queried_count;
3020         uint32_t new_detect_count, total_detect_count;
3021         uint32_t need_query_count = poison_creation_count;
3022         bool query_data_timeout = false;
3023         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3024
3025         memset(&info, 0, sizeof(info));
3026         info.head.block = AMDGPU_RAS_BLOCK__UMC;
3027
3028         ecc_log = &ras->umc_ecc_log;
3029         total_detect_count = 0;
3030         do {
3031                 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3032                 if (ret)
3033                         return ret;
3034
3035                 de_queried_count = ecc_log->de_queried_count;
3036                 if (de_queried_count > ecc_log->prev_de_queried_count) {
3037                         new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
3038                         ecc_log->prev_de_queried_count = de_queried_count;
3039                         timeout = 0;
3040                 } else {
3041                         new_detect_count = 0;
3042                 }
3043
3044                 if (new_detect_count) {
3045                         total_detect_count += new_detect_count;
3046                 } else {
3047                         if (!timeout && need_query_count)
3048                                 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3049
3050                         if (timeout) {
3051                                 if (!--timeout) {
3052                                         query_data_timeout = true;
3053                                         break;
3054                                 }
3055                                 msleep(1);
3056                         }
3057                 }
3058         } while (total_detect_count < need_query_count);
3059
3060         if (query_data_timeout) {
3061                 dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3062                         (need_query_count - total_detect_count));
3063                 return -ENOENT;
3064         }
3065
3066         if (total_detect_count)
3067                 schedule_delayed_work(&ras->page_retirement_dwork, 0);
3068
3069         return 0;
3070 }
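/*
 * Note on the loop above: one UMC query may surface several deferred
 * errors at once, so progress is measured by de_queried_count rather
 * than by iterations. The timeout only starts ticking when a query
 * makes no progress, sleeps 1 ms per retry, and is reset whenever new
 * deferred errors are found, bounding the wait to
 * MAX_UMC_POISON_POLLING_TIME_ASYNC ms of consecutive silence.
 */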
3071
3072 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3073 {
3074         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3075         struct ras_poison_msg msg;
3076         int ret;
3077
3078         do {
3079                 ret = kfifo_get(&con->poison_fifo, &msg);
3080         } while (ret);
3081 }
3082
3083 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3084                         uint32_t msg_count, uint32_t *gpu_reset)
3085 {
3086         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3087         uint32_t reset_flags = 0, reset = 0;
3088         struct ras_poison_msg msg;
3089         int ret, i;
3090
3091         kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3092
3093         for (i = 0; i < msg_count; i++) {
3094                 ret = amdgpu_ras_get_poison_req(adev, &msg);
3095                 if (!ret)
3096                         continue;
3097
3098                 if (msg.pasid_fn)
3099                         msg.pasid_fn(adev, msg.pasid, msg.data);
3100
3101                 reset_flags |= msg.reset;
3102         }
3103
3104         /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3105         if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3106                 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3107                         reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3108                 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3109                         reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3110                 else
3111                         reset = reset_flags;
3112
3113                 flush_delayed_work(&con->page_retirement_dwork);
3114
3115                 con->gpu_reset_flags |= reset;
3116                 amdgpu_ras_reset_gpu(adev);
3117
3118                 *gpu_reset = reset;
3119
3120                 /* Wait for gpu recovery to complete */
3121                 flush_work(&con->recovery_work);
3122         }
3123
3124         return 0;
3125 }
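/*
 * Note: when several consumers request different resets in one batch,
 * mode-1 wins over mode-2 above, and any other requested flags are
 * passed through unchanged. The retirement work is flushed first so
 * pending bad pages reach the EEPROM before the reset, and the handler
 * then blocks until recovery_work completes.
 */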
3126
3127 static int amdgpu_ras_page_retirement_thread(void *param)
3128 {
3129         struct amdgpu_device *adev = (struct amdgpu_device *)param;
3130         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3131         uint32_t poison_creation_count, msg_count;
3132         uint32_t gpu_reset;
3133         int ret;
3134
3135         while (!kthread_should_stop()) {
3136
3137                 wait_event_interruptible(con->page_retirement_wq,
3138                                 kthread_should_stop() ||
3139                                 atomic_read(&con->page_retirement_req_cnt));
3140
3141                 if (kthread_should_stop())
3142                         break;
3143
3144                 gpu_reset = 0;
3145
3146                 do {
3147                         poison_creation_count = atomic_read(&con->poison_creation_count);
3148                         ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3149                         if (ret == -EIO)
3150                                 break;
3151
3152                         if (poison_creation_count) {
3153                                 atomic_sub(poison_creation_count, &con->poison_creation_count);
3154                                 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3155                         }
3156                 } while (atomic_read(&con->poison_creation_count));
3157
3158                 if (ret != -EIO) {
3159                         msg_count = kfifo_len(&con->poison_fifo);
3160                         if (msg_count) {
3161                                 ret = amdgpu_ras_poison_consumption_handler(adev,
3162                                                 msg_count, &gpu_reset);
3163                                 if ((ret != -EIO) &&
3164                                     (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3165                                         atomic_sub(msg_count, &con->page_retirement_req_cnt);
3166                         }
3167                 }
3168
3169                 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3170                         /* a gpu mode-1 reset is ongoing, or a ras mode-1 reset just completed */
3171                         /* Clear poison creation request */
3172                         atomic_set(&con->poison_creation_count, 0);
3173
3174                         /* Clear poison fifo */
3175                         amdgpu_ras_clear_poison_fifo(adev);
3176
3177                         /* Clear all poison requests */
3178                         atomic_set(&con->page_retirement_req_cnt, 0);
3179
3180                         if (ret == -EIO) {
3181                                 /* Wait for mode-1 reset to complete */
3182                                 down_read(&adev->reset_domain->sem);
3183                                 up_read(&adev->reset_domain->sem);
3184                         }
3185
3186                         /* Wake up work to save bad pages to eeprom */
3187                         schedule_delayed_work(&con->page_retirement_dwork, 0);
3188                 } else if (gpu_reset) {
3189                         /* gpu just completed mode-2 reset or other reset */
3190                         /* Clear poison consumption messages cached in fifo */
3191                         msg_count = kfifo_len(&con->poison_fifo);
3192                         if (msg_count) {
3193                                 amdgpu_ras_clear_poison_fifo(adev);
3194                                 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3195                         }
3196
3197                         /* Wake up work to save bad pages to eeprom */
3198                         schedule_delayed_work(&con->page_retirement_dwork, 0);
3199                 }
3200         }
3201
3202         return 0;
3203 }
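/*
 * Thread summary: poison-creation requests are drained before
 * consumption messages, since retirement of the created poison must be
 * recorded first. -EIO from the creation handler means a mode-1 reset
 * is in flight, so all queued state is dropped and the thread waits on
 * reset_domain->sem for the reset to finish before kicking the
 * EEPROM-save work.
 */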
3204
3205 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3206 {
3207         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3208         int ret;
3209
3210         if (!con || amdgpu_sriov_vf(adev))
3211                 return 0;
3212
3213         ret = amdgpu_ras_eeprom_init(&con->eeprom_control);
3214
3215         if (ret)
3216                 return ret;
3217
3218         /* HW not usable */
3219         if (amdgpu_ras_is_rma(adev))
3220                 return -EHWPOISON;
3221
3222         if (con->eeprom_control.ras_num_recs) {
3223                 ret = amdgpu_ras_load_bad_pages(adev);
3224                 if (ret)
3225                         return ret;
3226
3227                 amdgpu_dpm_send_hbm_bad_pages_num(
3228                         adev, con->eeprom_control.ras_num_recs);
3229
3230                 if (con->update_channel_flag) {
3231                         amdgpu_dpm_send_hbm_bad_channel_flag(
3232                                 adev, con->eeprom_control.bad_channel_bitmap);
3233                         con->update_channel_flag = false;
3234                 }
3235         }
3236
3237         return ret;
3238 }
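/*
 * Init ordering above: the EEPROM table is read first so the RMA
 * (hardware-not-usable) state can be detected before any bad pages are
 * reserved; only then are the recorded pages re-reserved and the page
 * count and channel bitmap reported to the SMU via the dpm calls.
 */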
3239
3240 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3241 {
3242         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3243         struct ras_err_handler_data **data;
3244         u32  max_eeprom_records_count = 0;
3245         int ret;
3246
3247         if (!con || amdgpu_sriov_vf(adev))
3248                 return 0;
3249
3250         /* Allow access to the RAS EEPROM via debugfs when the ASIC
3251          * supports RAS and debugfs is enabled, even when
3252          * adev->ras_enabled is unset, i.e. when the "ras_enable"
3253          * module parameter is set to 0.
3254          */
3255         con->adev = adev;
3256
3257         if (!adev->ras_enabled)
3258                 return 0;
3259
3260         data = &con->eh_data;
3261         *data = kzalloc(sizeof(**data), GFP_KERNEL);
3262         if (!*data) {
3263                 ret = -ENOMEM;
3264                 goto out;
3265         }
3266
3267         mutex_init(&con->recovery_lock);
3268         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3269         atomic_set(&con->in_recovery, 0);
3270         con->eeprom_control.bad_channel_bitmap = 0;
3271
3272         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3273         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3274
3275         if (init_bp_info) {
3276                 ret = amdgpu_ras_init_badpage_info(adev);
3277                 if (ret)
3278                         goto free;
3279         }
3280
3281         mutex_init(&con->page_rsv_lock);
3282         INIT_KFIFO(con->poison_fifo);
3283         mutex_init(&con->page_retirement_lock);
3284         init_waitqueue_head(&con->page_retirement_wq);
3285         atomic_set(&con->page_retirement_req_cnt, 0);
3286         atomic_set(&con->poison_creation_count, 0);
3287         con->page_retirement_thread =
3288                 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3289         if (IS_ERR(con->page_retirement_thread)) {
3290                 con->page_retirement_thread = NULL;
3291                 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
3292         }
3293
3294         INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3295         amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3296 #ifdef CONFIG_X86_MCE_AMD
3297         if ((adev->asic_type == CHIP_ALDEBARAN) &&
3298             (adev->gmc.xgmi.connected_to_cpu))
3299                 amdgpu_register_bad_pages_mca_notifier(adev);
3300 #endif
3301         return 0;
3302
3303 free:
3304         kfree((*data)->bps);
3305         kfree(*data);
3306         con->eh_data = NULL;
3307 out:
3308         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3309
3310         /*
3311          * Except for the error-threshold-exceeded case, failures in this
3312          * function do not fail amdgpu driver init.
3313          */
3314         if (!amdgpu_ras_is_rma(adev))
3315                 ret = 0;
3316         else
3317                 ret = -EINVAL;
3318
3319         return ret;
3320 }
3321
3322 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3323 {
3324         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3325         struct ras_err_handler_data *data = con->eh_data;
3326         int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3327         bool ret;
3328
3329         /* recovery_init never initialized it; nothing to tear down */
3330         if (!data)
3331                 return 0;
3332
3333         /* Save all cached bad pages to eeprom */
3334         do {
3335                 flush_delayed_work(&con->page_retirement_dwork);
3336                 ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3337         } while (ret && max_flush_timeout--);
3338
3339         if (con->page_retirement_thread)
3340                 kthread_stop(con->page_retirement_thread);
3341
3342         atomic_set(&con->page_retirement_req_cnt, 0);
3343         atomic_set(&con->poison_creation_count, 0);
3344
3345         mutex_destroy(&con->page_rsv_lock);
3346
3347         cancel_work_sync(&con->recovery_work);
3348
3349         cancel_delayed_work_sync(&con->page_retirement_dwork);
3350
3351         amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3352
3353         mutex_lock(&con->recovery_lock);
3354         con->eh_data = NULL;
3355         kfree(data->bps);
3356         kfree(data);
3357         mutex_unlock(&con->recovery_lock);
3358
3359         return 0;
3360 }
3361 /* recovery end */
3362
3363 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3364 {
3365         if (amdgpu_sriov_vf(adev)) {
3366                 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3367                 case IP_VERSION(13, 0, 2):
3368                 case IP_VERSION(13, 0, 6):
3369                 case IP_VERSION(13, 0, 14):
3370                         return true;
3371                 default:
3372                         return false;
3373                 }
3374         }
3375
3376         if (adev->asic_type == CHIP_IP_DISCOVERY) {
3377                 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3378                 case IP_VERSION(13, 0, 0):
3379                 case IP_VERSION(13, 0, 6):
3380                 case IP_VERSION(13, 0, 10):
3381                 case IP_VERSION(13, 0, 14):
3382                         return true;
3383                 default:
3384                         return false;
3385                 }
3386         }
3387
3388         return adev->asic_type == CHIP_VEGA10 ||
3389                 adev->asic_type == CHIP_VEGA20 ||
3390                 adev->asic_type == CHIP_ARCTURUS ||
3391                 adev->asic_type == CHIP_ALDEBARAN ||
3392                 adev->asic_type == CHIP_SIENNA_CICHLID;
3393 }
3394
3395 /*
3396  * This is a workaround for the vega20 workstation sku:
3397  * force-enable gfx ras and ignore the vbios gfx ras flag,
3398  * since GC EDC cannot be written.
3399  */
3400 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3401 {
3402         struct atom_context *ctx = adev->mode_info.atom_context;
3403
3404         if (!ctx)
3405                 return;
3406
3407         if (strnstr(ctx->vbios_pn, "D16406",
3408                     sizeof(ctx->vbios_pn)) ||
3409                 strnstr(ctx->vbios_pn, "D36002",
3410                         sizeof(ctx->vbios_pn)))
3411                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3412 }
3413
3414 /* Query ras capability via the atomfirmware interface */
3415 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3416 {
3417         /* mem_ecc cap */
3418         if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3419                 dev_info(adev->dev, "MEM ECC is active.\n");
3420                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3421                                          1 << AMDGPU_RAS_BLOCK__DF);
3422         } else {
3423                 dev_info(adev->dev, "MEM ECC is not present.\n");
3424         }
3425
3426         /* sram_ecc cap */
3427         if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3428                 dev_info(adev->dev, "SRAM ECC is active.\n");
3429                 if (!amdgpu_sriov_vf(adev))
3430                         adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3431                                                   1 << AMDGPU_RAS_BLOCK__DF);
3432                 else
3433                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3434                                                  1 << AMDGPU_RAS_BLOCK__SDMA |
3435                                                  1 << AMDGPU_RAS_BLOCK__GFX);
3436
3437                 /*
3438                  * VCN/JPEG RAS can be supported in both bare metal and
3439                  * SRIOV environments
3440                  */
3441                 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3442                     amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3443                     amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3444                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3445                                                  1 << AMDGPU_RAS_BLOCK__JPEG);
3446                 else
3447                         adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3448                                                   1 << AMDGPU_RAS_BLOCK__JPEG);
3449
3450                 /*
3451                  * XGMI RAS is not supported if the number of xgmi
3452                  * physical nodes is zero
3453                  */
3454                 if (!adev->gmc.xgmi.num_physical_nodes)
3455                         adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3456         } else {
3457                 dev_info(adev->dev, "SRAM ECC is not present.\n");
3458         }
3459 }
3460
3461 /* Query poison mode from umc/df IP callbacks */
3462 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3463 {
3464         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3465         bool df_poison, umc_poison;
3466
3467         /* poison setting is useless on SRIOV guest */
3468         if (amdgpu_sriov_vf(adev) || !con)
3469                 return;
3470
3471         /* Init the poison-supported flag; the default value is false */
3472         if (adev->gmc.xgmi.connected_to_cpu ||
3473             adev->gmc.is_app_apu) {
3474                 /* enabled by default when GPU is connected to CPU */
3475                 con->poison_supported = true;
3476         } else if (adev->df.funcs &&
3477             adev->df.funcs->query_ras_poison_mode &&
3478             adev->umc.ras &&
3479             adev->umc.ras->query_ras_poison_mode) {
3480                 df_poison =
3481                         adev->df.funcs->query_ras_poison_mode(adev);
3482                 umc_poison =
3483                         adev->umc.ras->query_ras_poison_mode(adev);
3484
3485                 /* Only if poison is set in both DF and UMC can we support it */
3486                 if (df_poison && umc_poison)
3487                         con->poison_supported = true;
3488                 else if (df_poison != umc_poison)
3489                         dev_warn(adev->dev,
3490                                 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3491                                 df_poison, umc_poison);
3492         }
3493 }
3494
3495 /*
3496  * Check the hardware's ras ability, which is saved in hw_supported.
3497  * If the hardware does not support ras, we can skip some ras initialization
3498  * and forbid some ras operations from IPs.
3499  * If software itself (say, a boot parameter) limits the ras ability, we
3500  * still need to allow IPs to do some limited operations, like disable.
3501  * In that case we initialize ras as normal, but each function must check
3502  * whether the operation is allowed.
3503  */
3504 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3505 {
3506         adev->ras_hw_enabled = adev->ras_enabled = 0;
3507
3508         if (!amdgpu_ras_asic_supported(adev))
3509                 return;
3510
3511         if (amdgpu_sriov_vf(adev)) {
3512                 if (amdgpu_virt_get_ras_capability(adev))
3513                         goto init_ras_enabled_flag;
3514         }
3515
3516         /* query ras capability from psp */
3517         if (amdgpu_psp_get_ras_capability(&adev->psp))
3518                 goto init_ras_enabled_flag;
3519
3520         /* query ras capability from vbios */
3521         if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3522                 amdgpu_ras_query_ras_capablity_from_vbios(adev);
3523         } else {
3524                 /* the driver only manages the RAS feature of a few IP
3525                  * blocks when the GPU is connected to the CPU through XGMI */
3526                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3527                                            1 << AMDGPU_RAS_BLOCK__SDMA |
3528                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
3529         }
3530
3531         /* apply asic specific settings (vega20 only for now) */
3532         amdgpu_ras_get_quirks(adev);
3533
3534         /* query poison mode from umc/df ip callback */
3535         amdgpu_ras_query_poison_mode(adev);
3536
3537 init_ras_enabled_flag:
3538         /* hw_supported needs to be aligned with RAS block mask. */
3539         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3540
3541         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3542                 adev->ras_hw_enabled & amdgpu_ras_mask;
3543
3544         /* aca is disabled by default */
3545         adev->aca.is_enabled = false;
3546
3547         /* the bad page feature is not applicable to this specific app platform */
3548         if (adev->gmc.is_app_apu &&
3549             amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3550                 amdgpu_bad_page_threshold = 0;
3551 }
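/*
 * Capability-source precedence in the function above: an SRIOV guest
 * asks the host first, then the PSP is queried, and only if neither
 * provides a capability mask does the driver fall back to the vbios
 * (or to a fixed GFX/SDMA/MMHUB set for CPU-connected GPUs and APUs).
 * The result is masked by AMDGPU_RAS_BLOCK_MASK, and, unless
 * amdgpu_ras_enable is 0, by the amdgpu_ras_mask module parameter to
 * produce adev->ras_enabled.
 */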
3552
3553 static void amdgpu_ras_counte_dw(struct work_struct *work)
3554 {
3555         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3556                                               ras_counte_delay_work.work);
3557         struct amdgpu_device *adev = con->adev;
3558         struct drm_device *dev = adev_to_drm(adev);
3559         unsigned long ce_count, ue_count;
3560         int res;
3561
3562         res = pm_runtime_get_sync(dev->dev);
3563         if (res < 0)
3564                 goto Out;
3565
3566         /* Cache new values.
3567          */
3568         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3569                 atomic_set(&con->ras_ce_count, ce_count);
3570                 atomic_set(&con->ras_ue_count, ue_count);
3571         }
3572
3573         pm_runtime_mark_last_busy(dev->dev);
3574 Out:
3575         pm_runtime_put_autosuspend(dev->dev);
3576 }
3577
3578 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3579 {
3580         return  (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3581                         AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3582                         AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3583                         AMDGPU_RAS_ERROR__PARITY;
3584 }
3585
3586 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3587 {
3588         struct ras_event_state *event_state;
3589         int i;
3590
3591         memset(mgr, 0, sizeof(*mgr));
3592         atomic64_set(&mgr->seqno, 0);
3593
3594         for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3595                 event_state = &mgr->event_state[i];
3596                 event_state->last_seqno = RAS_EVENT_INVALID_ID;
3597                 atomic64_set(&event_state->count, 0);
3598         }
3599 }
3600
3601 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3602 {
3603         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3604         struct amdgpu_hive_info *hive;
3605
3606         if (!ras)
3607                 return;
3608
3609         hive = amdgpu_get_xgmi_hive(adev);
3610         ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3611
3612         /* init event manager with node 0 on xgmi system */
3613         if (!amdgpu_reset_in_recovery(adev)) {
3614                 if (!hive || adev->gmc.xgmi.node_id == 0)
3615                         ras_event_mgr_init(ras->event_mgr);
3616         }
3617
3618         if (hive)
3619                 amdgpu_put_xgmi_hive(hive);
3620 }
3621
3622 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3623 {
3624         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3625
3626         if (!con || (adev->flags & AMD_IS_APU))
3627                 return;
3628
3629         switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3630         case IP_VERSION(13, 0, 2):
3631         case IP_VERSION(13, 0, 6):
3632         case IP_VERSION(13, 0, 14):
3633                 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3634                 break;
3635         default:
3636                 break;
3637         }
3638 }
3639
3640 int amdgpu_ras_init(struct amdgpu_device *adev)
3641 {
3642         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3643         int r;
3644
3645         if (con)
3646                 return 0;
3647
3648         con = kzalloc(sizeof(*con) +
3649                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3650                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3651                         GFP_KERNEL);
3652         if (!con)
3653                 return -ENOMEM;
3654
3655         con->adev = adev;
3656         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3657         atomic_set(&con->ras_ce_count, 0);
3658         atomic_set(&con->ras_ue_count, 0);
3659
3660         con->objs = (struct ras_manager *)(con + 1);
3661
3662         amdgpu_ras_set_context(adev, con);
3663
3664         amdgpu_ras_check_supported(adev);
3665
3666         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3667                 /* set gfx block ras context feature for VEGA20 Gaming
3668                  * send ras disable cmd to ras ta during ras late init.
3669                  */
3670                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3671                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3672
3673                         return 0;
3674                 }
3675
3676                 r = 0;
3677                 goto release_con;
3678         }
3679
3680         con->update_channel_flag = false;
3681         con->features = 0;
3682         con->schema = 0;
3683         INIT_LIST_HEAD(&con->head);
3684         /* Might need to get this flag from vbios. */
3685         con->flags = RAS_DEFAULT_FLAGS;
3686
3687         /* initialize the nbio ras function ahead of any other
3688          * ras functions so the hardware fatal error interrupt
3689          * can be enabled as early as possible */
3690         switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3691         case IP_VERSION(7, 4, 0):
3692         case IP_VERSION(7, 4, 1):
3693         case IP_VERSION(7, 4, 4):
3694                 if (!adev->gmc.xgmi.connected_to_cpu)
3695                         adev->nbio.ras = &nbio_v7_4_ras;
3696                 break;
3697         case IP_VERSION(4, 3, 0):
3698                 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3699                         /* unlike other generations of nbio ras,
3700                          * nbio v4_3 only supports the fatal error interrupt,
3701                          * which informs software that DF is frozen due to
3702                          * a system fatal error event. the driver should not
3703                          * enable nbio ras in such a case. Instead,
3704                          * check DF RAS */
3705                         adev->nbio.ras = &nbio_v4_3_ras;
3706                 break;
3707         case IP_VERSION(7, 9, 0):
3708                 if (!adev->gmc.is_app_apu)
3709                         adev->nbio.ras = &nbio_v7_9_ras;
3710                 break;
3711         default:
3712                 /* nbio ras is not available */
3713                 break;
3714         }
3715
3716         /* the nbio ras block needs to be enabled ahead of other ras blocks
3717          * to handle fatal errors */
3718         r = amdgpu_nbio_ras_sw_init(adev);
3719         if (r)
3720                 return r;
3721
3722         if (adev->nbio.ras &&
3723             adev->nbio.ras->init_ras_controller_interrupt) {
3724                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3725                 if (r)
3726                         goto release_con;
3727         }
3728
3729         if (adev->nbio.ras &&
3730             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3731                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3732                 if (r)
3733                         goto release_con;
3734         }
3735
3736         /* Pack socket_id into ras feature mask bits [31:29] */
3737         if (adev->smuio.funcs &&
3738             adev->smuio.funcs->get_socket_id)
3739                 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3740                                         AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3741
3742         /* Get RAS schema for particular SOC */
3743         con->schema = amdgpu_get_ras_schema(adev);
3744
3745         amdgpu_ras_init_reserved_vram_size(adev);
3746
3747         if (amdgpu_ras_fs_init(adev)) {
3748                 r = -EINVAL;
3749                 goto release_con;
3750         }
3751
3752         if (amdgpu_ras_aca_is_supported(adev)) {
3753                 if (amdgpu_aca_is_enabled(adev))
3754                         r = amdgpu_aca_init(adev);
3755                 else
3756                         r = amdgpu_mca_init(adev);
3757                 if (r)
3758                         goto release_con;
3759         }
3760
3761         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3762                  "hardware ability[%x] ras_mask[%x]\n",
3763                  adev->ras_hw_enabled, adev->ras_enabled);
3764
3765         return 0;
3766 release_con:
3767         amdgpu_ras_set_context(adev, NULL);
3768         kfree(con);
3769
3770         return r;
3771 }
3772
3773 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3774 {
3775         if (adev->gmc.xgmi.connected_to_cpu ||
3776             adev->gmc.is_app_apu)
3777                 return 1;
3778         return 0;
3779 }
3780
3781 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3782                                         struct ras_common_if *ras_block)
3783 {
3784         struct ras_query_if info = {
3785                 .head = *ras_block,
3786         };
3787
3788         if (!amdgpu_persistent_edc_harvesting_supported(adev))
3789                 return 0;
3790
3791         if (amdgpu_ras_query_error_status(adev, &info) != 0)
3792                 DRM_WARN("RAS init harvest failure");
3793
3794         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3795                 DRM_WARN("RAS init harvest reset failure");
3796
3797         return 0;
3798 }
3799
3800 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
3801 {
3802        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3803
3804        if (!con)
3805                return false;
3806
3807        return con->poison_supported;
3808 }
3809
3810 /* helper function to handle common stuff in ip late init phase */
3811 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
3812                          struct ras_common_if *ras_block)
3813 {
3814         struct amdgpu_ras_block_object *ras_obj = NULL;
3815         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3816         struct ras_query_if *query_info;
3817         unsigned long ue_count, ce_count;
3818         int r;
3819
3820         /* disable RAS feature per IP block if it is not supported */
3821         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
3822                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3823                 return 0;
3824         }
3825
3826         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3827         if (r) {
3828                 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
3829                         /* in the resume phase, if we fail to enable ras,
3830                          * clean up all ras fs nodes and disable ras */
3831                         goto cleanup;
3832                 } else
3833                         return r;
3834         }
3835
3836         /* check for errors on warm reset for ASICs with persistent EDC harvesting */
3837         amdgpu_persistent_edc_harvesting(adev, ras_block);
3838
3839         /* in resume phase, no need to create ras fs node */
3840         if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
3841                 return 0;
3842
3843         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3844         if (ras_obj->ras_cb || (ras_obj->hw_ops &&
3845             (ras_obj->hw_ops->query_poison_status ||
3846             ras_obj->hw_ops->handle_poison_consumption))) {
3847                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3848                 if (r)
3849                         goto cleanup;
3850         }
3851
3852         if (ras_obj->hw_ops &&
3853             (ras_obj->hw_ops->query_ras_error_count ||
3854              ras_obj->hw_ops->query_ras_error_status)) {
3855                 r = amdgpu_ras_sysfs_create(adev, ras_block);
3856                 if (r)
3857                         goto interrupt;
3858
3859                 /* Those are the cached values at init.
3860                  */
3861                 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
3862                 if (!query_info)
3863                         return -ENOMEM;
3864                 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
3865
3866                 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
3867                         atomic_set(&con->ras_ce_count, ce_count);
3868                         atomic_set(&con->ras_ue_count, ue_count);
3869                 }
3870
3871                 kfree(query_info);
3872         }
3873
3874         return 0;
3875
3876 interrupt:
3877         if (ras_obj->ras_cb)
3878                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3879 cleanup:
3880         amdgpu_ras_feature_enable(adev, ras_block, 0);
3881         return r;
3882 }
3883
3884 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
3885                          struct ras_common_if *ras_block)
3886 {
3887         return amdgpu_ras_block_late_init(adev, ras_block);
3888 }
3889
3890 /* helper function to remove ras fs node and interrupt handler */
3891 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
3892                           struct ras_common_if *ras_block)
3893 {
3894         struct amdgpu_ras_block_object *ras_obj;
3895         if (!ras_block)
3896                 return;
3897
3898         amdgpu_ras_sysfs_remove(adev, ras_block);
3899
3900         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
3901         if (ras_obj->ras_cb)
3902                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3903 }
3904
3905 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
3906                           struct ras_common_if *ras_block)
3907 {
3908         return amdgpu_ras_block_late_fini(adev, ras_block);
3909 }
3910
3911 /* Do some init work after IP late init, as a dependency.
3912  * It runs in the resume, gpu reset, and boot-up cases.
3913  */
3914 void amdgpu_ras_resume(struct amdgpu_device *adev)
3915 {
3916         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3917         struct ras_manager *obj, *tmp;
3918
3919         if (!adev->ras_enabled || !con) {
3920                 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
3921                 amdgpu_release_ras_context(adev);
3922
3923                 return;
3924         }
3925
3926         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
3927                 /* Set up all the other IPs which are not implemented. One
3928                  * tricky point: an IP's actual ras error type should be
3929                  * MULTI_UNCORRECTABLE, but since the driver does not handle
3930                  * it, ERROR_NONE makes sense anyway.
3931                  */
3932                 amdgpu_ras_enable_all_features(adev, 1);
3933
3934                 /* We enable ras on all hw_supported blocks, but a boot
3935                  * parameter might disable some of them, and one or more
3936                  * IPs may not be implemented yet. Disable those on their behalf.
3937                  */
3938                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
3939                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
3940                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
3941                                 /* there should not be any reference. */
3942                                 WARN_ON(alive_obj(obj));
3943                         }
3944                 }
3945         }
3946 }
3947
3948 void amdgpu_ras_suspend(struct amdgpu_device *adev)
3949 {
3950         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3951
3952         if (!adev->ras_enabled || !con)
3953                 return;
3954
3955         amdgpu_ras_disable_all_features(adev, 0);
3956         /* Make sure all ras objects are disabled. */
3957         if (AMDGPU_RAS_GET_FEATURES(con->features))
3958                 amdgpu_ras_disable_all_features(adev, 1);
3959 }
3960
3961 int amdgpu_ras_late_init(struct amdgpu_device *adev)
3962 {
3963         struct amdgpu_ras_block_list *node, *tmp;
3964         struct amdgpu_ras_block_object *obj;
3965         int r;
3966
3967         amdgpu_ras_event_mgr_init(adev);
3968
3969         if (amdgpu_ras_aca_is_supported(adev)) {
3970                 if (amdgpu_reset_in_recovery(adev)) {
3971                         if (amdgpu_aca_is_enabled(adev))
3972                                 r = amdgpu_aca_reset(adev);
3973                         else
3974                                 r = amdgpu_mca_reset(adev);
3975                         if (r)
3976                                 return r;
3977                 }
3978
3979                 if (!amdgpu_sriov_vf(adev)) {
3980                         if (amdgpu_aca_is_enabled(adev))
3981                                 amdgpu_ras_set_aca_debug_mode(adev, false);
3982                         else
3983                                 amdgpu_ras_set_mca_debug_mode(adev, false);
3984                 }
3985         }
3986
3987         /* The guest side doesn't need to init the ras feature */
3988         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
3989                 return 0;
3990
3991         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
3992                 obj = node->ras_obj;
3993                 if (!obj) {
3994                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3995                         continue;
3996                 }
3997
3998                 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3999                         continue;
4000
4001                 if (obj->ras_late_init) {
4002                         r = obj->ras_late_init(adev, &obj->ras_comm);
4003                         if (r) {
4004                                 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4005                                         obj->ras_comm.name, r);
4006                                 return r;
4007                         }
4008                 } else
4009                         amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4010         }
4011
4012         return 0;
4013 }
4014
4015 /* do some fini work before IP fini as dependence */
4016 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4017 {
4018         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4019
4020         if (!adev->ras_enabled || !con)
4021                 return 0;
4022
4023
4024         /* Need disable ras on all IPs here before ip [hw/sw]fini */
4025         if (AMDGPU_RAS_GET_FEATURES(con->features))
4026                 amdgpu_ras_disable_all_features(adev, 0);
4027         amdgpu_ras_recovery_fini(adev);
4028         return 0;
4029 }
4030
4031 int amdgpu_ras_fini(struct amdgpu_device *adev)
4032 {
4033         struct amdgpu_ras_block_list *ras_node, *tmp;
4034         struct amdgpu_ras_block_object *obj = NULL;
4035         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4036
4037         if (!adev->ras_enabled || !con)
4038                 return 0;
4039
4040         list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4041                 if (ras_node->ras_obj) {
4042                         obj = ras_node->ras_obj;
4043                         if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4044                             obj->ras_fini)
4045                                 obj->ras_fini(adev, &obj->ras_comm);
4046                         else
4047                                 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4048                 }
4049
4050                 /* Clear ras blocks from ras_list and free ras block list node */
4051                 list_del(&ras_node->node);
4052                 kfree(ras_node);
4053         }
4054
4055         amdgpu_ras_fs_fini(adev);
4056         amdgpu_ras_interrupt_remove_all(adev);
4057
4058         if (amdgpu_ras_aca_is_supported(adev)) {
4059                 if (amdgpu_aca_is_enabled(adev))
4060                         amdgpu_aca_fini(adev);
4061                 else
4062                         amdgpu_mca_fini(adev);
4063         }
4064
4065         WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4066
4067         if (AMDGPU_RAS_GET_FEATURES(con->features))
4068                 amdgpu_ras_disable_all_features(adev, 0);
4069
4070         cancel_delayed_work_sync(&con->ras_counte_delay_work);
4071
4072         amdgpu_ras_set_context(adev, NULL);
4073         kfree(con);
4074
4075         return 0;
4076 }
4077
4078 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4079 {
4080         struct amdgpu_ras *ras;
4081
4082         ras = amdgpu_ras_get_context(adev);
4083         if (!ras)
4084                 return false;
4085
4086         return atomic_read(&ras->fed);
4087 }
4088
4089 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4090 {
4091         struct amdgpu_ras *ras;
4092
4093         ras = amdgpu_ras_get_context(adev);
4094         if (ras)
4095                 atomic_set(&ras->fed, !!status);
4096 }
4097
4098 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4099 {
4100         struct amdgpu_ras *ras;
4101
4102         ras = amdgpu_ras_get_context(adev);
4103         if (!ras)
4104                 return NULL;
4105
4106         return ras->event_mgr;
4107 }
4108
4109 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4110                                      const void *caller)
4111 {
4112         struct ras_event_manager *event_mgr;
4113         struct ras_event_state *event_state;
4114         int ret = 0;
4115
4116         if (type >= RAS_EVENT_TYPE_COUNT) {
4117                 ret = -EINVAL;
4118                 goto out;
4119         }
4120
4121         event_mgr = __get_ras_event_mgr(adev);
4122         if (!event_mgr) {
4123                 ret = -EINVAL;
4124                 goto out;
4125         }
4126
4127         event_state = &event_mgr->event_state[type];
4128         event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4129         atomic64_inc(&event_state->count);
4130
4131 out:
4132         if (ret && caller)
4133                 dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
4134                          (int)type, caller, ret);
4135
4136         return ret;
4137 }
4138
4139 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4140 {
4141         struct ras_event_manager *event_mgr;
4142         u64 id;
4143
4144         if (type >= RAS_EVENT_TYPE_COUNT)
4145                 return RAS_EVENT_INVALID_ID;
4146
4147         switch (type) {
4148         case RAS_EVENT_TYPE_FATAL:
4149         case RAS_EVENT_TYPE_POISON_CREATION:
4150         case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4151                 event_mgr = __get_ras_event_mgr(adev);
4152                 if (!event_mgr)
4153                         return RAS_EVENT_INVALID_ID;
4154
4155                 id = event_mgr->event_state[type].last_seqno;
4156                 break;
4157         case RAS_EVENT_TYPE_INVALID:
4158         default:
4159                 id = RAS_EVENT_INVALID_ID;
4160                 break;
4161         }
4162
4163         return id;
4164 }
4165
4166 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4167 {
4168         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4169                 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4170                 enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4171                 u64 event_id;
4172
4173                 if (amdgpu_ras_mark_ras_event(adev, type))
4174                         return;
4175
4176                 event_id = amdgpu_ras_acquire_event_id(adev, type);
4177
4178                 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4179                               "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4180
4181                 amdgpu_ras_set_fed(adev, true);
4182                 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4183                 amdgpu_ras_reset_gpu(adev);
4184         }
4185 }
4186
4187 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4188 {
4189         if (adev->asic_type == CHIP_VEGA20 &&
4190             adev->pm.fw_version <= 0x283400) {
4191                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4192                                 amdgpu_ras_intr_triggered();
4193         }
4194
4195         return false;
4196 }
4197
4198 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4199 {
4200         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4201
4202         if (!con)
4203                 return;
4204
4205         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4206                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4207                 amdgpu_ras_set_context(adev, NULL);
4208                 kfree(con);
4209         }
4210 }
4211
4212 #ifdef CONFIG_X86_MCE_AMD
4213 static struct amdgpu_device *find_adev(uint32_t node_id)
4214 {
4215         int i;
4216         struct amdgpu_device *adev = NULL;
4217
4218         for (i = 0; i < mce_adev_list.num_gpu; i++) {
4219                 adev = mce_adev_list.devs[i];
4220
4221                 if (adev && adev->gmc.xgmi.connected_to_cpu &&
4222                     adev->gmc.xgmi.physical_node_id == node_id)
4223                         break;
4224                 adev = NULL;
4225         }
4226
4227         return adev;
4228 }
4229
4230 #define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
4231 #define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
4232 #define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4233 #define GPU_ID_OFFSET           8
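
/*
 * Worked example (hypothetical value, for illustration only): for an MCA
 * IPID of 0x900000B02000 the macros above decode as
 *
 *	GET_MCA_IPID_GPUID(ipid) = (ipid >> 44) & 0xF = 0x9,
 *	so gpu_id = 0x9 - GPU_ID_OFFSET = 1;
 *	GET_UMC_INST(ipid)       = (ipid >> 21) & 0x7 = 5;
 *	GET_CHAN_INDEX(ipid)     = ((ipid >> 12) & 0x3) |
 *	                           ((ipid >> 18) & 0x4) = 0x2 | 0x4 = 6.
 */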
4234
4235 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4236                                     unsigned long val, void *data)
4237 {
4238         struct mce *m = (struct mce *)data;
4239         struct amdgpu_device *adev = NULL;
4240         uint32_t gpu_id = 0;
4241         uint32_t umc_inst = 0, ch_inst = 0;
4242
4243         /*
4244          * Only process the error if it was generated in UMC_V2, which
4245          * belongs to the GPU UMCs, and occurred in DramECC (extended
4246          * error code = 0); otherwise bail out.
4247          */
4248         if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4249                     (XEC(m->status, 0x3f) == 0x0)))
4250                 return NOTIFY_DONE;
4251
4252         /*
4253          * If it is a correctable error, there is nothing more to do here.
4254          */
4255         if (mce_is_correctable(m))
4256                 return NOTIFY_OK;
4257
4258         /*
4259          * The GPU ID is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register.
4260          */
4261         gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4262
4263         adev = find_adev(gpu_id);
4264         if (!adev) {
4265                 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4266                                                                 gpu_id);
4267                 return NOTIFY_DONE;
4268         }
4269
4270         /*
4271          * For an uncorrectable error, find out the UMC instance and
4272          * channel index.
4273          */
4274         umc_inst = GET_UMC_INST(m->ipid);
4275         ch_inst = GET_CHAN_INDEX(m->ipid);
4276
4277         dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d\n",
4278                              umc_inst, ch_inst);
4279
4280         if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4281                 return NOTIFY_OK;
4282         else
4283                 return NOTIFY_DONE;
4284 }
4285
4286 static struct notifier_block amdgpu_bad_page_nb = {
4287         .notifier_call  = amdgpu_bad_page_notifier,
4288         .priority       = MCE_PRIO_UC,
4289 };
4290
4291 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4292 {
4293         /*
4294          * Add the adev to the mce_adev_list.
4295          * During a mode2 reset the amdgpu device is temporarily
4296          * removed from the mgpu_info list, which can cause
4297          * page retirement to fail, so use this list instead of
4298          * mgpu_info to find the amdgpu device on which the UMC
4299          * error was reported.
4300          */
4301         mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4302
4303         /*
4304          * Register the x86 notifier with the
4305          * MCE subsystem only once.
4306          */
4307         if (!notifier_registered) {
4308                 mce_register_decode_chain(&amdgpu_bad_page_nb);
4309                 notifier_registered = true;
4310         }
4311 }
4312 #endif
4313
4314 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4315 {
4316         if (!adev)
4317                 return NULL;
4318
4319         return adev->psp.ras_context.ras;
4320 }
4321
4322 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4323 {
4324         if (!adev)
4325                 return -EINVAL;
4326
4327         adev->psp.ras_context.ras = ras_con;
4328         return 0;
4329 }
4330
4331 /* check if ras is supported on block, say, sdma, gfx */
4332 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4333                 unsigned int block)
4334 {
4335         int ret = 0;
4336         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4337
4338         if (block >= AMDGPU_RAS_BLOCK_COUNT)
4339                 return 0;
4340
4341         ret = ras && (adev->ras_enabled & (1 << block));
4342
4343         /* Special case: on an asic with memory ecc enabled but sram
4344          * ecc disabled, a ras block may be missing from .ras_enabled
4345          * yet still support ras: if the asic supports poison mode and
4346          * the block has a ras configuration, treat the block as
4347          * ras-capable.
4348          */
4349         if (!ret &&
4350             (block == AMDGPU_RAS_BLOCK__GFX ||
4351              block == AMDGPU_RAS_BLOCK__SDMA ||
4352              block == AMDGPU_RAS_BLOCK__VCN ||
4353              block == AMDGPU_RAS_BLOCK__JPEG) &&
4354             (amdgpu_ras_mask & (1 << block)) &&
4355             amdgpu_ras_is_poison_mode_supported(adev) &&
4356             amdgpu_ras_get_ras_block(adev, block, 0))
4357                 ret = 1;
4358
4359         return ret;
4360 }
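
/*
 * Usage sketch (illustrative only): callers are expected to gate
 * block-specific RAS setup on this query, e.g.
 *
 *	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
 *		enable SDMA RAS handling;
 *
 * Note that for GFX/SDMA/VCN/JPEG this can return 1 even when the block is
 * absent from adev->ras_enabled, provided poison mode is supported and a
 * RAS object is registered for the block (see the comment above).
 */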
4361
4362 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4363 {
4364         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4365
4366         /* mode1 reset is the only option when the device is in RMA status */
4367         if (amdgpu_ras_is_rma(adev)) {
4368                 ras->gpu_reset_flags = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4370         }
4371
4372         if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4373                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4374                 int hive_ras_recovery = 0;
4375
4376                 if (hive) {
4377                         hive_ras_recovery = atomic_read(&hive->ras_recovery);
4378                         amdgpu_put_xgmi_hive(hive);
4379                 }
4380                 /* In a multi-GPU setup, once one GPU has started
4381                  * resetting all GPUs on the hive, the other GPUs do
4382                  * not need to trigger the reset again.
4383                  */
4384                 if (!hive_ras_recovery)
4385                         amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4386                 else
4387                         atomic_set(&ras->in_recovery, 0);
4388         } else {
4389                 flush_work(&ras->recovery_work);
4390                 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4391         }
4392
4393         return 0;
4394 }
4395
4396 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4397 {
4398         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4399         int ret = 0;
4400
4401         if (con) {
4402                 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4403                 if (!ret)
4404                         con->is_aca_debug_mode = enable;
4405         }
4406
4407         return ret;
4408 }
4409
4410 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4411 {
4412         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4413         int ret = 0;
4414
4415         if (con) {
4416                 if (amdgpu_aca_is_enabled(adev))
4417                         ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4418                 else
4419                         ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4420                 if (!ret)
4421                         con->is_aca_debug_mode = enable;
4422         }
4423
4424         return ret;
4425 }
4426
4427 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4428 {
4429         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4430         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4431         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4432
4433         if (!con)
4434                 return false;
4435
4436         if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4437             (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4438                 return con->is_aca_debug_mode;
4439         else
4440                 return true;
4441 }
4442
4443 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4444                                      unsigned int *error_query_mode)
4445 {
4446         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4447         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4448         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4449
4450         if (!con) {
4451                 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4452                 return false;
4453         }
4454
4455         if (amdgpu_sriov_vf(adev)) {
4456                 *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
4457         } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
4458                 *error_query_mode =
4459                         (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4460         } else {
4461                 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4462         }
4463
4464         return true;
4465 }
4466
4467 /* Register each ip ras block into amdgpu ras */
4468 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4469                 struct amdgpu_ras_block_object *ras_block_obj)
4470 {
4471         struct amdgpu_ras_block_list *ras_node;
4472         if (!adev || !ras_block_obj)
4473                 return -EINVAL;
4474
4475         ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4476         if (!ras_node)
4477                 return -ENOMEM;
4478
4479         INIT_LIST_HEAD(&ras_node->node);
4480         ras_node->ras_obj = ras_block_obj;
4481         list_add_tail(&ras_node->node, &adev->ras_list);
4482
4483         return 0;
4484 }
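
/*
 * Registration sketch (hypothetical names, for illustration only): an IP
 * block typically embeds a struct amdgpu_ras_block_object in its own state
 * and registers it during early init:
 *
 *	static struct amdgpu_ras_block_object example_ras_block = {
 *		.ras_comm = {
 *			.block = AMDGPU_RAS_BLOCK__SDMA,
 *			.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *		},
 *	};
 *
 *	amdgpu_ras_register_ras_block(adev, &example_ras_block);
 *
 * The list node allocated here is assumed to be released when
 * adev->ras_list is torn down, not by the caller.
 */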
4485
4486 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4487 {
4488         if (!err_type_name)
4489                 return;
4490
4491         switch (err_type) {
4492         case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4493                 sprintf(err_type_name, "correctable");
4494                 break;
4495         case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4496                 sprintf(err_type_name, "uncorrectable");
4497                 break;
4498         default:
4499                 sprintf(err_type_name, "unknown");
4500                 break;
4501         }
4502 }
4503
4504 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4505                                          const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4506                                          uint32_t instance,
4507                                          uint32_t *memory_id)
4508 {
4509         uint32_t err_status_lo_data, err_status_lo_offset;
4510
4511         if (!reg_entry)
4512                 return false;
4513
4514         err_status_lo_offset =
4515                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4516                                             reg_entry->seg_lo, reg_entry->reg_lo);
4517         err_status_lo_data = RREG32(err_status_lo_offset);
4518
4519         if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4520             !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4521                 return false;
4522
4523         *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4524
4525         return true;
4526 }
4527
4528 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4529                                        const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4530                                        uint32_t instance,
4531                                        unsigned long *err_cnt)
4532 {
4533         uint32_t err_status_hi_data, err_status_hi_offset;
4534
4535         if (!reg_entry)
4536                 return false;
4537
4538         err_status_hi_offset =
4539                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4540                                             reg_entry->seg_hi, reg_entry->reg_hi);
4541         err_status_hi_data = RREG32(err_status_hi_offset);
4542
4543         if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4544             !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4545                 /* keep the check here in case we need to refer to the result later */
4546                 dev_dbg(adev->dev, "Invalid err_info field\n");
4547
4548         /* read err count */
4549         *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4550
4551         return true;
4552 }
4553
4554 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4555                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
4556                                            uint32_t reg_list_size,
4557                                            const struct amdgpu_ras_memory_id_entry *mem_list,
4558                                            uint32_t mem_list_size,
4559                                            uint32_t instance,
4560                                            uint32_t err_type,
4561                                            unsigned long *err_count)
4562 {
4563         uint32_t memory_id;
4564         unsigned long err_cnt;
4565         char err_type_name[16];
4566         uint32_t i, j;
4567
4568         for (i = 0; i < reg_list_size; i++) {
4569                 /* query memory_id from err_status_lo */
4570                 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4571                                                          instance, &memory_id))
4572                         continue;
4573
4574                 /* query err_cnt from err_status_hi */
4575                 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4576                                                        instance, &err_cnt) ||
4577                     !err_cnt)
4578                         continue;
4579
4580                 *err_count += err_cnt;
4581
4582                 /* log the errors */
4583                 amdgpu_ras_get_error_type_name(err_type, err_type_name);
4584                 if (!mem_list) {
4585                         /* no memory id list provided, log the raw memory_id */
4586                         dev_info(adev->dev,
4587                                  "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4588                                  err_cnt, err_type_name,
4589                                  reg_list[i].block_name,
4590                                  instance, memory_id);
4591                 } else {
4592                         for (j = 0; j < mem_list_size; j++) {
4593                                 if (memory_id == mem_list[j].memory_id) {
4594                                         dev_info(adev->dev,
4595                                                  "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4596                                                  err_cnt, err_type_name,
4597                                                  reg_list[i].block_name,
4598                                                  instance, mem_list[j].name);
4599                                         break;
4600                                 }
4601                         }
4602                 }
4603         }
4604 }
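
/*
 * Caller sketch (hypothetical table, for illustration only):
 *
 *	static const struct amdgpu_ras_err_status_reg_entry example_reg_list[] = {
 *		{ (hwip/seg/reg offsets, AMDGPU_RAS_ERR_STATUS_VALID, name) },
 *	};
 *
 *	unsigned long err_count = 0;
 *
 *	amdgpu_ras_inst_query_ras_error_count(adev,
 *			example_reg_list, ARRAY_SIZE(example_reg_list),
 *			NULL, 0,
 *			instance,
 *			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *			&err_count);
 *
 * Each entry contributes its ERR_CNT only when a valid memory_id and a
 * non-zero count are read back; the helper accumulates into *err_count,
 * so the caller is expected to zero it beforehand.
 */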
4605
4606 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4607                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
4608                                            uint32_t reg_list_size,
4609                                            uint32_t instance)
4610 {
4611         uint32_t err_status_lo_offset, err_status_hi_offset;
4612         uint32_t i;
4613
4614         for (i = 0; i < reg_list_size; i++) {
4615                 err_status_lo_offset =
4616                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4617                                                     reg_list[i].seg_lo, reg_list[i].reg_lo);
4618                 err_status_hi_offset =
4619                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4620                                                     reg_list[i].seg_hi, reg_list[i].reg_hi);
4621                 WREG32(err_status_lo_offset, 0);
4622                 WREG32(err_status_hi_offset, 0);
4623         }
4624 }
4625
4626 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4627 {
4628         memset(err_data, 0, sizeof(*err_data));
4629
4630         INIT_LIST_HEAD(&err_data->err_node_list);
4631
4632         return 0;
4633 }
4634
4635 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4636 {
4637         if (!err_node)
4638                 return;
4639
4640         list_del(&err_node->node);
4641         kvfree(err_node);
4642 }
4643
4644 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4645 {
4646         struct ras_err_node *err_node, *tmp;
4647
4648         list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4649                 amdgpu_ras_error_node_release(err_node);
4650 }
4651
4652 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4653                                                              struct amdgpu_smuio_mcm_config_info *mcm_info)
4654 {
4655         struct ras_err_node *err_node;
4656         struct amdgpu_smuio_mcm_config_info *ref_id;
4657
4658         if (!err_data || !mcm_info)
4659                 return NULL;
4660
4661         for_each_ras_error(err_node, err_data) {
4662                 ref_id = &err_node->err_info.mcm_info;
4663
4664                 if (mcm_info->socket_id == ref_id->socket_id &&
4665                     mcm_info->die_id == ref_id->die_id)
4666                         return err_node;
4667         }
4668
4669         return NULL;
4670 }
4671
4672 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4673 {
4674         struct ras_err_node *err_node;
4675
4676         err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4677         if (!err_node)
4678                 return NULL;
4679
4680         INIT_LIST_HEAD(&err_node->node);
4681
4682         return err_node;
4683 }
4684
4685 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4686 {
4687         struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4688         struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4689         struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4690         struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4691
4692         if (unlikely(infoa->socket_id != infob->socket_id))
4693                 return infoa->socket_id - infob->socket_id;
4694
4695         return infoa->die_id - infob->die_id;
4698 }
4699
4700 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4701                                 struct amdgpu_smuio_mcm_config_info *mcm_info)
4702 {
4703         struct ras_err_node *err_node;
4704
4705         err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4706         if (err_node)
4707                 return &err_node->err_info;
4708
4709         err_node = amdgpu_ras_error_node_new();
4710         if (!err_node)
4711                 return NULL;
4712
4713         memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4714
4715         err_data->err_list_count++;
4716         list_add_tail(&err_node->node, &err_data->err_node_list);
4717         list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4718
4719         return &err_node->err_info;
4720 }
4721
4722 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4723                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4724                                         u64 count)
4725 {
4726         struct ras_err_info *err_info;
4727
4728         if (!err_data || !mcm_info)
4729                 return -EINVAL;
4730
4731         if (!count)
4732                 return 0;
4733
4734         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4735         if (!err_info)
4736                 return -EINVAL;
4737
4738         err_info->ue_count += count;
4739         err_data->ue_count += count;
4740
4741         return 0;
4742 }
4743
4744 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4745                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4746                                         u64 count)
4747 {
4748         struct ras_err_info *err_info;
4749
4750         if (!err_data || !mcm_info)
4751                 return -EINVAL;
4752
4753         if (!count)
4754                 return 0;
4755
4756         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4757         if (!err_info)
4758                 return -EINVAL;
4759
4760         err_info->ce_count += count;
4761         err_data->ce_count += count;
4762
4763         return 0;
4764 }
4765
4766 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
4767                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4768                                         u64 count)
4769 {
4770         struct ras_err_info *err_info;
4771
4772         if (!err_data || !mcm_info)
4773                 return -EINVAL;
4774
4775         if (!count)
4776                 return 0;
4777
4778         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4779         if (!err_info)
4780                 return -EINVAL;
4781
4782         err_info->de_count += count;
4783         err_data->de_count += count;
4784
4785         return 0;
4786 }
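
/*
 * Lifecycle sketch for the err_data helpers above (illustrative only;
 * socket/die ids are hypothetical):
 *
 *	struct ras_err_data err_data;
 *	struct amdgpu_smuio_mcm_config_info mcm = {
 *		.socket_id = 0,
 *		.die_id = 1,
 *	};
 *
 *	amdgpu_ras_error_data_init(&err_data);
 *	amdgpu_ras_error_statistic_ue_count(&err_data, &mcm, 2);
 *	amdgpu_ras_error_statistic_ce_count(&err_data, &mcm, 5);
 *	(now err_data.ue_count == 2 and err_data.ce_count == 5, with the
 *	 per-die totals held in the node created for socket 0 / die 1)
 *	amdgpu_ras_error_data_fini(&err_data);
 *
 * Nodes are created on demand, kept sorted by (socket_id, die_id) via
 * list_sort(), and released by _fini(); zero counts are ignored.
 */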
4787
4788 #define mmMP0_SMN_C2PMSG_92     0x1609C
4789 #define mmMP0_SMN_C2PMSG_126    0x160BE
4790 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
4791                                                  u32 instance)
4792 {
4793         u32 socket_id, aid_id, hbm_id;
4794         u32 fw_status;
4795         u32 boot_error;
4796         u64 reg_addr;
4797
4798         /* The SMN addressing pattern on other SOCs may differ from the
4799          * one for aqua_vanjaram. Revisit this code if the pattern changes;
4800          * in that case, replace the aqua_vanjaram implementation with a
4801          * more generic helper. */
4802         reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4803                    aqua_vanjaram_encode_ext_smn_addressing(instance);
4804         fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4805
4806         reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
4807                    aqua_vanjaram_encode_ext_smn_addressing(instance);
4808         boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4809
4810         socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
4811         aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
4812         hbm_id = (AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1;
4813
4814         if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
4815                 dev_info(adev->dev,
4816                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
4817                          socket_id, aid_id, hbm_id, fw_status);
4818
4819         if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
4820                 dev_info(adev->dev,
4821                          "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
4822                          socket_id, aid_id, fw_status);
4823
4824         if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
4825                 dev_info(adev->dev,
4826                          "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
4827                          socket_id, aid_id, fw_status);
4828
4829         if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
4830                 dev_info(adev->dev,
4831                          "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
4832                          socket_id, aid_id, fw_status);
4833
4834         if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
4835                 dev_info(adev->dev,
4836                          "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
4837                          socket_id, aid_id, fw_status);
4838
4839         if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
4840                 dev_info(adev->dev,
4841                          "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
4842                          socket_id, aid_id, fw_status);
4843
4844         if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
4845                 dev_info(adev->dev,
4846                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
4847                          socket_id, aid_id, hbm_id, fw_status);
4848
4849         if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
4850                 dev_info(adev->dev,
4851                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
4852                          socket_id, aid_id, hbm_id, fw_status);
4853
4854         if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
4855                 dev_info(adev->dev,
4856                          "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
4857                          socket_id, aid_id, fw_status);
4858
4859         if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
4860                 dev_info(adev->dev,
4861                          "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
4862                          socket_id, aid_id, fw_status);
4863 }
4864
4865 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
4866                                            u32 instance)
4867 {
4868         u64 reg_addr;
4869         u32 reg_data;
4870         int retry_loop;
4871
4872         reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
4873                    aqua_vanjaram_encode_ext_smn_addressing(instance);
4874
4875         for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
4876                 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
4877                 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
4878                         return false;
4879                 else
4880                         msleep(1);
4881         }
4882
4883         return true;
4884 }
4885
4886 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
4887 {
4888         u32 i;
4889
4890         for (i = 0; i < num_instances; i++) {
4891                 if (amdgpu_ras_boot_error_detected(adev, i))
4892                         amdgpu_ras_boot_time_error_reporting(adev, i);
4893         }
4894 }
4895
4896 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
4897 {
4898         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4899         struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
4900         uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
4901         int ret = 0;
4902
4903         mutex_lock(&con->page_rsv_lock);
4904         ret = amdgpu_vram_mgr_query_page_status(mgr, start);
4905         if (ret == -ENOENT)
4906                 ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
4907         mutex_unlock(&con->page_rsv_lock);
4908
4909         return ret;
4910 }
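
/*
 * Usage sketch (illustrative): retiring the VRAM page containing a bad
 * physical address would look roughly like
 *
 *	amdgpu_ras_reserve_page(adev, bad_addr >> AMDGPU_GPU_PAGE_SHIFT);
 *
 * page_rsv_lock makes the query-then-reserve sequence atomic against
 * concurrent callers, so double reservation of the same page is avoided.
 */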
4911
4912 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
4913                                 const char *fmt, ...)
4914 {
4915         struct va_format vaf;
4916         va_list args;
4917
4918         va_start(args, fmt);
4919         vaf.fmt = fmt;
4920         vaf.va = &args;
4921
4922         if (RAS_EVENT_ID_IS_VALID(event_id))
4923                 dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
4924         else
4925                 dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
4926
4927         va_end(args);
4928 }
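
/*
 * Output format note (assuming RAS_EVENT_LOG(), used earlier in this file,
 * forwards to this helper): a fatal event with seqno 12 would be printed
 * roughly as
 *
 *	amdgpu 0000:03:00.0: {12}uncorrectable hardware error ...
 *
 * i.e. valid event ids are prefixed in braces so related log lines can be
 * correlated, while invalid ids print the message unprefixed.
 */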
4929
4930 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
4931 {
4932         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4933
4934         if (!con)
4935                 return false;
4936
4937         return con->is_rma;
4938 }