1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  *
23  */
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/list_sort.h>
32
33 #include "amdgpu.h"
34 #include "amdgpu_ras.h"
35 #include "amdgpu_atomfirmware.h"
36 #include "amdgpu_xgmi.h"
37 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "nbio_v4_3.h"
39 #include "nbio_v7_9.h"
40 #include "atom.h"
41 #include "amdgpu_reset.h"
42 #include "amdgpu_psp.h"
43
44 #ifdef CONFIG_X86_MCE_AMD
45 #include <asm/mce.h>
46
47 static bool notifier_registered;
48 #endif
49 static const char *RAS_FS_NAME = "ras";
50
51 const char *ras_error_string[] = {
52         "none",
53         "parity",
54         "single_correctable",
55         "multi_uncorrectable",
56         "poison",
57 };
58
59 const char *ras_block_string[] = {
60         "umc",
61         "sdma",
62         "gfx",
63         "mmhub",
64         "athub",
65         "pcie_bif",
66         "hdp",
67         "xgmi_wafl",
68         "df",
69         "smn",
70         "sem",
71         "mp0",
72         "mp1",
73         "fuse",
74         "mca",
75         "vcn",
76         "jpeg",
77         "ih",
78         "mpio",
79 };
80
81 const char *ras_mca_block_string[] = {
82         "mca_mp0",
83         "mca_mp1",
84         "mca_mpio",
85         "mca_iohc",
86 };
87
88 struct amdgpu_ras_block_list {
89         /* ras block link */
90         struct list_head node;
91
92         struct amdgpu_ras_block_object *ras_obj;
93 };
94
95 const char *get_ras_block_str(struct ras_common_if *ras_block)
96 {
97         if (!ras_block)
98                 return "NULL";
99
100         if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
101             ras_block->block >= ARRAY_SIZE(ras_block_string))
102                 return "OUT OF RANGE";
103
104         if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
105                 return ras_mca_block_string[ras_block->sub_block_index];
106
107         return ras_block_string[ras_block->block];
108 }
109
110 #define ras_block_str(_BLOCK_) \
111         (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
112
113 #define ras_err_str(i) (ras_error_string[ffs(i)])
114
115 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
116
117 /* inject address is 52 bits */
118 #define RAS_UMC_INJECT_ADDR_LIMIT       (0x1ULL << 52)
119
120 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
121 #define RAS_BAD_PAGE_COVER              (100 * 1024 * 1024ULL)
122
123 #define MAX_UMC_POISON_POLLING_TIME_ASYNC  300  //ms
124
125 #define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100  //ms
126
127 #define MAX_FLUSH_RETIRE_DWORK_TIMES  100
128
129 enum amdgpu_ras_retire_page_reservation {
130         AMDGPU_RAS_RETIRE_PAGE_RESERVED,
131         AMDGPU_RAS_RETIRE_PAGE_PENDING,
132         AMDGPU_RAS_RETIRE_PAGE_FAULT,
133 };
134
135 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
136
137 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
138                                 uint64_t addr);
139 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
140                                 uint64_t addr);
141 #ifdef CONFIG_X86_MCE_AMD
142 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
143 struct mce_notifier_adev_list {
144         struct amdgpu_device *devs[MAX_GPU_INSTANCE];
145         int num_gpu;
146 };
147 static struct mce_notifier_adev_list mce_adev_list;
148 #endif
149
150 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
151 {
152         if (adev && amdgpu_ras_get_context(adev))
153                 amdgpu_ras_get_context(adev)->error_query_ready = ready;
154 }
155
156 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
157 {
158         if (adev && amdgpu_ras_get_context(adev))
159                 return amdgpu_ras_get_context(adev)->error_query_ready;
160
161         return false;
162 }
163
164 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
165 {
166         struct ras_err_data err_data;
167         struct eeprom_table_record err_rec;
168         int ret;
169
170         if ((address >= adev->gmc.mc_vram_size) ||
171             (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
172                 dev_warn(adev->dev,
173                          "RAS WARN: input address 0x%llx is invalid.\n",
174                          address);
175                 return -EINVAL;
176         }
177
178         if (amdgpu_ras_check_bad_page(adev, address)) {
179                 dev_warn(adev->dev,
180                          "RAS WARN: 0x%llx has already been marked as bad page!\n",
181                          address);
182                 return 0;
183         }
184
185         ret = amdgpu_ras_error_data_init(&err_data);
186         if (ret)
187                 return ret;
188
189         memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
190         err_data.err_addr = &err_rec;
191         amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
192
193         if (amdgpu_bad_page_threshold != 0) {
194                 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
195                                          err_data.err_addr_cnt, false);
196                 amdgpu_ras_save_bad_pages(adev, NULL);
197         }
198
199         amdgpu_ras_error_data_fini(&err_data);
200
201         dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
202         dev_warn(adev->dev, "Clear EEPROM:\n");
203         dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
204
205         return 0;
206 }
207
208 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
209                                         size_t size, loff_t *pos)
210 {
211         struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
212         struct ras_query_if info = {
213                 .head = obj->head,
214         };
215         ssize_t s;
216         char val[128];
217
218         if (amdgpu_ras_query_error_status(obj->adev, &info))
219                 return -EINVAL;
220
221         /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
222         if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
223             amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
224                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
225                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
226         }
227
228         s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
229                         "ue", info.ue_count,
230                         "ce", info.ce_count);
231         if (*pos >= s)
232                 return 0;
233
234         s -= *pos;
235         s = min_t(u64, s, size);
236
237
238         if (copy_to_user(buf, &val[*pos], s))
239                 return -EINVAL;
240
241         *pos += s;
242
243         return s;
244 }
245
246 static const struct file_operations amdgpu_ras_debugfs_ops = {
247         .owner = THIS_MODULE,
248         .read = amdgpu_ras_debugfs_read,
249         .write = NULL,
250         .llseek = default_llseek
251 };
252
253 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
254 {
255         int i;
256
257         for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
258                 *block_id = i;
259                 if (strcmp(name, ras_block_string[i]) == 0)
260                         return 0;
261         }
262         return -EINVAL;
263 }
264
265 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
266                 const char __user *buf, size_t size,
267                 loff_t *pos, struct ras_debug_if *data)
268 {
269         ssize_t s = min_t(u64, 64, size);
270         char str[65];
271         char block_name[33];
272         char err[9] = "ue";
273         int op = -1;
274         int block_id;
275         uint32_t sub_block;
276         u64 address, value;
277         /* default value is 0 if the mask is not set by user */
278         u32 instance_mask = 0;
279
280         if (*pos)
281                 return -EINVAL;
282         *pos = size;
283
284         memset(str, 0, sizeof(str));
285         memset(data, 0, sizeof(*data));
286
287         if (copy_from_user(str, buf, s))
288                 return -EINVAL;
289
290         if (sscanf(str, "disable %32s", block_name) == 1)
291                 op = 0;
292         else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
293                 op = 1;
294         else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
295                 op = 2;
296         else if (strstr(str, "retire_page") != NULL)
297                 op = 3;
298         else if (str[0] && str[1] && str[2] && str[3])
299                 /* ascii string, but commands are not matched. */
300                 return -EINVAL;
301
302         if (op != -1) {
303                 if (op == 3) {
304                         if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
305                             sscanf(str, "%*s %llu", &address) != 1)
306                                 return -EINVAL;
307
308                         data->op = op;
309                         data->inject.address = address;
310
311                         return 0;
312                 }
313
314                 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
315                         return -EINVAL;
316
317                 data->head.block = block_id;
318                 /* only ue, ce and poison errors are supported */
319                 if (!memcmp("ue", err, 2))
320                         data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
321                 else if (!memcmp("ce", err, 2))
322                         data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
323                 else if (!memcmp("poison", err, 6))
324                         data->head.type = AMDGPU_RAS_ERROR__POISON;
325                 else
326                         return -EINVAL;
327
328                 data->op = op;
329
330                 if (op == 2) {
331                         if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
332                                    &sub_block, &address, &value, &instance_mask) != 4 &&
333                             sscanf(str, "%*s %*s %*s %u %llu %llu %u",
334                                    &sub_block, &address, &value, &instance_mask) != 4 &&
335                                 sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
336                                    &sub_block, &address, &value) != 3 &&
337                             sscanf(str, "%*s %*s %*s %u %llu %llu",
338                                    &sub_block, &address, &value) != 3)
339                                 return -EINVAL;
340                         data->head.sub_block_index = sub_block;
341                         data->inject.address = address;
342                         data->inject.value = value;
343                         data->inject.instance_mask = instance_mask;
344                 }
345         } else {
346                 if (size < sizeof(*data))
347                         return -EINVAL;
348
349                 if (copy_from_user(data, buf, sizeof(*data)))
350                         return -EINVAL;
351         }
352
353         return 0;
354 }
355
356 static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
357                                 struct ras_debug_if *data)
358 {
359         int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
360         uint32_t mask, inst_mask = data->inject.instance_mask;
361
362         /* no need to set instance mask if there is only one instance */
363         if (num_xcc <= 1 && inst_mask) {
364                 data->inject.instance_mask = 0;
365                 dev_dbg(adev->dev,
366                         "RAS inject mask(0x%x) isn't supported, forcing it to 0.\n",
367                         inst_mask);
368
369                 return;
370         }
371
372         switch (data->head.block) {
373         case AMDGPU_RAS_BLOCK__GFX:
374                 mask = GENMASK(num_xcc - 1, 0);
375                 break;
376         case AMDGPU_RAS_BLOCK__SDMA:
377                 mask = GENMASK(adev->sdma.num_instances - 1, 0);
378                 break;
379         case AMDGPU_RAS_BLOCK__VCN:
380         case AMDGPU_RAS_BLOCK__JPEG:
381                 mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
382                 break;
383         default:
384                 mask = inst_mask;
385                 break;
386         }
387
388         /* remove invalid bits in instance mask */
389         data->inject.instance_mask &= mask;
390         if (inst_mask != data->inject.instance_mask)
391                 dev_dbg(adev->dev,
392                         "Adjust RAS inject mask 0x%x to 0x%x\n",
393                         inst_mask, data->inject.instance_mask);
394 }
395
396 /**
397  * DOC: AMDGPU RAS debugfs control interface
398  *
399  * The control interface accepts struct ras_debug_if which has two members.
400  *
401  * First member: ras_debug_if::head or ras_debug_if::inject.
402  *
403  * head is used to indicate which IP block will be under control.
404  *
405  * head has four members: block, type, sub_block_index and name.
406  * block: which IP will be under control.
407  * type: what kind of error will be enabled/disabled/injected.
408  * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
409  * name: the name of IP.
410  *
411  * inject has three more members than head: address, value and mask.
412  * As their names indicate, inject operation will write the
413  * value to the address.
414  *
415  * The second member: struct ras_debug_if::op.
416  * It has three kinds of operations.
417  *
418  * - 0: disable RAS on the block. Take ::head as its data.
419  * - 1: enable RAS on the block. Take ::head as its data.
420  * - 2: inject errors on the block. Take ::inject as its data.
421  *
422  * How to use the interface?
423  *
424  * In a program
425  *
426  * Copy the struct ras_debug_if in your code and initialize it.
427  * Write the struct to the control interface.
428  *
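 * As a minimal sketch (illustrative only; it assumes the program has copied
 * the struct ras_debug_if and the related block/error enum definitions as
 * described above, and uses the debugfs path from the shell examples below),
 * enabling multi-uncorrectable RAS on the umc block could look like:
 *
 * .. code-block:: c
 *
 *      struct ras_debug_if data = { 0 };
 *      int fd;
 *
 *      data.op = 1;    // 0: disable, 1: enable, 2: inject
 *      data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *      data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *      data.head.sub_block_index = 0;
 *
 *      fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *      if (fd >= 0) {
 *              write(fd, &data, sizeof(data));
 *              close(fd);
 *      }
 *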
429  * From shell
430  *
431  * .. code-block:: bash
432  *
433  *      echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
434  *      echo "enable  <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
435  *      echo "inject  <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
436  *
437  * Where N is the card which you want to affect.
438  *
439  * "disable" requires only the block.
440  * "enable" requires the block and error type.
441  * "inject" requires the block, error type, address, and value.
442  *
443  * The block is one of: umc, sdma, gfx, etc.
444  *      see ras_block_string[] for details
445  *
446  * The error type is one of: ue, ce and poison where,
447  *      ue is multi-uncorrectable
448  *      ce is single-correctable
449  *      poison is poison
450  *
451  * The sub-block is the sub-block index; pass 0 if there is no sub-block.
452  * The address and value are hexadecimal numbers, leading 0x is optional.
453  * The mask is the instance mask; it is optional and defaults to 0x1.
454  *
455  * For instance,
456  *
457  * .. code-block:: bash
458  *
459  *      echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
460  *      echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
461  *      echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
462  *
463  * How to check the result of the operation?
464  *
465  * To check disable/enable, see "ras" features at,
466  * /sys/class/drm/card[0/1/2...]/device/ras/features
467  *
468  * To check inject, see the corresponding error count at,
469  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
470  *
471  * .. note::
472  *      Operations are only allowed on blocks which are supported.
473  *      Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
474  *      to see which blocks support RAS on a particular asic.
475  *
476  */
477 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
478                                              const char __user *buf,
479                                              size_t size, loff_t *pos)
480 {
481         struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
482         struct ras_debug_if data;
483         int ret = 0;
484
485         if (!amdgpu_ras_get_error_query_ready(adev)) {
486                 dev_warn(adev->dev, "RAS WARN: error injection "
487                                 "currently inaccessible\n");
488                 return size;
489         }
490
491         ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
492         if (ret)
493                 return ret;
494
495         if (data.op == 3) {
496                 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
497                 if (!ret)
498                         return size;
499                 else
500                         return ret;
501         }
502
503         if (!amdgpu_ras_is_supported(adev, data.head.block))
504                 return -EINVAL;
505
506         switch (data.op) {
507         case 0:
508                 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
509                 break;
510         case 1:
511                 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
512                 break;
513         case 2:
514                 if ((data.inject.address >= adev->gmc.mc_vram_size &&
515                     adev->gmc.mc_vram_size) ||
516                     (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
517                         dev_warn(adev->dev, "RAS WARN: input address "
518                                         "0x%llx is invalid.",
519                                         data.inject.address);
520                         ret = -EINVAL;
521                         break;
522                 }
523
524                 /* umc ce/ue error injection for a bad page is not allowed */
525                 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
526                     amdgpu_ras_check_bad_page(adev, data.inject.address)) {
527                         dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
528                                  "already been marked as bad!\n",
529                                  data.inject.address);
530                         break;
531                 }
532
533                 amdgpu_ras_instance_mask_check(adev, &data);
534
535                 /* data.inject.address is offset instead of absolute gpu address */
536                 ret = amdgpu_ras_error_inject(adev, &data.inject);
537                 break;
538         default:
539                 ret = -EINVAL;
540                 break;
541         }
542
543         if (ret)
544                 return ret;
545
546         return size;
547 }
548
549 /**
550  * DOC: AMDGPU RAS debugfs EEPROM table reset interface
551  *
552  * Some boards contain an EEPROM which is used to persistently store a list of
553  * bad pages which have experienced ECC errors in VRAM.  This interface provides
554  * a way to reset the EEPROM, e.g., after testing error injection.
555  *
556  * Usage:
557  *
558  * .. code-block:: bash
559  *
560  *      echo 1 > ../ras/ras_eeprom_reset
561  *
562  * will reset EEPROM table to 0 entries.
563  *
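 * The node lives in the same per-device debugfs directory as ras_ctrl, so
 * with an absolute path the command is, for example (card index is
 * illustrative):
 *
 * .. code-block:: bash
 *
 *      echo 1 > /sys/kernel/debug/dri/<N>/ras/ras_eeprom_reset
 *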
564  */
565 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
566                                                const char __user *buf,
567                                                size_t size, loff_t *pos)
568 {
569         struct amdgpu_device *adev =
570                 (struct amdgpu_device *)file_inode(f)->i_private;
571         int ret;
572
573         ret = amdgpu_ras_eeprom_reset_table(
574                 &(amdgpu_ras_get_context(adev)->eeprom_control));
575
576         if (!ret) {
577                 /* Something was written to EEPROM.
578                  */
579                 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
580                 return size;
581         } else {
582                 return ret;
583         }
584 }
585
586 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
587         .owner = THIS_MODULE,
588         .read = NULL,
589         .write = amdgpu_ras_debugfs_ctrl_write,
590         .llseek = default_llseek
591 };
592
593 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
594         .owner = THIS_MODULE,
595         .read = NULL,
596         .write = amdgpu_ras_debugfs_eeprom_write,
597         .llseek = default_llseek
598 };
599
600 /**
601  * DOC: AMDGPU RAS sysfs Error Count Interface
602  *
603  * It allows the user to read the error count for each IP block on the gpu through
604  * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
605  *
606  * It outputs multiple lines which report the uncorrected (ue) and corrected
607  * (ce) error counts.
608  *
609  * The format of each line is:
610  *
611  * [ce|ue]: count
612  *
613  * Example:
614  *
615  * .. code-block:: bash
616  *
617  *      ue: 0
618  *      ce: 1
619  *
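 * The counter files can be read directly, for example (card index and block
 * name are illustrative):
 *
 * .. code-block:: bash
 *
 *      cat /sys/class/drm/card0/device/ras/umc_err_count
 *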
620  */
621 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
622                 struct device_attribute *attr, char *buf)
623 {
624         struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
625         struct ras_query_if info = {
626                 .head = obj->head,
627         };
628
629         if (!amdgpu_ras_get_error_query_ready(obj->adev))
630                 return sysfs_emit(buf, "Query currently inaccessible\n");
631
632         if (amdgpu_ras_query_error_status(obj->adev, &info))
633                 return -EINVAL;
634
635         if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
636             amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
637                 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
638                         dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
639         }
640
641         if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
642                 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
643                                 "ce", info.ce_count, "de", info.de_count);
644         else
645                 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
646                                 "ce", info.ce_count);
647 }
648
649 /* obj begin */
650
651 #define get_obj(obj) do { (obj)->use++; } while (0)
652 #define alive_obj(obj) ((obj)->use)
653
654 static inline void put_obj(struct ras_manager *obj)
655 {
656         if (obj && (--obj->use == 0)) {
657                 list_del(&obj->node);
658                 amdgpu_ras_error_data_fini(&obj->err_data);
659         }
660
661         if (obj && (obj->use < 0))
662                 DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
663 }
664
665 /* make one obj and return it. */
666 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
667                 struct ras_common_if *head)
668 {
669         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
670         struct ras_manager *obj;
671
672         if (!adev->ras_enabled || !con)
673                 return NULL;
674
675         if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
676                 return NULL;
677
678         if (head->block == AMDGPU_RAS_BLOCK__MCA) {
679                 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
680                         return NULL;
681
682                 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
683         } else
684                 obj = &con->objs[head->block];
685
686         /* already exists. return obj? */
687         if (alive_obj(obj))
688                 return NULL;
689
690         if (amdgpu_ras_error_data_init(&obj->err_data))
691                 return NULL;
692
693         obj->head = *head;
694         obj->adev = adev;
695         list_add(&obj->node, &con->head);
696         get_obj(obj);
697
698         return obj;
699 }
700
701 /* return an obj equal to head, or the first when head is NULL */
702 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
703                 struct ras_common_if *head)
704 {
705         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
706         struct ras_manager *obj;
707         int i;
708
709         if (!adev->ras_enabled || !con)
710                 return NULL;
711
712         if (head) {
713                 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
714                         return NULL;
715
716                 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
717                         if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
718                                 return NULL;
719
720                         obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
721                 } else
722                         obj = &con->objs[head->block];
723
724                 if (alive_obj(obj))
725                         return obj;
726         } else {
727                 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
728                         obj = &con->objs[i];
729                         if (alive_obj(obj))
730                                 return obj;
731                 }
732         }
733
734         return NULL;
735 }
736 /* obj end */
737
738 /* feature ctl begin */
739 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
740                                          struct ras_common_if *head)
741 {
742         return adev->ras_hw_enabled & BIT(head->block);
743 }
744
745 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
746                 struct ras_common_if *head)
747 {
748         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
749
750         return con->features & BIT(head->block);
751 }
752
753 /*
754  * if obj is not created, then create one.
755  * set feature enable flag.
756  */
757 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
758                 struct ras_common_if *head, int enable)
759 {
760         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
761         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
762
763         /* If hardware does not support ras, then do not create obj.
764          * But if hardware supports ras, we can create the obj.
765          * The RAS framework checks con->hw_supported to see if it needs to do
766          * the corresponding initialization.
767          * The IP checks con->support to see if it needs to disable ras.
768          */
769         if (!amdgpu_ras_is_feature_allowed(adev, head))
770                 return 0;
771
772         if (enable) {
773                 if (!obj) {
774                         obj = amdgpu_ras_create_obj(adev, head);
775                         if (!obj)
776                                 return -EINVAL;
777                 } else {
778                         /* In case we create obj somewhere else */
779                         get_obj(obj);
780                 }
781                 con->features |= BIT(head->block);
782         } else {
783                 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
784                         con->features &= ~BIT(head->block);
785                         put_obj(obj);
786                 }
787         }
788
789         return 0;
790 }
791
792 /* wrapper of psp_ras_enable_features */
793 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
794                 struct ras_common_if *head, bool enable)
795 {
796         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
797         union ta_ras_cmd_input *info;
798         int ret;
799
800         if (!con)
801                 return -EINVAL;
802
803         /* For non-gfx ip, do not enable ras feature if it is not allowed.
804          * For gfx ip, regardless of feature support status, force issue
805          * enable or disable ras feature commands. */
806         if (head->block != AMDGPU_RAS_BLOCK__GFX &&
807             !amdgpu_ras_is_feature_allowed(adev, head))
808                 return 0;
809
810         /* Only enable gfx ras feature from host side */
811         if (head->block == AMDGPU_RAS_BLOCK__GFX &&
812             !amdgpu_sriov_vf(adev) &&
813             !amdgpu_ras_intr_triggered()) {
814                 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
815                 if (!info)
816                         return -ENOMEM;
817
818                 if (!enable) {
819                         info->disable_features = (struct ta_ras_disable_features_input) {
820                                 .block_id =  amdgpu_ras_block_to_ta(head->block),
821                                 .error_type = amdgpu_ras_error_to_ta(head->type),
822                         };
823                 } else {
824                         info->enable_features = (struct ta_ras_enable_features_input) {
825                                 .block_id =  amdgpu_ras_block_to_ta(head->block),
826                                 .error_type = amdgpu_ras_error_to_ta(head->type),
827                         };
828                 }
829
830                 ret = psp_ras_enable_features(&adev->psp, info, enable);
831                 if (ret) {
832                         dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
833                                 enable ? "enable":"disable",
834                                 get_ras_block_str(head),
835                                 amdgpu_ras_is_poison_mode_supported(adev), ret);
836                         kfree(info);
837                         return ret;
838                 }
839
840                 kfree(info);
841         }
842
843         /* setup the obj */
844         __amdgpu_ras_feature_enable(adev, head, enable);
845
846         return 0;
847 }
848
849 /* Only used in device probe stage and called only once. */
850 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
851                 struct ras_common_if *head, bool enable)
852 {
853         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
854         int ret;
855
856         if (!con)
857                 return -EINVAL;
858
859         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
860                 if (enable) {
861                         /* There is no harm in issuing a ras TA cmd regardless of
862                          * the current ras state.
863                          * If current state == target state, it will do nothing.
864                          * But sometimes it requests the driver to reset and repost
865                          * with error code -EAGAIN.
866                          */
867                         ret = amdgpu_ras_feature_enable(adev, head, 1);
868                         /* With old ras TA, we might fail to enable ras.
869                          * Log it and just set up the object.
870                          * TODO: remove this WA in the future.
871                          */
872                         if (ret == -EINVAL) {
873                                 ret = __amdgpu_ras_feature_enable(adev, head, 1);
874                                 if (!ret)
875                                         dev_info(adev->dev,
876                                                 "RAS INFO: %s setup object\n",
877                                                 get_ras_block_str(head));
878                         }
879                 } else {
880                         /* set up the object, then issue a ras TA disable cmd. */
881                         ret = __amdgpu_ras_feature_enable(adev, head, 1);
882                         if (ret)
883                                 return ret;
884
885                         /* gfx block ras disable cmd must be sent to ras-ta */
886                         if (head->block == AMDGPU_RAS_BLOCK__GFX)
887                                 con->features |= BIT(head->block);
888
889                         ret = amdgpu_ras_feature_enable(adev, head, 0);
890
891                         /* clean gfx block ras features flag */
892                         if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
893                                 con->features &= ~BIT(head->block);
894                 }
895         } else
896                 ret = amdgpu_ras_feature_enable(adev, head, enable);
897
898         return ret;
899 }
900
901 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
902                 bool bypass)
903 {
904         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
905         struct ras_manager *obj, *tmp;
906
907         list_for_each_entry_safe(obj, tmp, &con->head, node) {
908                 /* bypass psp.
909                  * aka just release the obj and corresponding flags
910                  */
911                 if (bypass) {
912                         if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
913                                 break;
914                 } else {
915                         if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
916                                 break;
917                 }
918         }
919
920         return con->features;
921 }
922
923 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
924                 bool bypass)
925 {
926         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
927         int i;
928         const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
929
930         for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
931                 struct ras_common_if head = {
932                         .block = i,
933                         .type = default_ras_type,
934                         .sub_block_index = 0,
935                 };
936
937                 if (i == AMDGPU_RAS_BLOCK__MCA)
938                         continue;
939
940                 if (bypass) {
941                         /*
942                          * bypass psp. vbios enables ras for us,
943                          * so just create the obj.
944                          */
945                         if (__amdgpu_ras_feature_enable(adev, &head, 1))
946                                 break;
947                 } else {
948                         if (amdgpu_ras_feature_enable(adev, &head, 1))
949                                 break;
950                 }
951         }
952
953         for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
954                 struct ras_common_if head = {
955                         .block = AMDGPU_RAS_BLOCK__MCA,
956                         .type = default_ras_type,
957                         .sub_block_index = i,
958                 };
959
960                 if (bypass) {
961                         /*
962                          * bypass psp. vbios enables ras for us,
963                          * so just create the obj.
964                          */
965                         if (__amdgpu_ras_feature_enable(adev, &head, 1))
966                                 break;
967                 } else {
968                         if (amdgpu_ras_feature_enable(adev, &head, 1))
969                                 break;
970                 }
971         }
972
973         return con->features;
974 }
975 /* feature ctl end */
976
977 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
978                 enum amdgpu_ras_block block)
979 {
980         if (!block_obj)
981                 return -EINVAL;
982
983         if (block_obj->ras_comm.block == block)
984                 return 0;
985
986         return -EINVAL;
987 }
988
989 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
990                                         enum amdgpu_ras_block block, uint32_t sub_block_index)
991 {
992         struct amdgpu_ras_block_list *node, *tmp;
993         struct amdgpu_ras_block_object *obj;
994
995         if (block >= AMDGPU_RAS_BLOCK__LAST)
996                 return NULL;
997
998         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
999                 if (!node->ras_obj) {
1000                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
1001                         continue;
1002                 }
1003
1004                 obj = node->ras_obj;
1005                 if (obj->ras_block_match) {
1006                         if (obj->ras_block_match(obj, block, sub_block_index) == 0)
1007                                 return obj;
1008                 } else {
1009                         if (amdgpu_ras_block_match_default(obj, block) == 0)
1010                                 return obj;
1011                 }
1012         }
1013
1014         return NULL;
1015 }
1016
1017 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
1018 {
1019         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
1020         int ret = 0;
1021
1022         /*
1023          * choose the right query method according to
1024          * whether the smu supports querying error information
1025          */
1026         ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1027         if (ret == -EOPNOTSUPP) {
1028                 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1029                         adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
1030                         adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
1031
1032                 /* umc query_ras_error_address is also responsible for clearing
1033                  * error status
1034                  */
1035                 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
1036                     adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
1037                         adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
1038         } else if (!ret) {
1039                 if (adev->umc.ras &&
1040                         adev->umc.ras->ecc_info_query_ras_error_count)
1041                         adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
1042
1043                 if (adev->umc.ras &&
1044                         adev->umc.ras->ecc_info_query_ras_error_address)
1045                         adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
1046         }
1047 }
1048
1049 static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
1050                                               struct ras_manager *ras_mgr,
1051                                               struct ras_err_data *err_data,
1052                                               struct ras_query_context *qctx,
1053                                               const char *blk_name,
1054                                               bool is_ue,
1055                                               bool is_de)
1056 {
1057         struct amdgpu_smuio_mcm_config_info *mcm_info;
1058         struct ras_err_node *err_node;
1059         struct ras_err_info *err_info;
1060         u64 event_id = qctx->evid.event_id;
1061
1062         if (is_ue) {
1063                 for_each_ras_error(err_node, err_data) {
1064                         err_info = &err_node->err_info;
1065                         mcm_info = &err_info->mcm_info;
1066                         if (err_info->ue_count) {
1067                                 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1068                                               "%lld new uncorrectable hardware errors detected in %s block\n",
1069                                               mcm_info->socket_id,
1070                                               mcm_info->die_id,
1071                                               err_info->ue_count,
1072                                               blk_name);
1073                         }
1074                 }
1075
1076                 for_each_ras_error(err_node, &ras_mgr->err_data) {
1077                         err_info = &err_node->err_info;
1078                         mcm_info = &err_info->mcm_info;
1079                         RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1080                                       "%lld uncorrectable hardware errors detected in total in %s block\n",
1081                                       mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
1082                 }
1083
1084         } else {
1085                 if (is_de) {
1086                         for_each_ras_error(err_node, err_data) {
1087                                 err_info = &err_node->err_info;
1088                                 mcm_info = &err_info->mcm_info;
1089                                 if (err_info->de_count) {
1090                                         RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1091                                                       "%lld new deferred hardware errors detected in %s block\n",
1092                                                       mcm_info->socket_id,
1093                                                       mcm_info->die_id,
1094                                                       err_info->de_count,
1095                                                       blk_name);
1096                                 }
1097                         }
1098
1099                         for_each_ras_error(err_node, &ras_mgr->err_data) {
1100                                 err_info = &err_node->err_info;
1101                                 mcm_info = &err_info->mcm_info;
1102                                 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1103                                               "%lld deferred hardware errors detected in total in %s block\n",
1104                                               mcm_info->socket_id, mcm_info->die_id,
1105                                               err_info->de_count, blk_name);
1106                         }
1107                 } else {
1108                         for_each_ras_error(err_node, err_data) {
1109                                 err_info = &err_node->err_info;
1110                                 mcm_info = &err_info->mcm_info;
1111                                 if (err_info->ce_count) {
1112                                         RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1113                                                       "%lld new correctable hardware errors detected in %s block\n",
1114                                                       mcm_info->socket_id,
1115                                                       mcm_info->die_id,
1116                                                       err_info->ce_count,
1117                                                       blk_name);
1118                                 }
1119                         }
1120
1121                         for_each_ras_error(err_node, &ras_mgr->err_data) {
1122                                 err_info = &err_node->err_info;
1123                                 mcm_info = &err_info->mcm_info;
1124                                 RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
1125                                               "%lld correctable hardware errors detected in total in %s block\n",
1126                                               mcm_info->socket_id, mcm_info->die_id,
1127                                               err_info->ce_count, blk_name);
1128                         }
1129                 }
1130         }
1131 }
1132
1133 static inline bool err_data_has_source_info(struct ras_err_data *data)
1134 {
1135         return !list_empty(&data->err_node_list);
1136 }
1137
1138 static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
1139                                              struct ras_query_if *query_if,
1140                                              struct ras_err_data *err_data,
1141                                              struct ras_query_context *qctx)
1142 {
1143         struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
1144         const char *blk_name = get_ras_block_str(&query_if->head);
1145         u64 event_id = qctx->evid.event_id;
1146
1147         if (err_data->ce_count) {
1148                 if (err_data_has_source_info(err_data)) {
1149                         amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1150                                                           blk_name, false, false);
1151                 } else if (!adev->aid_mask &&
1152                            adev->smuio.funcs &&
1153                            adev->smuio.funcs->get_socket_id &&
1154                            adev->smuio.funcs->get_die_id) {
1155                         RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1156                                       "%ld correctable hardware errors "
1157                                       "detected in %s block\n",
1158                                       adev->smuio.funcs->get_socket_id(adev),
1159                                       adev->smuio.funcs->get_die_id(adev),
1160                                       ras_mgr->err_data.ce_count,
1161                                       blk_name);
1162                 } else {
1163                         RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
1164                                       "detected in %s block\n",
1165                                       ras_mgr->err_data.ce_count,
1166                                       blk_name);
1167                 }
1168         }
1169
1170         if (err_data->ue_count) {
1171                 if (err_data_has_source_info(err_data)) {
1172                         amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1173                                                           blk_name, true, false);
1174                 } else if (!adev->aid_mask &&
1175                            adev->smuio.funcs &&
1176                            adev->smuio.funcs->get_socket_id &&
1177                            adev->smuio.funcs->get_die_id) {
1178                         RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1179                                       "%ld uncorrectable hardware errors "
1180                                       "detected in %s block\n",
1181                                       adev->smuio.funcs->get_socket_id(adev),
1182                                       adev->smuio.funcs->get_die_id(adev),
1183                                       ras_mgr->err_data.ue_count,
1184                                       blk_name);
1185                 } else {
1186                         RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
1187                                       "detected in %s block\n",
1188                                       ras_mgr->err_data.ue_count,
1189                                       blk_name);
1190                 }
1191         }
1192
1193         if (err_data->de_count) {
1194                 if (err_data_has_source_info(err_data)) {
1195                         amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
1196                                                           blk_name, false, true);
1197                 } else if (!adev->aid_mask &&
1198                            adev->smuio.funcs &&
1199                            adev->smuio.funcs->get_socket_id &&
1200                            adev->smuio.funcs->get_die_id) {
1201                         RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
1202                                       "%ld deferred hardware errors "
1203                                       "detected in %s block\n",
1204                                       adev->smuio.funcs->get_socket_id(adev),
1205                                       adev->smuio.funcs->get_die_id(adev),
1206                                       ras_mgr->err_data.de_count,
1207                                       blk_name);
1208                 } else {
1209                         RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
1210                                       "detected in %s block\n",
1211                                       ras_mgr->err_data.de_count,
1212                                       blk_name);
1213                 }
1214         }
1215 }
1216
1217 static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
1218                                                   struct ras_query_if *query_if,
1219                                                   struct ras_err_data *err_data,
1220                                                   struct ras_query_context *qctx)
1221 {
1222         unsigned long new_ue, new_ce, new_de;
1223         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
1224         const char *blk_name = get_ras_block_str(&query_if->head);
1225         u64 event_id = qctx->evid.event_id;
1226
1227         new_ce = err_data->ce_count - obj->err_data.ce_count;
1228         new_ue = err_data->ue_count - obj->err_data.ue_count;
1229         new_de = err_data->de_count - obj->err_data.de_count;
1230
1231         if (new_ce) {
1232                 RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
1233                               "detected in %s block\n",
1234                               new_ce,
1235                               blk_name);
1236         }
1237
1238         if (new_ue) {
1239                 RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
1240                               "detected in %s block\n",
1241                               new_ue,
1242                               blk_name);
1243         }
1244
1245         if (new_de) {
1246                 RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
1247                               "detected in %s block\n",
1248                               new_de,
1249                               blk_name);
1250         }
1251 }
1252
1253 static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
1254 {
1255         struct ras_err_node *err_node;
1256         struct ras_err_info *err_info;
1257
1258         if (err_data_has_source_info(err_data)) {
1259                 for_each_ras_error(err_node, err_data) {
1260                         err_info = &err_node->err_info;
1261                         amdgpu_ras_error_statistic_de_count(&obj->err_data,
1262                                         &err_info->mcm_info, err_info->de_count);
1263                         amdgpu_ras_error_statistic_ce_count(&obj->err_data,
1264                                         &err_info->mcm_info, err_info->ce_count);
1265                         amdgpu_ras_error_statistic_ue_count(&obj->err_data,
1266                                         &err_info->mcm_info, err_info->ue_count);
1267                 }
1268         } else {
1269                 /* for legacy asic path which doesn't have error source info */
1270                 obj->err_data.ue_count += err_data->ue_count;
1271                 obj->err_data.ce_count += err_data->ce_count;
1272                 obj->err_data.de_count += err_data->de_count;
1273         }
1274 }
1275
1276 static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
1277                                                              struct ras_err_data *err_data)
1278 {
1279         /* Host reports absolute counts */
1280         obj->err_data.ue_count = err_data->ue_count;
1281         obj->err_data.ce_count = err_data->ce_count;
1282         obj->err_data.de_count = err_data->de_count;
1283 }
1284
1285 static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1286 {
1287         struct ras_common_if head;
1288
1289         memset(&head, 0, sizeof(head));
1290         head.block = blk;
1291
1292         return amdgpu_ras_find_obj(adev, &head);
1293 }
1294
1295 int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1296                         const struct aca_info *aca_info, void *data)
1297 {
1298         struct ras_manager *obj;
1299
1300         /* in resume phase, no need to create aca fs node */
1301         if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
1302                 return 0;
1303
1304         obj = get_ras_manager(adev, blk);
1305         if (!obj)
1306                 return -EINVAL;
1307
1308         return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1309 }
1310
1311 int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
1312 {
1313         struct ras_manager *obj;
1314
1315         obj = get_ras_manager(adev, blk);
1316         if (!obj)
1317                 return -EINVAL;
1318
1319         amdgpu_aca_remove_handle(&obj->aca_handle);
1320
1321         return 0;
1322 }
1323
1324 static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
1325                                          enum aca_error_type type, struct ras_err_data *err_data,
1326                                          struct ras_query_context *qctx)
1327 {
1328         struct ras_manager *obj;
1329
1330         obj = get_ras_manager(adev, blk);
1331         if (!obj)
1332                 return -EINVAL;
1333
1334         return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
1335 }
1336
1337 ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
1338                                   struct aca_handle *handle, char *buf, void *data)
1339 {
1340         struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
1341         struct ras_query_if info = {
1342                 .head = obj->head,
1343         };
1344
1345         if (!amdgpu_ras_get_error_query_ready(obj->adev))
1346                 return sysfs_emit(buf, "Query currently inaccessible\n");
1347
1348         if (amdgpu_ras_query_error_status(obj->adev, &info))
1349                 return -EINVAL;
1350
1351         return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
1352                           "ce", info.ce_count, "de", info.de_count);
1353 }
1354
1355 static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
1356                                                 struct ras_query_if *info,
1357                                                 struct ras_err_data *err_data,
1358                                                 struct ras_query_context *qctx,
1359                                                 unsigned int error_query_mode)
1360 {
1361         enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
1362         struct amdgpu_ras_block_object *block_obj = NULL;
1363         int ret;
1364
1365         if (blk == AMDGPU_RAS_BLOCK_COUNT)
1366                 return -EINVAL;
1367
1368         if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
1369                 return -EINVAL;
1370
1371         if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1372                 return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
1373         } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
1374                 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
1375                         amdgpu_ras_get_ecc_info(adev, err_data);
1376                 } else {
1377                         block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
1378                         if (!block_obj || !block_obj->hw_ops) {
1379                                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1380                                              get_ras_block_str(&info->head));
1381                                 return -EINVAL;
1382                         }
1383
1384                         if (block_obj->hw_ops->query_ras_error_count)
1385                                 block_obj->hw_ops->query_ras_error_count(adev, err_data);
1386
1387                         if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
1388                             (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
1389                             (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
1390                                 if (block_obj->hw_ops->query_ras_error_status)
1391                                         block_obj->hw_ops->query_ras_error_status(adev);
1392                         }
1393                 }
1394         } else {
1395                 if (amdgpu_aca_is_enabled(adev)) {
1396                         ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
1397                         if (ret)
1398                                 return ret;
1399
1400                         ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
1401                         if (ret)
1402                                 return ret;
1403
1404                         ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
1405                         if (ret)
1406                                 return ret;
1407                 } else {
1408                         /* FIXME: add code to check return value later */
1409                         amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
1410                         amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
1411                 }
1412         }
1413
1414         return 0;
1415 }
1416
1417 /* query/inject/cure begin */
1418 static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
1419                                                     struct ras_query_if *info,
1420                                                     enum ras_event_type type)
1421 {
1422         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1423         struct ras_err_data err_data;
1424         struct ras_query_context qctx;
1425         unsigned int error_query_mode;
1426         int ret;
1427
1428         if (!obj)
1429                 return -EINVAL;
1430
1431         ret = amdgpu_ras_error_data_init(&err_data);
1432         if (ret)
1433                 return ret;
1434
1435         if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
1436                 return -EINVAL;
1437
1438         memset(&qctx, 0, sizeof(qctx));
1439         qctx.evid.type = type;
1440         qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
1441
1442         if (!down_read_trylock(&adev->reset_domain->sem)) {
1443                 ret = -EIO;
1444                 goto out_fini_err_data;
1445         }
1446
1447         ret = amdgpu_ras_query_error_status_helper(adev, info,
1448                                                    &err_data,
1449                                                    &qctx,
1450                                                    error_query_mode);
1451         up_read(&adev->reset_domain->sem);
1452         if (ret)
1453                 goto out_fini_err_data;
1454
1455         if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
1456                 amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
1457                 amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
1458         } else {
1459                 /* Host provides absolute error counts. First generate the
1460                  * report using the previous VF internal count against the
1461                  * new host count, then update the VF internal count.
1462                  */
1463                 amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
1464                 amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
1465         }
1466
1467         info->ue_count = obj->err_data.ue_count;
1468         info->ce_count = obj->err_data.ce_count;
1469         info->de_count = obj->err_data.de_count;
1470
1471 out_fini_err_data:
1472         amdgpu_ras_error_data_fini(&err_data);
1473
1474         return ret;
1475 }
1476
1477 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
1478 {
1479         return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
1480 }
1481
1482 int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
1483                 enum amdgpu_ras_block block)
1484 {
1485         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1486         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
1487         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
1488
1489         if (!block_obj || !block_obj->hw_ops) {
1490                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1491                                 ras_block_str(block));
1492                 return -EOPNOTSUPP;
1493         }
1494
1495         if (!amdgpu_ras_is_supported(adev, block) ||
1496             !amdgpu_ras_get_aca_debug_mode(adev))
1497                 return -EOPNOTSUPP;
1498
1499         /* skip ras error reset in gpu reset */
1500         if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
1501             ((smu_funcs && smu_funcs->set_debug_mode) ||
1502              (mca_funcs && mca_funcs->mca_set_debug_mode)))
1503                 return -EOPNOTSUPP;
1504
1505         if (block_obj->hw_ops->reset_ras_error_count)
1506                 block_obj->hw_ops->reset_ras_error_count(adev);
1507
1508         return 0;
1509 }
1510
1511 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1512                 enum amdgpu_ras_block block)
1513 {
1514         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1515
1516         if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
1517                 return 0;
1518
1519         if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1520             (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1521                 if (block_obj->hw_ops->reset_ras_error_status)
1522                         block_obj->hw_ops->reset_ras_error_status(adev);
1523         }
1524
1525         return 0;
1526 }
1527
1528 /* wrapper of psp_ras_trigger_error */
1529 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1530                 struct ras_inject_if *info)
1531 {
1532         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1533         struct ta_ras_trigger_error_input block_info = {
1534                 .block_id =  amdgpu_ras_block_to_ta(info->head.block),
1535                 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1536                 .sub_block_index = info->head.sub_block_index,
1537                 .address = info->address,
1538                 .value = info->value,
1539         };
1540         int ret = -EINVAL;
1541         struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1542                                                         info->head.block,
1543                                                         info->head.sub_block_index);
1544
1545         /* inject on guest isn't allowed, return success directly */
1546         if (amdgpu_sriov_vf(adev))
1547                 return 0;
1548
1549         if (!obj)
1550                 return -EINVAL;
1551
1552         if (!block_obj || !block_obj->hw_ops)   {
1553                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1554                              get_ras_block_str(&info->head));
1555                 return -EINVAL;
1556         }
1557
1558         /* Calculate XGMI relative offset */
1559         if (adev->gmc.xgmi.num_physical_nodes > 1 &&
1560             info->head.block != AMDGPU_RAS_BLOCK__GFX) {
1561                 block_info.address =
1562                         amdgpu_xgmi_get_relative_phy_addr(adev,
1563                                                           block_info.address);
1564         }
1565
1566         if (block_obj->hw_ops->ras_error_inject) {
1567                 if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
1568                         ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
1569                 else /* Special ras_error_inject is defined (e.g., xgmi) */
1570                         ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
1571                                                 info->instance_mask);
1572         } else {
1573                 /* default path */
1574                 ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
1575         }
1576
1577         if (ret)
1578                 dev_err(adev->dev, "ras inject %s failed %d\n",
1579                         get_ras_block_str(&info->head), ret);
1580
1581         return ret;
1582 }
1583
1584 /**
1585  * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
1586  * @adev: pointer to AMD GPU device
1587  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1588  * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
1589  * @query_info: pointer to ras_query_if
1590  *
1591  * Return 0 on query success or if there is nothing to do, otherwise
1592  * return an error on failure.
1593  */
1594 static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
1595                                                unsigned long *ce_count,
1596                                                unsigned long *ue_count,
1597                                                struct ras_query_if *query_info)
1598 {
1599         int ret;
1600
1601         if (!query_info)
1602                 /* do nothing if query_info is not specified */
1603                 return 0;
1604
1605         ret = amdgpu_ras_query_error_status(adev, query_info);
1606         if (ret)
1607                 return ret;
1608
1609         *ce_count += query_info->ce_count;
1610         *ue_count += query_info->ue_count;
1611
1612         /* some hardware/IPs support read-to-clear, so there is no need to
1613          * explicitly reset the error status after the query call */
1614         if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
1615             amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
1616                 if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
1617                         dev_warn(adev->dev,
1618                                  "Failed to reset error counter and error status\n");
1619         }
1620
1621         return 0;
1622 }
1623
1624 /**
1625  * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
1626  * @adev: pointer to AMD GPU device
1627  * @ce_count: pointer to an integer to be set to the count of correctable errors.
1628  * @ue_count: pointer to an integer to be set to the count of uncorrectable
1629  * errors.
1630  * @query_info: pointer to ras_query_if if the query request is only for a
1631  * specific ip block; if info is NULL, then the query request is for
1632  * all the ip blocks that support querying ras error counters/status
1633  *
1634  * If @ce_count or @ue_count is set, count and return the corresponding
1635  * error counts in those integer pointers. Return 0 if the device
1636  * supports RAS, or -EOPNOTSUPP if the device doesn't support RAS.
1637  */
1638 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1639                                  unsigned long *ce_count,
1640                                  unsigned long *ue_count,
1641                                  struct ras_query_if *query_info)
1642 {
1643         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1644         struct ras_manager *obj;
1645         unsigned long ce, ue;
1646         int ret = 0;
1647
1648         if (!adev->ras_enabled || !con)
1649                 return -EOPNOTSUPP;
1650
1651         /* Don't count if the caller requested no reporting */
1653         if (!ce_count && !ue_count)
1654                 return 0;
1655
1656         ce = 0;
1657         ue = 0;
1658         if (!query_info) {
1659                 /* query all the ip blocks that support ras query interface */
1660                 list_for_each_entry(obj, &con->head, node) {
1661                         struct ras_query_if info = {
1662                                 .head = obj->head,
1663                         };
1664
1665                         ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1666                 }
1667         } else {
1668                 /* query specific ip block */
1669                 ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1670         }
1671
1672         if (ret)
1673                 return ret;
1674
1675         if (ce_count)
1676                 *ce_count = ce;
1677
1678         if (ue_count)
1679                 *ue_count = ue;
1680
1681         return 0;
1682 }
1683 /* query/inject/cure end */
1684
1685
1686 /* sysfs begin */
1687
1688 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1689                 struct ras_badpage **bps, unsigned int *count);
1690
1691 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1692 {
1693         switch (flags) {
1694         case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1695                 return "R";
1696         case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1697                 return "P";
1698         case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1699         default:
1700                 return "F";
1701         }
1702 }
1703
1704 /**
1705  * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1706  *
1707  * It allows the user to read the bad pages of vram on the gpu through
1708  * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1709  *
1710  * It outputs multiple lines, and each line stands for one gpu page.
1711  *
1712  * The format of each line is as below:
1713  * gpu pfn : gpu page size : flags
1714  *
1715  * gpu pfn and gpu page size are printed in hex format.
1716  * flags can be one of the characters below:
1717  *
1718  * R: reserved, this gpu page is reserved and not available for use.
1719  *
1720  * P: pending for reserve, this gpu page is marked as bad and will be
1721  * reserved in the next window of page_reserve.
1722  *
1723  * F: unable to reserve, this gpu page can't be reserved for some reason.
1724  *
1725  * Examples:
1726  *
1727  * .. code-block:: bash
1728  *
1729  *      0x00000001 : 0x00001000 : R
1730  *      0x00000002 : 0x00001000 : P
1731  *
1732  */
1733
1734 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1735                 struct kobject *kobj, struct bin_attribute *attr,
1736                 char *buf, loff_t ppos, size_t count)
1737 {
1738         struct amdgpu_ras *con =
1739                 container_of(attr, struct amdgpu_ras, badpages_attr);
1740         struct amdgpu_device *adev = con->adev;
1741         const unsigned int element_size =
1742                 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1743         unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1744         unsigned int end = div64_ul(ppos + count - 1, element_size);
1745         ssize_t s = 0;
1746         struct ras_badpage *bps = NULL;
1747         unsigned int bps_count = 0;
1748
1749         memset(buf, 0, count);
1750
1751         if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1752                 return 0;
1753
1754         for (; start < end && start < bps_count; start++)
1755                 s += scnprintf(&buf[s], element_size + 1,
1756                                 "0x%08x : 0x%08x : %1s\n",
1757                                 bps[start].bp,
1758                                 bps[start].size,
1759                                 amdgpu_ras_badpage_flags_str(bps[start].flags));
1760
1761         kfree(bps);
1762
1763         return s;
1764 }
1765
1766 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1767                 struct device_attribute *attr, char *buf)
1768 {
1769         struct amdgpu_ras *con =
1770                 container_of(attr, struct amdgpu_ras, features_attr);
1771
1772         return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1773 }
1774
1775 static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
1776                 struct device_attribute *attr, char *buf)
1777 {
1778         struct amdgpu_ras *con =
1779                 container_of(attr, struct amdgpu_ras, version_attr);
1780         return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1781 }
1782
1783 static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
1784                 struct device_attribute *attr, char *buf)
1785 {
1786         struct amdgpu_ras *con =
1787                 container_of(attr, struct amdgpu_ras, schema_attr);
1788         return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1789 }
1790
1791 static struct {
1792         enum ras_event_type type;
1793         const char *name;
1794 } dump_event[] = {
1795         {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
1796         {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
1797         {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
1798 };
1799
1800 static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
1801                                                  struct device_attribute *attr, char *buf)
1802 {
1803         struct amdgpu_ras *con =
1804                 container_of(attr, struct amdgpu_ras, event_state_attr);
1805         struct ras_event_manager *event_mgr = con->event_mgr;
1806         struct ras_event_state *event_state;
1807         int i, size = 0;
1808
1809         if (!event_mgr)
1810                 return -EINVAL;
1811
1812         size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
1813         for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
1814                 event_state = &event_mgr->event_state[dump_event[i].type];
1815                 size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
1816                                       dump_event[i].name,
1817                                       atomic64_read(&event_state->count),
1818                                       event_state->last_seqno);
1819         }
1820
1821         return (ssize_t)size;
1822 }
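
/*
 * Illustrative note (not from the original sources): the show callbacks above
 * back read-only attributes in the device's "ras" sysfs group. Assuming the
 * same /sys/class/drm/card[0/1/2...]/device/ras path documented for
 * gpu_vram_bad_pages, they could be inspected with, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/features
 *	cat /sys/class/drm/card0/device/ras/version
 *	cat /sys/class/drm/card0/device/ras/schema
 *	cat /sys/class/drm/card0/device/ras/event_state
 */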
1823
1824 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1825 {
1826         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1827
1828         if (adev->dev->kobj.sd)
1829                 sysfs_remove_file_from_group(&adev->dev->kobj,
1830                                 &con->badpages_attr.attr,
1831                                 RAS_FS_NAME);
1832 }
1833
1834 static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
1835 {
1836         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1837         struct attribute *attrs[] = {
1838                 &con->features_attr.attr,
1839                 &con->version_attr.attr,
1840                 &con->schema_attr.attr,
1841                 &con->event_state_attr.attr,
1842                 NULL
1843         };
1844         struct attribute_group group = {
1845                 .name = RAS_FS_NAME,
1846                 .attrs = attrs,
1847         };
1848
1849         if (adev->dev->kobj.sd)
1850                 sysfs_remove_group(&adev->dev->kobj, &group);
1851
1852         return 0;
1853 }
1854
1855 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1856                 struct ras_common_if *head)
1857 {
1858         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1859
1860         if (amdgpu_aca_is_enabled(adev))
1861                 return 0;
1862
1863         if (!obj || obj->attr_inuse)
1864                 return -EINVAL;
1865
1866         get_obj(obj);
1867
1868         snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1869                 "%s_err_count", head->name);
1870
1871         obj->sysfs_attr = (struct device_attribute){
1872                 .attr = {
1873                         .name = obj->fs_data.sysfs_name,
1874                         .mode = S_IRUGO,
1875                 },
1876                         .show = amdgpu_ras_sysfs_read,
1877         };
1878         sysfs_attr_init(&obj->sysfs_attr.attr);
1879
1880         if (sysfs_add_file_to_group(&adev->dev->kobj,
1881                                 &obj->sysfs_attr.attr,
1882                                 RAS_FS_NAME)) {
1883                 put_obj(obj);
1884                 return -EINVAL;
1885         }
1886
1887         obj->attr_inuse = 1;
1888
1889         return 0;
1890 }
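
/*
 * Illustrative note (not from the original sources): for each RAS block,
 * amdgpu_ras_sysfs_create() above adds a "<block>_err_count" attribute to the
 * same "ras" sysfs group, so per-IP counters (when these nodes are created)
 * can be read with, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *	cat /sys/class/drm/card0/device/ras/gfx_err_count
 *
 * The output is expected to mirror the "ue/ce/de" lines emitted by the read
 * handlers in this file (see amdgpu_ras_aca_sysfs_read() above).
 */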
1891
1892 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1893                 struct ras_common_if *head)
1894 {
1895         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1896
1897         if (amdgpu_aca_is_enabled(adev))
1898                 return 0;
1899
1900         if (!obj || !obj->attr_inuse)
1901                 return -EINVAL;
1902
1903         if (adev->dev->kobj.sd)
1904                 sysfs_remove_file_from_group(&adev->dev->kobj,
1905                                 &obj->sysfs_attr.attr,
1906                                 RAS_FS_NAME);
1907         obj->attr_inuse = 0;
1908         put_obj(obj);
1909
1910         return 0;
1911 }
1912
1913 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1914 {
1915         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1916         struct ras_manager *obj, *tmp;
1917
1918         list_for_each_entry_safe(obj, tmp, &con->head, node) {
1919                 amdgpu_ras_sysfs_remove(adev, &obj->head);
1920         }
1921
1922         if (amdgpu_bad_page_threshold != 0)
1923                 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1924
1925         amdgpu_ras_sysfs_remove_dev_attr_node(adev);
1926
1927         return 0;
1928 }
1929 /* sysfs end */
1930
1931 /**
1932  * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1933  *
1934  * Normally when there is an uncorrectable error, the driver will reset
1935  * the GPU to recover.  However, in the event of an unrecoverable error,
1936  * the driver provides an interface to reboot the system automatically
1937  * instead.
1938  *
1939  * The following file in debugfs provides that interface:
1940  * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1941  *
1942  * Usage:
1943  *
1944  * .. code-block:: bash
1945  *
1946  *      echo true > .../ras/auto_reboot
1947  *
1948  */
1949 /* debugfs begin */
1950 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1951 {
1952         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1953         struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
1954         struct drm_minor  *minor = adev_to_drm(adev)->primary;
1955         struct dentry     *dir;
1956
1957         dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1958         debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1959                             &amdgpu_ras_debugfs_ctrl_ops);
1960         debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1961                             &amdgpu_ras_debugfs_eeprom_ops);
1962         debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1963                            &con->bad_page_cnt_threshold);
1964         debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
1965         debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1966         debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1967         debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1968                             &amdgpu_ras_debugfs_eeprom_size_ops);
1969         con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1970                                                        S_IRUGO, dir, adev,
1971                                                        &amdgpu_ras_debugfs_eeprom_table_ops);
1972         amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1973
1974         /*
1975          * After an uncorrectable error happens, GPU recovery is usually
1976          * scheduled. But due to the known problem of GPU recovery failing to
1977          * bring the GPU back, the interface below provides a direct way for
1978          * the user to have the system rebooted automatically in such a case,
1979          * as soon as ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU
1980          * recovery routine will then never be called.
1981          */
1982         debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1983
1984         /*
1985          * The user can set this so that the hardware error count registers
1986          * of the RAS IPs are not cleaned up during ras recovery.
1987          */
1988         debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1989                             &con->disable_ras_err_cnt_harvest);
1990         return dir;
1991 }
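
/*
 * Illustrative note (not from the original sources): besides auto_reboot
 * documented above, the ctrl node helper creates several read-only debugfs
 * entries under the same /sys/kernel/debug/dri/[0/1/2...]/ras/ directory,
 * which can be inspected with, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat /sys/kernel/debug/dri/0/ras/bad_page_cnt_threshold
 *	cat /sys/kernel/debug/dri/0/ras/ras_num_recs
 *	cat /sys/kernel/debug/dri/0/ras/ras_hw_enabled
 *	cat /sys/kernel/debug/dri/0/ras/ras_enabled
 */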
1992
1993 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1994                                       struct ras_fs_if *head,
1995                                       struct dentry *dir)
1996 {
1997         struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1998
1999         if (!obj || !dir)
2000                 return;
2001
2002         get_obj(obj);
2003
2004         memcpy(obj->fs_data.debugfs_name,
2005                         head->debugfs_name,
2006                         sizeof(obj->fs_data.debugfs_name));
2007
2008         debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
2009                             obj, &amdgpu_ras_debugfs_ops);
2010 }
2011
2012 static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
2013 {
2014         bool ret;
2015
2016         switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2017         case IP_VERSION(13, 0, 6):
2018         case IP_VERSION(13, 0, 14):
2019                 ret = true;
2020                 break;
2021         default:
2022                 ret = false;
2023                 break;
2024         }
2025
2026         return ret;
2027 }
2028
2029 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
2030 {
2031         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2032         struct dentry *dir;
2033         struct ras_manager *obj;
2034         struct ras_fs_if fs_info;
2035
2036         /*
2037          * it won't be called in the resume path, so there is no need to
2038          * check the suspend and gpu reset status
2039          */
2040         if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
2041                 return;
2042
2043         dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
2044
2045         list_for_each_entry(obj, &con->head, node) {
2046                 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
2047                         (obj->attr_inuse == 1)) {
2048                         sprintf(fs_info.debugfs_name, "%s_err_inject",
2049                                         get_ras_block_str(&obj->head));
2050                         fs_info.head = obj->head;
2051                         amdgpu_ras_debugfs_create(adev, &fs_info, dir);
2052                 }
2053         }
2054
2055         if (amdgpu_ras_aca_is_supported(adev)) {
2056                 if (amdgpu_aca_is_enabled(adev))
2057                         amdgpu_aca_smu_debugfs_init(adev, dir);
2058                 else
2059                         amdgpu_mca_smu_debugfs_init(adev, dir);
2060         }
2061 }
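
/*
 * Illustrative note (not from the original sources): the per-block
 * "<block>_err_inject" debugfs files created above land in the same ras
 * debugfs directory, so the set of injectable blocks on a given board can be
 * listed with, e.g.:
 *
 * .. code-block:: bash
 *
 *	ls /sys/kernel/debug/dri/0/ras/ | grep err_inject
 */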
2062
2063 /* debugfs end */
2064
2065 /* ras fs */
2066 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
2067                 amdgpu_ras_sysfs_badpages_read, NULL, 0);
2068 static DEVICE_ATTR(features, S_IRUGO,
2069                 amdgpu_ras_sysfs_features_read, NULL);
2070 static DEVICE_ATTR(version, 0444,
2071                 amdgpu_ras_sysfs_version_show, NULL);
2072 static DEVICE_ATTR(schema, 0444,
2073                 amdgpu_ras_sysfs_schema_show, NULL);
2074 static DEVICE_ATTR(event_state, 0444,
2075                    amdgpu_ras_sysfs_event_state_show, NULL);
2076 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
2077 {
2078         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2079         struct attribute_group group = {
2080                 .name = RAS_FS_NAME,
2081         };
2082         struct attribute *attrs[] = {
2083                 &con->features_attr.attr,
2084                 &con->version_attr.attr,
2085                 &con->schema_attr.attr,
2086                 &con->event_state_attr.attr,
2087                 NULL
2088         };
2089         struct bin_attribute *bin_attrs[] = {
2090                 NULL,
2091                 NULL,
2092         };
2093         int r;
2094
2095         group.attrs = attrs;
2096
2097         /* add features entry */
2098         con->features_attr = dev_attr_features;
2099         sysfs_attr_init(attrs[0]);
2100
2101         /* add version entry */
2102         con->version_attr = dev_attr_version;
2103         sysfs_attr_init(attrs[1]);
2104
2105         /* add schema entry */
2106         con->schema_attr = dev_attr_schema;
2107         sysfs_attr_init(attrs[2]);
2108
2109         /* add event_state entry */
2110         con->event_state_attr = dev_attr_event_state;
2111         sysfs_attr_init(attrs[3]);
2112
2113         if (amdgpu_bad_page_threshold != 0) {
2114                 /* add bad_page_features entry */
2115                 bin_attr_gpu_vram_bad_pages.private = NULL;
2116                 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
2117                 bin_attrs[0] = &con->badpages_attr;
2118                 group.bin_attrs = bin_attrs;
2119                 sysfs_bin_attr_init(bin_attrs[0]);
2120         }
2121
2122         r = sysfs_create_group(&adev->dev->kobj, &group);
2123         if (r)
2124                 dev_err(adev->dev, "Failed to create RAS sysfs group!");
2125
2126         return 0;
2127 }
2128
2129 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
2130 {
2131         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2132         struct ras_manager *con_obj, *ip_obj, *tmp;
2133
2134         if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2135                 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
2136                         ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
2137                         if (ip_obj)
2138                                 put_obj(ip_obj);
2139                 }
2140         }
2141
2142         amdgpu_ras_sysfs_remove_all(adev);
2143         return 0;
2144 }
2145 /* ras fs end */
2146
2147 /* ih begin */
2148
2149 /* For hardware that cannot enable the bif ring for both the ras_controller_irq
2150  * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
2151  * register to check whether the interrupt is triggered or not, and properly
2152  * ack the interrupt if it is there
2153  */
2154 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
2155 {
2156         /* Fatal error events are handled on host side */
2157         if (amdgpu_sriov_vf(adev))
2158                 return;
2159         /**
2160          * If the current interrupt is caused by a non-fatal RAS error, skip
2161          * check for fatal error. For fatal errors, FED status of all devices
2162          * in XGMI hive gets set when the first device gets fatal error
2163          * interrupt. The error gets propagated to other devices as well, so
2164          * make sure to ack the interrupt regardless of FED status.
2165          */
2166         if (!amdgpu_ras_get_fed_status(adev) &&
2167             amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
2168                 return;
2169
2170         if (adev->nbio.ras &&
2171             adev->nbio.ras->handle_ras_controller_intr_no_bifring)
2172                 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
2173
2174         if (adev->nbio.ras &&
2175             adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
2176                 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
2177 }
2178
2179 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
2180                                 struct amdgpu_iv_entry *entry)
2181 {
2182         bool poison_stat = false;
2183         struct amdgpu_device *adev = obj->adev;
2184         struct amdgpu_ras_block_object *block_obj =
2185                 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2186         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2187         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
2188         u64 event_id;
2189         int ret;
2190
2191         if (!block_obj || !con)
2192                 return;
2193
2194         ret = amdgpu_ras_mark_ras_event(adev, type);
2195         if (ret)
2196                 return;
2197
2198         amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
2199         /* both query_poison_status and handle_poison_consumption are optional,
2200          * but at least one of them should be implemented if we need a poison
2201          * consumption handler
2202          */
2203         if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
2204                 poison_stat = block_obj->hw_ops->query_poison_status(adev);
2205                 if (!poison_stat) {
2206                         /* Not poison consumption interrupt, no need to handle it */
2207                         dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
2208                                         block_obj->ras_comm.name);
2209
2210                         return;
2211                 }
2212         }
2213
2214         amdgpu_umc_poison_handler(adev, obj->head.block, 0);
2215
2216         if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
2217                 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
2218
2219         /* gpu reset is the fallback for failed and default cases.
2220          * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
2221          */
2222         if (poison_stat && !amdgpu_ras_is_rma(adev)) {
2223                 event_id = amdgpu_ras_acquire_event_id(adev, type);
2224                 RAS_EVENT_LOG(adev, event_id,
2225                               "GPU reset for %s RAS poison consumption is issued!\n",
2226                               block_obj->ras_comm.name);
2227                 amdgpu_ras_reset_gpu(adev);
2228         }
2229
2230         if (!poison_stat)
2231                 amdgpu_gfx_poison_consumption_handler(adev, entry);
2232 }
2233
2234 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
2235                                 struct amdgpu_iv_entry *entry)
2236 {
2237         struct amdgpu_device *adev = obj->adev;
2238         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
2239         u64 event_id;
2240         int ret;
2241
2242         ret = amdgpu_ras_mark_ras_event(adev, type);
2243         if (ret)
2244                 return;
2245
2246         event_id = amdgpu_ras_acquire_event_id(adev, type);
2247         RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
2248
2249         if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
2250                 struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
2251
2252                 atomic_inc(&con->page_retirement_req_cnt);
2253                 atomic_inc(&con->poison_creation_count);
2254
2255                 wake_up(&con->page_retirement_wq);
2256         }
2257 }
2258
2259 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
2260                                 struct amdgpu_iv_entry *entry)
2261 {
2262         struct ras_ih_data *data = &obj->ih_data;
2263         struct ras_err_data err_data;
2264         int ret;
2265
2266         if (!data->cb)
2267                 return;
2268
2269         ret = amdgpu_ras_error_data_init(&err_data);
2270         if (ret)
2271                 return;
2272
2273         /* Let the IP handle its data; maybe we need to get the output
2274          * from the callback to update the error type/count, etc.
2275          */
2276         amdgpu_ras_set_fed(obj->adev, true);
2277         ret = data->cb(obj->adev, &err_data, entry);
2278         /* ue will trigger an interrupt, and in that case
2279          * we need to do a reset to recover the whole system.
2280          * But leave it to the IP to do that recovery; here we just dispatch
2281          * the error.
2282          */
2283         if (ret == AMDGPU_RAS_SUCCESS) {
2284                 /* these counts could be left as 0 if
2285                  * some blocks do not count the number of errors
2286                  */
2287                 obj->err_data.ue_count += err_data.ue_count;
2288                 obj->err_data.ce_count += err_data.ce_count;
2289                 obj->err_data.de_count += err_data.de_count;
2290         }
2291
2292         amdgpu_ras_error_data_fini(&err_data);
2293 }
2294
2295 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
2296 {
2297         struct ras_ih_data *data = &obj->ih_data;
2298         struct amdgpu_iv_entry entry;
2299
2300         while (data->rptr != data->wptr) {
2301                 rmb();
2302                 memcpy(&entry, &data->ring[data->rptr],
2303                                 data->element_size);
2304
2305                 wmb();
2306                 data->rptr = (data->aligned_element_size +
2307                                 data->rptr) % data->ring_size;
2308
2309                 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
2310                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2311                                 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
2312                         else
2313                                 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
2314                 } else {
2315                         if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
2316                                 amdgpu_ras_interrupt_umc_handler(obj, &entry);
2317                         else
2318                                 dev_warn(obj->adev->dev,
2319                                         "No RAS interrupt handler for non-UMC block with poison disabled.\n");
2320                 }
2321         }
2322 }
2323
2324 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
2325 {
2326         struct ras_ih_data *data =
2327                 container_of(work, struct ras_ih_data, ih_work);
2328         struct ras_manager *obj =
2329                 container_of(data, struct ras_manager, ih_data);
2330
2331         amdgpu_ras_interrupt_handler(obj);
2332 }
2333
2334 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
2335                 struct ras_dispatch_if *info)
2336 {
2337         struct ras_manager *obj;
2338         struct ras_ih_data *data;
2339
2340         obj = amdgpu_ras_find_obj(adev, &info->head);
2341         if (!obj)
2342                 return -EINVAL;
2343
2344         data = &obj->ih_data;
2345
2346         if (data->inuse == 0)
2347                 return 0;
2348
2349         /* The ring might overflow... */
2350         memcpy(&data->ring[data->wptr], info->entry,
2351                         data->element_size);
2352
2353         wmb();
2354         data->wptr = (data->aligned_element_size +
2355                         data->wptr) % data->ring_size;
2356
2357         schedule_work(&data->ih_work);
2358
2359         return 0;
2360 }
2361
2362 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
2363                 struct ras_common_if *head)
2364 {
2365         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2366         struct ras_ih_data *data;
2367
2368         if (!obj)
2369                 return -EINVAL;
2370
2371         data = &obj->ih_data;
2372         if (data->inuse == 0)
2373                 return 0;
2374
2375         cancel_work_sync(&data->ih_work);
2376
2377         kfree(data->ring);
2378         memset(data, 0, sizeof(*data));
2379         put_obj(obj);
2380
2381         return 0;
2382 }
2383
2384 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
2385                 struct ras_common_if *head)
2386 {
2387         struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
2388         struct ras_ih_data *data;
2389         struct amdgpu_ras_block_object *ras_obj;
2390
2391         if (!obj) {
2392                 /* in case we register the IH before enabling the ras feature */
2393                 obj = amdgpu_ras_create_obj(adev, head);
2394                 if (!obj)
2395                         return -EINVAL;
2396         } else
2397                 get_obj(obj);
2398
2399         ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
2400
2401         data = &obj->ih_data;
2402         /* add the callback, etc. */
2403         *data = (struct ras_ih_data) {
2404                 .inuse = 0,
2405                 .cb = ras_obj->ras_cb,
2406                 .element_size = sizeof(struct amdgpu_iv_entry),
2407                 .rptr = 0,
2408                 .wptr = 0,
2409         };
2410
2411         INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
2412
2413         data->aligned_element_size = ALIGN(data->element_size, 8);
2414         /* the ring can store 64 iv entries. */
2415         data->ring_size = 64 * data->aligned_element_size;
2416         data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2417         if (!data->ring) {
2418                 put_obj(obj);
2419                 return -ENOMEM;
2420         }
2421
2422         /* IH is ready */
2423         data->inuse = 1;
2424
2425         return 0;
2426 }
2427
2428 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
2429 {
2430         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2431         struct ras_manager *obj, *tmp;
2432
2433         list_for_each_entry_safe(obj, tmp, &con->head, node) {
2434                 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2435         }
2436
2437         return 0;
2438 }
2439 /* ih end */
2440
2441 /* traverse all IPs except NBIO to query the error counters */
2442 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
2443 {
2444         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2445         struct ras_manager *obj;
2446
2447         if (!adev->ras_enabled || !con)
2448                 return;
2449
2450         list_for_each_entry(obj, &con->head, node) {
2451                 struct ras_query_if info = {
2452                         .head = obj->head,
2453                 };
2454
2455                 /*
2456                  * The PCIE_BIF IP has a separate isr for the ras controller
2457                  * interrupt, and the specific ras counter query will be
2458                  * done in that isr. So skip such blocks from the common
2459                  * sync flood interrupt isr.
2460                  */
2461                 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
2462                         continue;
2463
2464                 /*
2465                  * this is a workaround for aldebaran: skip sending the msg
2466                  * to smu to get the ecc_info table, because smu temporarily
2467                  * fails to handle that request.
2468                  * It should be removed once smu fixes the ecc_info table handling.
2469                  */
2470                 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
2471                     (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2472                      IP_VERSION(13, 0, 2)))
2473                         continue;
2474
2475                 amdgpu_ras_query_error_status_with_event(adev, &info, type);
2476
2477                 if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2478                             IP_VERSION(11, 0, 2) &&
2479                     amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2480                             IP_VERSION(11, 0, 4) &&
2481                     amdgpu_ip_version(adev, MP0_HWIP, 0) !=
2482                             IP_VERSION(13, 0, 0)) {
2483                         if (amdgpu_ras_reset_error_status(adev, info.head.block))
2484                                 dev_warn(adev->dev, "Failed to reset error counter and error status");
2485                 }
2486         }
2487 }
2488
2489 /* Parse RdRspStatus and WrRspStatus */
2490 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
2491                                           struct ras_query_if *info)
2492 {
2493         struct amdgpu_ras_block_object *block_obj;
2494         /*
2495          * Only two blocks need to query read/write
2496          * RspStatus at the current state
2497          */
2498         if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
2499                 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
2500                 return;
2501
2502         block_obj = amdgpu_ras_get_ras_block(adev,
2503                                         info->head.block,
2504                                         info->head.sub_block_index);
2505
2506         if (!block_obj || !block_obj->hw_ops) {
2507                 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
2508                              get_ras_block_str(&info->head));
2509                 return;
2510         }
2511
2512         if (block_obj->hw_ops->query_ras_error_status)
2513                 block_obj->hw_ops->query_ras_error_status(adev);
2514
2515 }
2516
2517 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
2518 {
2519         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2520         struct ras_manager *obj;
2521
2522         if (!adev->ras_enabled || !con)
2523                 return;
2524
2525         list_for_each_entry(obj, &con->head, node) {
2526                 struct ras_query_if info = {
2527                         .head = obj->head,
2528                 };
2529
2530                 amdgpu_ras_error_status_query(adev, &info);
2531         }
2532 }
2533
2534 /* recovery begin */
2535
2536 /* return 0 on success.
2537  * the caller needs to free bps.
2538  */
2539 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
2540                 struct ras_badpage **bps, unsigned int *count)
2541 {
2542         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2543         struct ras_err_handler_data *data;
2544         int i = 0;
2545         int ret = 0, status;
2546
2547         if (!con || !con->eh_data || !bps || !count)
2548                 return -EINVAL;
2549
2550         mutex_lock(&con->recovery_lock);
2551         data = con->eh_data;
2552         if (!data || data->count == 0) {
2553                 *bps = NULL;
2554                 ret = -EINVAL;
2555                 goto out;
2556         }
2557
2558         *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2559         if (!*bps) {
2560                 ret = -ENOMEM;
2561                 goto out;
2562         }
2563
2564         for (; i < data->count; i++) {
2565                 (*bps)[i] = (struct ras_badpage){
2566                         .bp = data->bps[i].retired_page,
2567                         .size = AMDGPU_GPU_PAGE_SIZE,
2568                         .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
2569                 };
2570                 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
2571                                 data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
2572                 if (status == -EBUSY)
2573                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
2574                 else if (status == -ENOENT)
2575                         (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
2576         }
2577
2578         *count = data->count;
2579 out:
2580         mutex_unlock(&con->recovery_lock);
2581         return ret;
2582 }
2583
2584 static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
2585                                    struct amdgpu_hive_info *hive, bool status)
2586 {
2587         struct amdgpu_device *tmp_adev;
2588
2589         if (hive) {
2590                 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
2591                         amdgpu_ras_set_fed(tmp_adev, status);
2592         } else {
2593                 amdgpu_ras_set_fed(adev, status);
2594         }
2595 }
2596
2597 bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
2598 {
2599         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2600         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2601         int hive_ras_recovery = 0;
2602
2603         if (hive) {
2604                 hive_ras_recovery = atomic_read(&hive->ras_recovery);
2605                 amdgpu_put_xgmi_hive(hive);
2606         }
2607
2608         if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
2609                 return true;
2610
2611         return false;
2612 }
2613
2614 static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
2615 {
2616         if (amdgpu_ras_intr_triggered())
2617                 return RAS_EVENT_TYPE_FATAL;
2618         else
2619                 return RAS_EVENT_TYPE_POISON_CONSUMPTION;
2620 }
2621
2622 static void amdgpu_ras_do_recovery(struct work_struct *work)
2623 {
2624         struct amdgpu_ras *ras =
2625                 container_of(work, struct amdgpu_ras, recovery_work);
2626         struct amdgpu_device *remote_adev = NULL;
2627         struct amdgpu_device *adev = ras->adev;
2628         struct list_head device_list, *device_list_handle =  NULL;
2629         struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2630         enum ras_event_type type;
2631
2632         if (hive) {
2633                 atomic_set(&hive->ras_recovery, 1);
2634
2635                 /* If any device which is part of the hive received a RAS fatal
2636                  * error interrupt, set the fatal error status on all. This
2637                  * condition requires a recovery, and the flag will be cleared
2638                  * as part of that recovery.
2639                  */
2640                 list_for_each_entry(remote_adev, &hive->device_list,
2641                                     gmc.xgmi.head)
2642                         if (amdgpu_ras_get_fed_status(remote_adev)) {
2643                                 amdgpu_ras_set_fed_all(adev, hive, true);
2644                                 break;
2645                         }
2646         }
2647         if (!ras->disable_ras_err_cnt_harvest) {
2648
2649                 /* Build list of devices to query RAS related errors */
2650                 if  (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
2651                         device_list_handle = &hive->device_list;
2652                 } else {
2653                         INIT_LIST_HEAD(&device_list);
2654                         list_add_tail(&adev->gmc.xgmi.head, &device_list);
2655                         device_list_handle = &device_list;
2656                 }
2657
2658                 type = amdgpu_ras_get_fatal_error_event(adev);
2659                 list_for_each_entry(remote_adev,
2660                                 device_list_handle, gmc.xgmi.head) {
2661                         amdgpu_ras_query_err_status(remote_adev);
2662                         amdgpu_ras_log_on_err_counter(remote_adev, type);
2663                 }
2664
2665         }
2666
2667         if (amdgpu_device_should_recover_gpu(ras->adev)) {
2668                 struct amdgpu_reset_context reset_context;
2669                 memset(&reset_context, 0, sizeof(reset_context));
2670
2671                 reset_context.method = AMD_RESET_METHOD_NONE;
2672                 reset_context.reset_req_dev = adev;
2673                 reset_context.src = AMDGPU_RESET_SRC_RAS;
2674                 set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2675
2676                 /* Perform full reset in fatal error mode */
2677                 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
2678                         set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2679                 else {
2680                         clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2681
2682                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
2683                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
2684                                 reset_context.method = AMD_RESET_METHOD_MODE2;
2685                         }
2686
2687                         /* A fatal error occurred in poison mode; mode1 reset is used to
2688                          * recover the gpu.
2689                          */
2690                         if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
2691                                 ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
2692                                 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2693
2694                                 psp_fatal_error_recovery_quirk(&adev->psp);
2695                         }
2696                 }
2697
2698                 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
2699         }
2700         atomic_set(&ras->in_recovery, 0);
2701         if (hive) {
2702                 atomic_set(&hive->ras_recovery, 0);
2703                 amdgpu_put_xgmi_hive(hive);
2704         }
2705 }
2706
2707 /* alloc/realloc bps array */
2708 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
2709                 struct ras_err_handler_data *data, int pages)
2710 {
2711         unsigned int old_space = data->count + data->space_left;
2712         unsigned int new_space = old_space + pages;
2713         unsigned int align_space = ALIGN(new_space, 512);
2714         void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
2715
2716         if (!bps) {
2717                 return -ENOMEM;
2718         }
2719
2720         if (data->bps) {
2721                 memcpy(bps, data->bps,
2722                                 data->count * sizeof(*data->bps));
2723                 kfree(data->bps);
2724         }
2725
2726         data->bps = bps;
2727         data->space_left += align_space - old_space;
2728         return 0;
2729 }
2730
2731 static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
2732                         struct eeprom_table_record *bps,
2733                         struct ras_err_data *err_data)
2734 {
2735         struct ta_ras_query_address_input addr_in;
2736         uint32_t socket = 0;
2737         int ret = 0;
2738
2739         if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2740                 socket = adev->smuio.funcs->get_socket_id(adev);
2741
2742         /* reinit err_data */
2743         err_data->err_addr_cnt = 0;
2744         err_data->err_addr_len = adev->umc.retire_unit;
2745
2746         memset(&addr_in, 0, sizeof(addr_in));
2747         addr_in.ma.err_addr = bps->address;
2748         addr_in.ma.socket_id = socket;
2749         addr_in.ma.ch_inst = bps->mem_channel;
2750         /* tell RAS TA the node instance is not used */
2751         addr_in.ma.node_inst = TA_RAS_INV_NODE;
2752
2753         if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2754                 ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
2755                                 &addr_in, NULL, false);
2756
2757         return ret;
2758 }
2759
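/* Convert a legacy eeprom record to physical pages via the RAS TA, deriving
 * the die id from the retired page's physical address.
 */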
2760 static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
2761                         struct eeprom_table_record *bps,
2762                         struct ras_err_data *err_data)
2763 {
2764         struct ta_ras_query_address_input addr_in;
2765         uint32_t die_id, socket = 0;
2766
2767         if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
2768                 socket = adev->smuio.funcs->get_socket_id(adev);
2769
2770         /* although the die id is derived from the PA in nps1 mode,
2771          * the id is valid for any nps mode
2772          */
2773         if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
2774                 die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
2775                                         bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
2776         else
2777                 return -EINVAL;
2778
2779         /* reinit err_data */
2780         err_data->err_addr_cnt = 0;
2781         err_data->err_addr_len = adev->umc.retire_unit;
2782
2783         memset(&addr_in, 0, sizeof(addr_in));
2784         addr_in.ma.err_addr = bps->address;
2785         addr_in.ma.ch_inst = bps->mem_channel;
2786         addr_in.ma.umc_inst = bps->mcumc_id;
2787         addr_in.ma.node_inst = die_id;
2788         addr_in.ma.socket_id = socket;
2789
2790         if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
2791                 return adev->umc.ras->convert_ras_err_addr(adev, err_data,
2792                                         &addr_in, NULL, false);
2793         else
2794                 return -EINVAL;
2795 }
2796
2797 /* it deals with vram only. */
2798 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
2799                 struct eeprom_table_record *bps, int pages, bool from_rom)
2800 {
2801         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2802         struct ras_err_handler_data *data;
2803         struct ras_err_data err_data;
2804         struct eeprom_table_record *err_rec;
2805         struct amdgpu_ras_eeprom_control *control =
2806                         &adev->psp.ras_context.ras->eeprom_control;
2807         enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
2808         int ret = 0;
2809         uint32_t i, j, loop_cnt = 1;
2810         bool find_pages_per_pa = false;
2811
2812         if (!con || !con->eh_data || !bps || pages <= 0)
2813                 return 0;
2814
2815         if (from_rom) {
2816                 err_data.err_addr =
2817                         kcalloc(adev->umc.retire_unit,
2818                                 sizeof(struct eeprom_table_record), GFP_KERNEL);
2819                 if (!err_data.err_addr) {
2820                         dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
2821                         ret = -ENOMEM;
2822                         goto out;
2823                 }
2824
2825                 err_rec = err_data.err_addr;
2826                 loop_cnt = adev->umc.retire_unit;
2827                 if (adev->gmc.gmc_funcs->query_mem_partition_mode)
2828                         nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
2829         }
2830
2831         mutex_lock(&con->recovery_lock);
2832         data = con->eh_data;
2833         if (!data)
2834                 goto free;
2835
2836         for (i = 0; i < pages; i++) {
2837                 if (from_rom &&
2838                     control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA) {
2839                         if (!find_pages_per_pa) {
2840                                 if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data)) {
2841                                         if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
2842                                                 /* may be using an old RAS TA; use the PA to find
2843                                                  * the pages in one row
2844                                                  */
2845                                                 if (amdgpu_umc_pages_in_a_row(adev, &err_data,
2846                                                                 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2847                                                         goto free;
2848                                                 else
2849                                                         find_pages_per_pa = true;
2850                                         } else {
2851                                                 /* unsupported cases */
2852                                                 goto free;
2853                                         }
2854                                 }
2855                         } else {
2856                                 if (amdgpu_umc_pages_in_a_row(adev, &err_data,
2857                                                 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2858                                         goto free;
2859                         }
2860                 } else {
2861                         if (from_rom && !find_pages_per_pa) {
2862                                 if (bps[i].retired_page & UMC_CHANNEL_IDX_V2) {
2863                                         /* bad page in any NPS mode in eeprom */
2864                                         if (amdgpu_ras_mca2pa_by_idx(adev, &bps[i], &err_data))
2865                                                 goto free;
2866                                 } else {
2867                                         /* legacy bad page in eeprom, generated only in
2868                                          * NPS1 mode
2869                                          */
2870                                         if (amdgpu_ras_mca2pa(adev, &bps[i], &err_data)) {
2871                                                 /* old RAS TA or ASICs which can't convert the
2872                                                  * address via the mca address
2873                                                  */
2874                                                 if (!i && nps == AMDGPU_NPS1_PARTITION_MODE) {
2875                                                         find_pages_per_pa = true;
2876                                                         err_rec = &bps[i];
2877                                                         loop_cnt = 1;
2878                                                 } else {
2879                                                 /* non-nps1 mode; the old RAS TA
2880                                                  * can't support it
2881                                                  */
2882                                                         goto free;
2883                                                 }
2884                                         }
2885                                 }
2886
2887                                 if (!find_pages_per_pa)
2888                                         i += (adev->umc.retire_unit - 1);
2889                         } else {
2890                                 err_rec = &bps[i];
2891                         }
2892                 }
2893
2894                 for (j = 0; j < loop_cnt; j++) {
2895                         if (amdgpu_ras_check_bad_page_unlock(con,
2896                                 err_rec[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2897                                 continue;
2898
2899                         if (!data->space_left &&
2900                             amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2901                                 ret = -ENOMEM;
2902                                 goto free;
2903                         }
2904
2905                         amdgpu_ras_reserve_page(adev, err_rec[j].retired_page);
2906
2907                         memcpy(&data->bps[data->count], &(err_rec[j]),
2908                                         sizeof(struct eeprom_table_record));
2909                         data->count++;
2910                         data->space_left--;
2911                 }
2912         }
2913
2914 free:
2915         if (from_rom)
2916                 kfree(err_data.err_addr);
2917 out:
2918         mutex_unlock(&con->recovery_lock);
2919
2920         return ret;
2921 }
2922
2923 /*
2924  * write the error record array to eeprom; the function should be
2925  * protected by recovery_lock
2926  * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
2927  */
2928 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
2929                 unsigned long *new_cnt)
2930 {
2931         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2932         struct ras_err_handler_data *data;
2933         struct amdgpu_ras_eeprom_control *control;
2934         int save_count, unit_num, bad_page_num, i;
2935
2936         if (!con || !con->eh_data) {
2937                 if (new_cnt)
2938                         *new_cnt = 0;
2939
2940                 return 0;
2941         }
2942
2943         mutex_lock(&con->recovery_lock);
2944         control = &con->eeprom_control;
2945         data = con->eh_data;
2946         bad_page_num = control->ras_num_recs;
2947         /* one record on eeprom stands for all pages in one memory row
2948          * in this mode
2949          */
2950         if (control->rec_type == AMDGPU_RAS_EEPROM_REC_MCA)
2951                 bad_page_num = control->ras_num_recs * adev->umc.retire_unit;
2952
2953         save_count = data->count - bad_page_num;
2954         mutex_unlock(&con->recovery_lock);
2955
2956         unit_num = save_count / adev->umc.retire_unit;
2957         if (new_cnt)
2958                 *new_cnt = unit_num;
2959
2960         /* only new entries are saved */
2961         if (save_count > 0) {
2962                 if (control->rec_type == AMDGPU_RAS_EEPROM_REC_PA) {
2963                         if (amdgpu_ras_eeprom_append(control,
2964                                                      &data->bps[control->ras_num_recs],
2965                                                      save_count)) {
2966                                 dev_err(adev->dev, "Failed to save EEPROM table data!");
2967                                 return -EIO;
2968                         }
2969                 } else {
2970                         for (i = 0; i < unit_num; i++) {
2971                                 if (amdgpu_ras_eeprom_append(control,
2972                                                 &data->bps[bad_page_num + i * adev->umc.retire_unit],
2973                                                 1)) {
2974                                         dev_err(adev->dev, "Failed to save EEPROM table data!");
2975                                         return -EIO;
2976                                 }
2977                         }
2978                 }
2979
2980                 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2981         }
2982
2983         return 0;
2984 }
2985
2986 /*
2987  * read error record array in eeprom and reserve enough space for
2988  * storing new bad pages
2989  */
2990 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2991 {
2992         struct amdgpu_ras_eeprom_control *control =
2993                 &adev->psp.ras_context.ras->eeprom_control;
2994         struct eeprom_table_record *bps;
2995         int ret;
2996
2997         /* no bad page record, skip eeprom access */
2998         if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2999                 return 0;
3000
3001         bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
3002         if (!bps)
3003                 return -ENOMEM;
3004
3005         ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
3006         if (ret) {
3007                 dev_err(adev->dev, "Failed to load EEPROM table records!");
3008         } else {
3009                 if (control->ras_num_recs > 1 &&
3010                     adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
3011                         if ((bps[0].address == bps[1].address) &&
3012                             (bps[0].mem_channel == bps[1].mem_channel))
3013                                 control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
3014                         else
3015                                 control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
3016                 }
3017
3018                 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
3019         }
3020
3021         kfree(bps);
3022         return ret;
3023 }
3024
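/* Check whether @addr is already tracked as a retired page; the caller must
 * hold recovery_lock.
 */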
3025 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
3026                                 uint64_t addr)
3027 {
3028         struct ras_err_handler_data *data = con->eh_data;
3029         int i;
3030
3031         addr >>= AMDGPU_GPU_PAGE_SHIFT;
3032         for (i = 0; i < data->count; i++)
3033                 if (addr == data->bps[i].retired_page)
3034                         return true;
3035
3036         return false;
3037 }
3038
3039 /*
3040  * check if an address belongs to a bad page
3041  *
3042  * Note: this check is only for the umc block
3043  */
3044 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
3045                                 uint64_t addr)
3046 {
3047         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3048         bool ret = false;
3049
3050         if (!con || !con->eh_data)
3051                 return ret;
3052
3053         mutex_lock(&con->recovery_lock);
3054         ret = amdgpu_ras_check_bad_page_unlock(con, addr);
3055         mutex_unlock(&con->recovery_lock);
3056         return ret;
3057 }
3058
3059 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
3060                                           uint32_t max_count)
3061 {
3062         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3063
3064         /*
3065          * Justification of the bad_page_cnt_threshold value in the ras structure
3066          *
3067          * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
3068          * in eeprom, or amdgpu_bad_page_threshold == -2, which gives two
3069          * scenarios:
3070          *
3071          * Bad page retirement enabled:
3072          *    - If amdgpu_bad_page_threshold = -2,
3073          *      bad_page_cnt_threshold is derived from VRAM size by formula.
3074          *
3075          *    - When the value from the user is 0 < amdgpu_bad_page_threshold <
3076          *      max record length in eeprom, use it directly.
3077          *
3078          * Bad page retirement disabled:
3079          *    - If amdgpu_bad_page_threshold = 0, bad page retirement
3080          *      functionality is disabled, and bad_page_cnt_threshold
3081          *      takes no effect.
3082          */
3083
3084         if (amdgpu_bad_page_threshold < 0) {
3085                 u64 val = adev->gmc.mc_vram_size;
3086
3087                 do_div(val, RAS_BAD_PAGE_COVER);
3088                 con->bad_page_cnt_threshold = min(lower_32_bits(val),
3089                                                   max_count);
3090         } else {
3091                 con->bad_page_cnt_threshold = min_t(int, max_count,
3092                                                     amdgpu_bad_page_threshold);
3093         }
3094 }
3095
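/* Queue a poison message for the page retirement thread; returns -ENOSPC
 * when the poison fifo is full.
 */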
3096 int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
3097                 enum amdgpu_ras_block block, uint16_t pasid,
3098                 pasid_notify pasid_fn, void *data, uint32_t reset)
3099 {
3100         int ret = 0;
3101         struct ras_poison_msg poison_msg;
3102         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3103
3104         memset(&poison_msg, 0, sizeof(poison_msg));
3105         poison_msg.block = block;
3106         poison_msg.pasid = pasid;
3107         poison_msg.reset = reset;
3108         poison_msg.pasid_fn = pasid_fn;
3109         poison_msg.data = data;
3110
3111         ret = kfifo_put(&con->poison_fifo, poison_msg);
3112         if (!ret) {
3113                 dev_err(adev->dev, "Poison message fifo is full!\n");
3114                 return -ENOSPC;
3115         }
3116
3117         return 0;
3118 }
3119
3120 static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
3121                 struct ras_poison_msg *poison_msg)
3122 {
3123         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3124
3125         return kfifo_get(&con->poison_fifo, poison_msg);
3126 }
3127
3128 static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
3129 {
3130         mutex_init(&ecc_log->lock);
3131
3132         INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
3133         ecc_log->de_queried_count = 0;
3134         ecc_log->prev_de_queried_count = 0;
3135 }
3136
3137 static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
3138 {
3139         struct radix_tree_iter iter;
3140         void __rcu **slot;
3141         struct ras_ecc_err *ecc_err;
3142
3143         mutex_lock(&ecc_log->lock);
3144         radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
3145                 ecc_err = radix_tree_deref_slot(slot);
3146                 kfree(ecc_err->err_pages.pfn);
3147                 kfree(ecc_err);
3148                 radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
3149         }
3150         mutex_unlock(&ecc_log->lock);
3151
3152         mutex_destroy(&ecc_log->lock);
3153         ecc_log->de_queried_count = 0;
3154         ecc_log->prev_de_queried_count = 0;
3155 }
3156
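/* Re-arm the page retirement delayed work if newly detected ECC pages are
 * still pending in the de_page_tree; returns true when work was scheduled.
 */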
3157 static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
3158                                 uint32_t delayed_ms)
3159 {
3160         int ret;
3161
3162         mutex_lock(&con->umc_ecc_log.lock);
3163         ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
3164                         UMC_ECC_NEW_DETECTED_TAG);
3165         mutex_unlock(&con->umc_ecc_log.lock);
3166
3167         if (ret)
3168                 schedule_delayed_work(&con->page_retirement_dwork,
3169                         msecs_to_jiffies(delayed_ms));
3170
3171         return ret ? true : false;
3172 }
3173
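/* Delayed work handler: retire bad pages reported by UMC, trigger a gpu
 * reset if new errors were retired while the device is in the RMA state,
 * and re-arm itself while deferred errors remain pending.
 */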
3174 static void amdgpu_ras_do_page_retirement(struct work_struct *work)
3175 {
3176         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3177                                               page_retirement_dwork.work);
3178         struct amdgpu_device *adev = con->adev;
3179         struct ras_err_data err_data;
3180         unsigned long err_cnt;
3181
3182         /* If gpu reset is ongoing, delay retiring the bad pages */
3183         if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
3184                 amdgpu_ras_schedule_retirement_dwork(con,
3185                                 AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
3186                 return;
3187         }
3188
3189         amdgpu_ras_error_data_init(&err_data);
3190
3191         amdgpu_umc_handle_bad_pages(adev, &err_data);
3192         err_cnt = err_data.err_addr_cnt;
3193
3194         amdgpu_ras_error_data_fini(&err_data);
3195
3196         if (err_cnt && amdgpu_ras_is_rma(adev))
3197                 amdgpu_ras_reset_gpu(adev);
3198
3199         amdgpu_ras_schedule_retirement_dwork(con,
3200                         AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
3201 }
3202
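/* Poll UMC for deferred (poison creation) errors until the requested number
 * of new errors has been queried or polling times out, then kick the page
 * retirement work.
 */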
3203 static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
3204                                 uint32_t poison_creation_count)
3205 {
3206         int ret = 0;
3207         struct ras_ecc_log_info *ecc_log;
3208         struct ras_query_if info;
3209         uint32_t timeout = 0;
3210         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3211         uint64_t de_queried_count;
3212         uint32_t new_detect_count, total_detect_count;
3213         uint32_t need_query_count = poison_creation_count;
3214         bool query_data_timeout = false;
3215         enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
3216
3217         memset(&info, 0, sizeof(info));
3218         info.head.block = AMDGPU_RAS_BLOCK__UMC;
3219
3220         ecc_log = &ras->umc_ecc_log;
3221         total_detect_count = 0;
3222         do {
3223                 ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
3224                 if (ret)
3225                         return ret;
3226
3227                 de_queried_count = ecc_log->de_queried_count;
3228                 if (de_queried_count > ecc_log->prev_de_queried_count) {
3229                         new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
3230                         ecc_log->prev_de_queried_count = de_queried_count;
3231                         timeout = 0;
3232                 } else {
3233                         new_detect_count = 0;
3234                 }
3235
3236                 if (new_detect_count) {
3237                         total_detect_count += new_detect_count;
3238                 } else {
3239                         if (!timeout && need_query_count)
3240                                 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
3241
3242                         if (timeout) {
3243                                 if (!--timeout) {
3244                                         query_data_timeout = true;
3245                                         break;
3246                                 }
3247                                 msleep(1);
3248                         }
3249                 }
3250         } while (total_detect_count < need_query_count);
3251
3252         if (query_data_timeout) {
3253                 dev_warn(adev->dev, "Can't find deferred error! count: %u\n",
3254                         (need_query_count - total_detect_count));
3255                 return -ENOENT;
3256         }
3257
3258         if (total_detect_count)
3259                 schedule_delayed_work(&ras->page_retirement_dwork, 0);
3260
3261         return 0;
3262 }
3263
3264 static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
3265 {
3266         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3267         struct ras_poison_msg msg;
3268         int ret;
3269
3270         do {
3271                 ret = kfifo_get(&con->poison_fifo, &msg);
3272         } while (ret);
3273 }
3274
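/* Drain poison consumption messages from the fifo, notify the registered
 * pasid callbacks, and trigger the requested gpu reset unless the device is
 * already in the RMA state.
 */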
3275 static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
3276                         uint32_t msg_count, uint32_t *gpu_reset)
3277 {
3278         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3279         uint32_t reset_flags = 0, reset = 0;
3280         struct ras_poison_msg msg;
3281         int ret, i;
3282
3283         kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
3284
3285         for (i = 0; i < msg_count; i++) {
3286                 ret = amdgpu_ras_get_poison_req(adev, &msg);
3287                 if (!ret)
3288                         continue;
3289
3290                 if (msg.pasid_fn)
3291                         msg.pasid_fn(adev, msg.pasid, msg.data);
3292
3293                 reset_flags |= msg.reset;
3294         }
3295
3296         /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
3297         if (reset_flags && !amdgpu_ras_is_rma(adev)) {
3298                 if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
3299                         reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
3300                 else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
3301                         reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
3302                 else
3303                         reset = reset_flags;
3304
3305                 flush_delayed_work(&con->page_retirement_dwork);
3306
3307                 con->gpu_reset_flags |= reset;
3308                 amdgpu_ras_reset_gpu(adev);
3309
3310                 *gpu_reset = reset;
3311
3312                 /* Wait for gpu recovery to complete */
3313                 flush_work(&con->recovery_work);
3314         }
3315
3316         return 0;
3317 }
3318
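/* Kernel thread servicing poison creation and consumption requests; it
 * clears the pending queues when a mode-1 reset interrupts processing and
 * kicks the work that saves bad pages to eeprom.
 */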
3319 static int amdgpu_ras_page_retirement_thread(void *param)
3320 {
3321         struct amdgpu_device *adev = (struct amdgpu_device *)param;
3322         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3323         uint32_t poison_creation_count, msg_count;
3324         uint32_t gpu_reset;
3325         int ret;
3326
3327         while (!kthread_should_stop()) {
3328
3329                 wait_event_interruptible(con->page_retirement_wq,
3330                                 kthread_should_stop() ||
3331                                 atomic_read(&con->page_retirement_req_cnt));
3332
3333                 if (kthread_should_stop())
3334                         break;
3335
3336                 gpu_reset = 0;
3337
3338                 do {
3339                         poison_creation_count = atomic_read(&con->poison_creation_count);
3340                         ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
3341                         if (ret == -EIO)
3342                                 break;
3343
3344                         if (poison_creation_count) {
3345                                 atomic_sub(poison_creation_count, &con->poison_creation_count);
3346                                 atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
3347                         }
3348                 } while (atomic_read(&con->poison_creation_count));
3349
3350                 if (ret != -EIO) {
3351                         msg_count = kfifo_len(&con->poison_fifo);
3352                         if (msg_count) {
3353                                 ret = amdgpu_ras_poison_consumption_handler(adev,
3354                                                 msg_count, &gpu_reset);
3355                                 if ((ret != -EIO) &&
3356                                     (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
3357                                         atomic_sub(msg_count, &con->page_retirement_req_cnt);
3358                         }
3359                 }
3360
3361                 if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
3362                         /* gpu mode-1 reset is ongoing, or a ras mode-1 reset just completed */
3363                         /* Clear poison creation request */
3364                         atomic_set(&con->poison_creation_count, 0);
3365
3366                         /* Clear poison fifo */
3367                         amdgpu_ras_clear_poison_fifo(adev);
3368
3369                         /* Clear all poison requests */
3370                         atomic_set(&con->page_retirement_req_cnt, 0);
3371
3372                         if (ret == -EIO) {
3373                                 /* Wait for mode-1 reset to complete */
3374                                 down_read(&adev->reset_domain->sem);
3375                                 up_read(&adev->reset_domain->sem);
3376                         }
3377
3378                         /* Wake up work to save bad pages to eeprom */
3379                         schedule_delayed_work(&con->page_retirement_dwork, 0);
3380                 } else if (gpu_reset) {
3381                         /* gpu just completed mode-2 reset or other reset */
3382                         /* Clear poison consumption messages cached in fifo */
3383                         msg_count = kfifo_len(&con->poison_fifo);
3384                         if (msg_count) {
3385                                 amdgpu_ras_clear_poison_fifo(adev);
3386                                 atomic_sub(msg_count, &con->page_retirement_req_cnt);
3387                         }
3388
3389                         /* Wake up work to save bad pages to eeprom */
3390                         schedule_delayed_work(&con->page_retirement_dwork, 0);
3391                 }
3392         }
3393
3394         return 0;
3395 }
3396
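/* Initialize eeprom control, determine the stored record type (MCA vs PA),
 * load the saved bad pages and report them to the SMU.
 */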
3397 int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
3398 {
3399         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3400         struct amdgpu_ras_eeprom_control *control;
3401         int ret;
3402
3403         if (!con || amdgpu_sriov_vf(adev))
3404                 return 0;
3405
3406         control = &con->eeprom_control;
3407         ret = amdgpu_ras_eeprom_init(control);
3408         if (ret)
3409                 return ret;
3410
3411         /* HW not usable */
3412         if (amdgpu_ras_is_rma(adev))
3413                 return -EHWPOISON;
3414
3415         if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
3416                 control->rec_type = AMDGPU_RAS_EEPROM_REC_PA;
3417
3418         /* default status is MCA storage */
3419         if (control->ras_num_recs <= 1 &&
3420             adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
3421                 control->rec_type = AMDGPU_RAS_EEPROM_REC_MCA;
3422
3423         if (control->ras_num_recs) {
3424                 ret = amdgpu_ras_load_bad_pages(adev);
3425                 if (ret)
3426                         return ret;
3427
3428                 amdgpu_dpm_send_hbm_bad_pages_num(
3429                         adev, control->ras_num_recs);
3430
3431                 if (con->update_channel_flag) {
3432                         amdgpu_dpm_send_hbm_bad_channel_flag(
3433                                 adev, control->bad_channel_bitmap);
3434                         con->update_channel_flag = false;
3435                 }
3436         }
3437
3438         return ret;
3439 }
3440
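/* Set up the bad page recovery infrastructure: error handler data, bad page
 * threshold, poison fifo and the page retirement thread/work.
 */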
3441 int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
3442 {
3443         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3444         struct ras_err_handler_data **data;
3445         u32  max_eeprom_records_count = 0;
3446         int ret;
3447
3448         if (!con || amdgpu_sriov_vf(adev))
3449                 return 0;
3450
3451         /* Allow access to the RAS EEPROM via debugfs when the ASIC
3452          * supports RAS and debugfs is enabled, even when
3453          * adev->ras_enabled is unset, i.e. when the "ras_enable"
3454          * module parameter is set to 0.
3455          */
3456         con->adev = adev;
3457
3458         if (!adev->ras_enabled)
3459                 return 0;
3460
3461         data = &con->eh_data;
3462         *data = kzalloc(sizeof(**data), GFP_KERNEL);
3463         if (!*data) {
3464                 ret = -ENOMEM;
3465                 goto out;
3466         }
3467
3468         mutex_init(&con->recovery_lock);
3469         INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
3470         atomic_set(&con->in_recovery, 0);
3471         con->eeprom_control.bad_channel_bitmap = 0;
3472
3473         max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
3474         amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
3475
3476         if (init_bp_info) {
3477                 ret = amdgpu_ras_init_badpage_info(adev);
3478                 if (ret)
3479                         goto free;
3480         }
3481
3482         mutex_init(&con->page_rsv_lock);
3483         INIT_KFIFO(con->poison_fifo);
3484         mutex_init(&con->page_retirement_lock);
3485         init_waitqueue_head(&con->page_retirement_wq);
3486         atomic_set(&con->page_retirement_req_cnt, 0);
3487         atomic_set(&con->poison_creation_count, 0);
3488         con->page_retirement_thread =
3489                 kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
3490         if (IS_ERR(con->page_retirement_thread)) {
3491                 con->page_retirement_thread = NULL;
3492                 dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
3493         }
3494
3495         INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
3496         amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
3497 #ifdef CONFIG_X86_MCE_AMD
3498         if ((adev->asic_type == CHIP_ALDEBARAN) &&
3499             (adev->gmc.xgmi.connected_to_cpu))
3500                 amdgpu_register_bad_pages_mca_notifier(adev);
3501 #endif
3502         return 0;
3503
3504 free:
3505         kfree((*data)->bps);
3506         kfree(*data);
3507         con->eh_data = NULL;
3508 out:
3509         dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
3510
3511         /*
3512          * Except for the error-threshold-exceeded case, failures in this
3513          * function do not fail amdgpu driver init.
3514          */
3515         if (!amdgpu_ras_is_rma(adev))
3516                 ret = 0;
3517         else
3518                 ret = -EINVAL;
3519
3520         return ret;
3521 }
3522
3523 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
3524 {
3525         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3526         struct ras_err_handler_data *data = con->eh_data;
3527         int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
3528         bool ret;
3529
3530         /* recovery_init did not initialize it, so fini has nothing to do */
3531         if (!data)
3532                 return 0;
3533
3534         /* Save all cached bad pages to eeprom */
3535         do {
3536                 flush_delayed_work(&con->page_retirement_dwork);
3537                 ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
3538         } while (ret && max_flush_timeout--);
3539
3540         if (con->page_retirement_thread)
3541                 kthread_stop(con->page_retirement_thread);
3542
3543         atomic_set(&con->page_retirement_req_cnt, 0);
3544         atomic_set(&con->poison_creation_count, 0);
3545
3546         mutex_destroy(&con->page_rsv_lock);
3547
3548         cancel_work_sync(&con->recovery_work);
3549
3550         cancel_delayed_work_sync(&con->page_retirement_dwork);
3551
3552         amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
3553
3554         mutex_lock(&con->recovery_lock);
3555         con->eh_data = NULL;
3556         kfree(data->bps);
3557         kfree(data);
3558         mutex_unlock(&con->recovery_lock);
3559
3560         return 0;
3561 }
3562 /* recovery end */
3563
3564 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
3565 {
3566         if (amdgpu_sriov_vf(adev)) {
3567                 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3568                 case IP_VERSION(13, 0, 2):
3569                 case IP_VERSION(13, 0, 6):
3570                 case IP_VERSION(13, 0, 14):
3571                         return true;
3572                 default:
3573                         return false;
3574                 }
3575         }
3576
3577         if (adev->asic_type == CHIP_IP_DISCOVERY) {
3578                 switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3579                 case IP_VERSION(13, 0, 0):
3580                 case IP_VERSION(13, 0, 6):
3581                 case IP_VERSION(13, 0, 10):
3582                 case IP_VERSION(13, 0, 14):
3583                         return true;
3584                 default:
3585                         return false;
3586                 }
3587         }
3588
3589         return adev->asic_type == CHIP_VEGA10 ||
3590                 adev->asic_type == CHIP_VEGA20 ||
3591                 adev->asic_type == CHIP_ARCTURUS ||
3592                 adev->asic_type == CHIP_ALDEBARAN ||
3593                 adev->asic_type == CHIP_SIENNA_CICHLID;
3594 }
3595
3596 /*
3597  * this is a workaround for the vega20 workstation sku:
3598  * force enable gfx ras and ignore the vbios gfx ras flag,
3599  * because GC EDC can not be written
3600  */
3601 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
3602 {
3603         struct atom_context *ctx = adev->mode_info.atom_context;
3604
3605         if (!ctx)
3606                 return;
3607
3608         if (strnstr(ctx->vbios_pn, "D16406",
3609                     sizeof(ctx->vbios_pn)) ||
3610                 strnstr(ctx->vbios_pn, "D36002",
3611                         sizeof(ctx->vbios_pn)))
3612                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
3613 }
3614
3615 /* Query ras capability via the atomfirmware interface */
3616 static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
3617 {
3618         /* mem_ecc cap */
3619         if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
3620                 dev_info(adev->dev, "MEM ECC is active.\n");
3621                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
3622                                          1 << AMDGPU_RAS_BLOCK__DF);
3623         } else {
3624                 dev_info(adev->dev, "MEM ECC is not present.\n");
3625         }
3626
3627         /* sram_ecc cap */
3628         if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
3629                 dev_info(adev->dev, "SRAM ECC is active.\n");
3630                 if (!amdgpu_sriov_vf(adev))
3631                         adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
3632                                                   1 << AMDGPU_RAS_BLOCK__DF);
3633                 else
3634                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
3635                                                  1 << AMDGPU_RAS_BLOCK__SDMA |
3636                                                  1 << AMDGPU_RAS_BLOCK__GFX);
3637
3638                 /*
3639                  * VCN/JPEG RAS can be supported on both bare metal and
3640                  * SRIOV environments
3641                  */
3642                 if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
3643                     amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
3644                     amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
3645                         adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
3646                                                  1 << AMDGPU_RAS_BLOCK__JPEG);
3647                 else
3648                         adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
3649                                                   1 << AMDGPU_RAS_BLOCK__JPEG);
3650
3651                 /*
3652                  * XGMI RAS is not supported if xgmi num physical nodes
3653                  * is zero
3654                  */
3655                 if (!adev->gmc.xgmi.num_physical_nodes)
3656                         adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
3657         } else {
3658                 dev_info(adev->dev, "SRAM ECC is not present.\n");
3659         }
3660 }
3661
3662 /* Query poison mode from umc/df IP callbacks */
3663 static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
3664 {
3665         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3666         bool df_poison, umc_poison;
3667
3668         /* poison setting is useless on SRIOV guest */
3669         if (amdgpu_sriov_vf(adev) || !con)
3670                 return;
3671
3672         /* Init poison supported flag, the default value is false */
3673         if (adev->gmc.xgmi.connected_to_cpu ||
3674             adev->gmc.is_app_apu) {
3675                 /* enabled by default when GPU is connected to CPU */
3676                 con->poison_supported = true;
3677         } else if (adev->df.funcs &&
3678             adev->df.funcs->query_ras_poison_mode &&
3679             adev->umc.ras &&
3680             adev->umc.ras->query_ras_poison_mode) {
3681                 df_poison =
3682                         adev->df.funcs->query_ras_poison_mode(adev);
3683                 umc_poison =
3684                         adev->umc.ras->query_ras_poison_mode(adev);
3685
3686                 /* Only if poison is set in both DF and UMC can we support it */
3687                 if (df_poison && umc_poison)
3688                         con->poison_supported = true;
3689                 else if (df_poison != umc_poison)
3690                         dev_warn(adev->dev,
3691                                 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
3692                                 df_poison, umc_poison);
3693         }
3694 }
3695
3696 /*
3697  * check the hardware's ras ability, which will be saved in hw_supported.
3698  * if the hardware does not support ras, we can skip some ras initialization
3699  * and forbid some ras operations from IPs.
3700  * if software itself, say a boot parameter, limits the ras ability, we still
3701  * need to allow IPs to do some limited operations, like disable. In that
3702  * case, we have to initialize ras as normal, but need to check in each
3703  * function whether the operation is allowed.
3704  */
3705 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
3706 {
3707         adev->ras_hw_enabled = adev->ras_enabled = 0;
3708
3709         if (!amdgpu_ras_asic_supported(adev))
3710                 return;
3711
3712         if (amdgpu_sriov_vf(adev)) {
3713                 if (amdgpu_virt_get_ras_capability(adev))
3714                         goto init_ras_enabled_flag;
3715         }
3716
3717         /* query ras capability from psp */
3718         if (amdgpu_psp_get_ras_capability(&adev->psp))
3719                 goto init_ras_enabled_flag;
3720
3721         /* query ras capability from the vbios */
3722         if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3723                 amdgpu_ras_query_ras_capablity_from_vbios(adev);
3724         } else {
3725                 /* the driver only manages RAS features for a few IP blocks
3726                  * when the GPU is connected to the CPU through XGMI */
3727                 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
3728                                            1 << AMDGPU_RAS_BLOCK__SDMA |
3729                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
3730         }
3731
3732         /* apply asic specific settings (vega20 only for now) */
3733         amdgpu_ras_get_quirks(adev);
3734
3735         /* query poison mode from umc/df ip callback */
3736         amdgpu_ras_query_poison_mode(adev);
3737
3738 init_ras_enabled_flag:
3739         /* hw_supported needs to be aligned with RAS block mask. */
3740         adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
3741
3742         adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
3743                 adev->ras_hw_enabled & amdgpu_ras_mask;
3744
3745         /* aca is disabled by default */
3746         adev->aca.is_enabled = false;
3747
3748         /* the bad page feature is not applicable to this specific platform */
3749         if (adev->gmc.is_app_apu &&
3750             amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
3751                 amdgpu_bad_page_threshold = 0;
3752 }
3753
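/* Delayed work that refreshes the cached correctable and uncorrectable
 * error counts while holding a runtime PM reference.
 */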
3754 static void amdgpu_ras_counte_dw(struct work_struct *work)
3755 {
3756         struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
3757                                               ras_counte_delay_work.work);
3758         struct amdgpu_device *adev = con->adev;
3759         struct drm_device *dev = adev_to_drm(adev);
3760         unsigned long ce_count, ue_count;
3761         int res;
3762
3763         res = pm_runtime_get_sync(dev->dev);
3764         if (res < 0)
3765                 goto Out;
3766
3767         /* Cache new values.
3768          */
3769         if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
3770                 atomic_set(&con->ras_ce_count, ce_count);
3771                 atomic_set(&con->ras_ue_count, ue_count);
3772         }
3773
3774         pm_runtime_mark_last_busy(dev->dev);
3775 Out:
3776         pm_runtime_put_autosuspend(dev->dev);
3777 }
3778
3779 static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
3780 {
3781         return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
3782                         AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
3783                         AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
3784                         AMDGPU_RAS_ERROR__PARITY;
3785 }
3786
3787 static void ras_event_mgr_init(struct ras_event_manager *mgr)
3788 {
3789         struct ras_event_state *event_state;
3790         int i;
3791
3792         memset(mgr, 0, sizeof(*mgr));
3793         atomic64_set(&mgr->seqno, 0);
3794
3795         for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
3796                 event_state = &mgr->event_state[i];
3797                 event_state->last_seqno = RAS_EVENT_INVALID_ID;
3798                 atomic64_set(&event_state->count, 0);
3799         }
3800 }
3801
3802 static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
3803 {
3804         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
3805         struct amdgpu_hive_info *hive;
3806
3807         if (!ras)
3808                 return;
3809
3810         hive = amdgpu_get_xgmi_hive(adev);
3811         ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
3812
3813         /* init event manager with node 0 on xgmi system */
3814         if (!amdgpu_reset_in_recovery(adev)) {
3815                 if (!hive || adev->gmc.xgmi.node_id == 0)
3816                         ras_event_mgr_init(ras->event_mgr);
3817         }
3818
3819         if (hive)
3820                 amdgpu_put_xgmi_hive(hive);
3821 }
3822
3823 static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
3824 {
3825         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3826
3827         if (!con || (adev->flags & AMD_IS_APU))
3828                 return;
3829
3830         switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
3831         case IP_VERSION(13, 0, 2):
3832         case IP_VERSION(13, 0, 6):
3833         case IP_VERSION(13, 0, 14):
3834                 con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE;
3835                 break;
3836         default:
3837                 break;
3838         }
3839 }
3840
3841 int amdgpu_ras_init(struct amdgpu_device *adev)
3842 {
3843         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
3844         int r;
3845
3846         if (con)
3847                 return 0;
3848
3849         con = kzalloc(sizeof(*con) +
3850                         sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
3851                         sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
3852                         GFP_KERNEL);
3853         if (!con)
3854                 return -ENOMEM;
3855
3856         con->adev = adev;
3857         INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
3858         atomic_set(&con->ras_ce_count, 0);
3859         atomic_set(&con->ras_ue_count, 0);
3860
3861         con->objs = (struct ras_manager *)(con + 1);
3862
3863         amdgpu_ras_set_context(adev, con);
3864
3865         amdgpu_ras_check_supported(adev);
3866
3867         if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
3868                 /* set the gfx block ras context feature for VEGA20 Gaming
3869                  * so a ras disable cmd is sent to the ras ta during ras late init.
3870                  */
3871                 if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
3872                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
3873
3874                         return 0;
3875                 }
3876
3877                 r = 0;
3878                 goto release_con;
3879         }
3880
3881         con->update_channel_flag = false;
3882         con->features = 0;
3883         con->schema = 0;
3884         INIT_LIST_HEAD(&con->head);
3885         /* Might need to get this flag from the vbios. */
3886         con->flags = RAS_DEFAULT_FLAGS;
3887
3888         /* initialize the nbio ras function ahead of any other
3889          * ras functions so the hardware fatal error interrupt
3890          * can be enabled as early as possible */
3891         switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3892         case IP_VERSION(7, 4, 0):
3893         case IP_VERSION(7, 4, 1):
3894         case IP_VERSION(7, 4, 4):
3895                 if (!adev->gmc.xgmi.connected_to_cpu)
3896                         adev->nbio.ras = &nbio_v7_4_ras;
3897                 break;
3898         case IP_VERSION(4, 3, 0):
3899                 if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
3900                         /* unlike other generations of nbio ras,
3901                          * nbio v4_3 only supports a fatal error interrupt
3902                          * to inform software that DF is frozen due to a
3903                          * system fatal error event. the driver should not
3904                          * enable nbio ras in such a case. Instead,
3905                          * check DF RAS */
3906                         adev->nbio.ras = &nbio_v4_3_ras;
3907                 break;
3908         case IP_VERSION(7, 9, 0):
3909                 if (!adev->gmc.is_app_apu)
3910                         adev->nbio.ras = &nbio_v7_9_ras;
3911                 break;
3912         default:
3913                 /* nbio ras is not available */
3914                 break;
3915         }
3916
3917         /* the nbio ras block needs to be enabled ahead of other ras blocks
3918          * to handle fatal errors */
3919         r = amdgpu_nbio_ras_sw_init(adev);
3920         if (r)
3921                 return r;
3922
3923         if (adev->nbio.ras &&
3924             adev->nbio.ras->init_ras_controller_interrupt) {
3925                 r = adev->nbio.ras->init_ras_controller_interrupt(adev);
3926                 if (r)
3927                         goto release_con;
3928         }
3929
3930         if (adev->nbio.ras &&
3931             adev->nbio.ras->init_ras_err_event_athub_interrupt) {
3932                 r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
3933                 if (r)
3934                         goto release_con;
3935         }
3936
3937         /* Pack the socket_id into ras feature mask bits[31:29] */
3938         if (adev->smuio.funcs &&
3939             adev->smuio.funcs->get_socket_id)
3940                 con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
3941                                         AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
3942
3943         /* Get RAS schema for particular SOC */
3944         con->schema = amdgpu_get_ras_schema(adev);
3945
3946         amdgpu_ras_init_reserved_vram_size(adev);
3947
3948         if (amdgpu_ras_fs_init(adev)) {
3949                 r = -EINVAL;
3950                 goto release_con;
3951         }
3952
3953         if (amdgpu_ras_aca_is_supported(adev)) {
3954                 if (amdgpu_aca_is_enabled(adev))
3955                         r = amdgpu_aca_init(adev);
3956                 else
3957                         r = amdgpu_mca_init(adev);
3958                 if (r)
3959                         goto release_con;
3960         }
3961
3962         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
3963                  "hardware ability[%x] ras_mask[%x]\n",
3964                  adev->ras_hw_enabled, adev->ras_enabled);
3965
3966         return 0;
3967 release_con:
3968         amdgpu_ras_set_context(adev, NULL);
3969         kfree(con);
3970
3971         return r;
3972 }
3973
3974 int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
3975 {
3976         if (adev->gmc.xgmi.connected_to_cpu ||
3977             adev->gmc.is_app_apu)
3978                 return 1;
3979         return 0;
3980 }
3981
3982 static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
3983                                         struct ras_common_if *ras_block)
3984 {
3985         struct ras_query_if info = {
3986                 .head = *ras_block,
3987         };
3988
3989         if (!amdgpu_persistent_edc_harvesting_supported(adev))
3990                 return 0;
3991
3992         if (amdgpu_ras_query_error_status(adev, &info) != 0)
3993                 DRM_WARN("RAS init harvest failure");
3994
3995         if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
3996                 DRM_WARN("RAS init harvest reset failure");
3997
3998         return 0;
3999 }
4000
4001 bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
4002 {
4003         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4004
4005         if (!con)
4006                 return false;
4007
4008         return con->poison_supported;
4009 }
4010
4011 /* helper function to handle common stuff in ip late init phase */
4012 int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
4013                          struct ras_common_if *ras_block)
4014 {
4015         struct amdgpu_ras_block_object *ras_obj = NULL;
4016         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4017         struct ras_query_if *query_info;
4018         unsigned long ue_count, ce_count;
4019         int r;
4020
4021         /* disable RAS feature per IP block if it is not supported */
4022         if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
4023                 amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
4024                 return 0;
4025         }
4026
4027         r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
4028         if (r) {
4029                 if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
4030                         /* in the resume phase, if enabling ras fails,
4031                          * clean up all ras fs nodes and disable ras */
4032                         goto cleanup;
4033                 } else
4034                         return r;
4035         }
4036
4037         /* check for errors on warm reset for ASICs that support persistent EDC */
4038         amdgpu_persistent_edc_harvesting(adev, ras_block);
4039
4040         /* in resume phase, no need to create ras fs node */
4041         if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
4042                 return 0;
4043
4044         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4045         if (ras_obj->ras_cb || (ras_obj->hw_ops &&
4046             (ras_obj->hw_ops->query_poison_status ||
4047             ras_obj->hw_ops->handle_poison_consumption))) {
4048                 r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
4049                 if (r)
4050                         goto cleanup;
4051         }
4052
4053         if (ras_obj->hw_ops &&
4054             (ras_obj->hw_ops->query_ras_error_count ||
4055              ras_obj->hw_ops->query_ras_error_status)) {
4056                 r = amdgpu_ras_sysfs_create(adev, ras_block);
4057                 if (r)
4058                         goto interrupt;
4059
4060                 /* Those are the cached values at init.
4061                  */
4062                 query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
4063                 if (!query_info)
4064                         return -ENOMEM;
4065                 memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
4066
4067                 if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
4068                         atomic_set(&con->ras_ce_count, ce_count);
4069                         atomic_set(&con->ras_ue_count, ue_count);
4070                 }
4071
4072                 kfree(query_info);
4073         }
4074
4075         return 0;
4076
4077 interrupt:
4078         if (ras_obj->ras_cb)
4079                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4080 cleanup:
4081         amdgpu_ras_feature_enable(adev, ras_block, 0);
4082         return r;
4083 }
4084
4085 static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
4086                          struct ras_common_if *ras_block)
4087 {
4088         return amdgpu_ras_block_late_init(adev, ras_block);
4089 }
4090
4091 /* helper function to remove ras fs node and interrupt handler */
4092 void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
4093                           struct ras_common_if *ras_block)
4094 {
4095         struct amdgpu_ras_block_object *ras_obj;
4096         if (!ras_block)
4097                 return;
4098
4099         amdgpu_ras_sysfs_remove(adev, ras_block);
4100
4101         ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
4102         if (ras_obj->ras_cb)
4103                 amdgpu_ras_interrupt_remove_handler(adev, ras_block);
4104 }
4105
4106 static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
4107                           struct ras_common_if *ras_block)
4108 {
4109         return amdgpu_ras_block_late_fini(adev, ras_block);
4110 }
4111
4112 /* do some init work after IP late init, as a dependency;
4113  * it runs in the resume/gpu reset/boot-up cases.
4114  */
4115 void amdgpu_ras_resume(struct amdgpu_device *adev)
4116 {
4117         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4118         struct ras_manager *obj, *tmp;
4119
4120         if (!adev->ras_enabled || !con) {
4121                 /* clean ras context for VEGA20 Gaming after sending the ras disable cmd */
4122                 amdgpu_release_ras_context(adev);
4123
4124                 return;
4125         }
4126
4127         if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
4128                 /* Set up all other IPs, including those with no ras
4129                  * implementation. Strictly speaking each IP's actual ras
4130                  * error type should be MULTI_UNCORRECTABLE, but since the
4131                  * driver does not handle that, ERROR_NONE works just as well.
4132                  */
4133                 amdgpu_ras_enable_all_features(adev, 1);
4134
4135                 /* ras is enabled on every hw_supported block above, but a
4136                  * boot parameter may have disabled some of them, and some IPs
4137                  * are not implemented yet; disable those here on their behalf.
4138                  */
4139                 list_for_each_entry_safe(obj, tmp, &con->head, node) {
4140                         if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
4141                                 amdgpu_ras_feature_enable(adev, &obj->head, 0);
4142                                 /* there should not be any reference left. */
4143                                 WARN_ON(alive_obj(obj));
4144                         }
4145                 }
4146         }
4147 }
4148
4149 void amdgpu_ras_suspend(struct amdgpu_device *adev)
4150 {
4151         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4152
4153         if (!adev->ras_enabled || !con)
4154                 return;
4155
4156         amdgpu_ras_disable_all_features(adev, 0);
4157         /* Make sure all ras objects are disabled. */
4158         if (AMDGPU_RAS_GET_FEATURES(con->features))
4159                 amdgpu_ras_disable_all_features(adev, 1);
4160 }
4161
4162 int amdgpu_ras_late_init(struct amdgpu_device *adev)
4163 {
4164         struct amdgpu_ras_block_list *node, *tmp;
4165         struct amdgpu_ras_block_object *obj;
4166         int r;
4167
4168         amdgpu_ras_event_mgr_init(adev);
4169
4170         if (amdgpu_ras_aca_is_supported(adev)) {
4171                 if (amdgpu_reset_in_recovery(adev)) {
4172                         if (amdgpu_aca_is_enabled(adev))
4173                                 r = amdgpu_aca_reset(adev);
4174                         else
4175                                 r = amdgpu_mca_reset(adev);
4176                         if (r)
4177                                 return r;
4178                 }
4179
4180                 if (!amdgpu_sriov_vf(adev)) {
4181                         if (amdgpu_aca_is_enabled(adev))
4182                                 amdgpu_ras_set_aca_debug_mode(adev, false);
4183                         else
4184                                 amdgpu_ras_set_mca_debug_mode(adev, false);
4185                 }
4186         }
4187
4188         /* The guest side does not need to init the ras feature */
4189         if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
4190                 return 0;
4191
4192         list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
4193                 obj = node->ras_obj;
4194                 if (!obj) {
4195                         dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
4196                         continue;
4197                 }
4198
4199                 if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
4200                         continue;
4201
4202                 if (obj->ras_late_init) {
4203                         r = obj->ras_late_init(adev, &obj->ras_comm);
4204                         if (r) {
4205                                 dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
4206                                         obj->ras_comm.name, r);
4207                                 return r;
4208                         }
4209                 } else
4210                         amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
4211         }
4212
4213         return 0;
4214 }
4215
4216 /* Do some fini work before IP fini, since it depends on it */
4217 int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
4218 {
4219         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4220
4221         if (!adev->ras_enabled || !con)
4222                 return 0;
4223
4224
4225         /* ras needs to be disabled on all IPs here, before ip [hw/sw] fini */
4226         if (AMDGPU_RAS_GET_FEATURES(con->features))
4227                 amdgpu_ras_disable_all_features(adev, 0);
4228         amdgpu_ras_recovery_fini(adev);
4229         return 0;
4230 }
4231
4232 int amdgpu_ras_fini(struct amdgpu_device *adev)
4233 {
4234         struct amdgpu_ras_block_list *ras_node, *tmp;
4235         struct amdgpu_ras_block_object *obj = NULL;
4236         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4237
4238         if (!adev->ras_enabled || !con)
4239                 return 0;
4240
4241         list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
4242                 if (ras_node->ras_obj) {
4243                         obj = ras_node->ras_obj;
4244                         if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
4245                             obj->ras_fini)
4246                                 obj->ras_fini(adev, &obj->ras_comm);
4247                         else
4248                                 amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
4249                 }
4250
4251                 /* Clear ras blocks from ras_list and free ras block list node */
4252                 list_del(&ras_node->node);
4253                 kfree(ras_node);
4254         }
4255
4256         amdgpu_ras_fs_fini(adev);
4257         amdgpu_ras_interrupt_remove_all(adev);
4258
4259         if (amdgpu_ras_aca_is_supported(adev)) {
4260                 if (amdgpu_aca_is_enabled(adev))
4261                         amdgpu_aca_fini(adev);
4262                 else
4263                         amdgpu_mca_fini(adev);
4264         }
4265
4266         WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
4267
4268         if (AMDGPU_RAS_GET_FEATURES(con->features))
4269                 amdgpu_ras_disable_all_features(adev, 0);
4270
4271         cancel_delayed_work_sync(&con->ras_counte_delay_work);
4272
4273         amdgpu_ras_set_context(adev, NULL);
4274         kfree(con);
4275
4276         return 0;
4277 }
4278
4279 bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
4280 {
4281         struct amdgpu_ras *ras;
4282
4283         ras = amdgpu_ras_get_context(adev);
4284         if (!ras)
4285                 return false;
4286
4287         return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4288 }
4289
4290 void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
4291 {
4292         struct amdgpu_ras *ras;
4293
4294         ras = amdgpu_ras_get_context(adev);
4295         if (ras) {
4296                 if (status)
4297                         set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4298                 else
4299                         clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
4300         }
4301 }
4302
4303 void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
4304 {
4305         struct amdgpu_ras *ras;
4306
4307         ras = amdgpu_ras_get_context(adev);
4308         if (ras)
4309                 ras->ras_err_state = 0;
4310 }
4311
4312 void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
4313                                enum amdgpu_ras_block block)
4314 {
4315         struct amdgpu_ras *ras;
4316
4317         ras = amdgpu_ras_get_context(adev);
4318         if (ras)
4319                 set_bit(block, &ras->ras_err_state);
4320 }
4321
4322 bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
4323 {
4324         struct amdgpu_ras *ras;
4325
4326         ras = amdgpu_ras_get_context(adev);
4327         if (ras) {
4328                 if (block == AMDGPU_RAS_BLOCK__ANY)
4329                         return (ras->ras_err_state != 0);
4330                 else
4331                         return test_bit(block, &ras->ras_err_state) ||
4332                                test_bit(AMDGPU_RAS_BLOCK__LAST,
4333                                         &ras->ras_err_state);
4334         }
4335
4336         return false;
4337 }
4338
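/*
 * A minimal usage sketch (illustrative, not taken from the driver source):
 * the error-state helpers above are meant to be paired roughly as follows;
 * the specific blocks named here are examples only.
 *
 *     // producer side: record that a block has created/propagated poison
 *     amdgpu_ras_set_err_poison(adev, AMDGPU_RAS_BLOCK__UMC);
 *
 *     // consumer side: back off if this block is poisoned or a fatal
 *     // error (FED) has been flagged device-wide
 *     if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__GFX))
 *             return;
 *
 *     // recovery path: clear all per-block state once reset completes
 *     amdgpu_ras_clear_err_state(adev);
 */
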
4339 static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
4340 {
4341         struct amdgpu_ras *ras;
4342
4343         ras = amdgpu_ras_get_context(adev);
4344         if (!ras)
4345                 return NULL;
4346
4347         return ras->event_mgr;
4348 }
4349
4350 int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
4351                                      const void *caller)
4352 {
4353         struct ras_event_manager *event_mgr;
4354         struct ras_event_state *event_state;
4355         int ret = 0;
4356
4357         if (type >= RAS_EVENT_TYPE_COUNT) {
4358                 ret = -EINVAL;
4359                 goto out;
4360         }
4361
4362         event_mgr = __get_ras_event_mgr(adev);
4363         if (!event_mgr) {
4364                 ret = -EINVAL;
4365                 goto out;
4366         }
4367
4368         event_state = &event_mgr->event_state[type];
4369         event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
4370         atomic64_inc(&event_state->count);
4371
4372 out:
4373         if (ret && caller)
4374                 dev_warn(adev->dev, "failed to mark ras event (%d) in %ps, ret:%d\n",
4375                          (int)type, caller, ret);
4376
4377         return ret;
4378 }
4379
4380 u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
4381 {
4382         struct ras_event_manager *event_mgr;
4383         u64 id;
4384
4385         if (type >= RAS_EVENT_TYPE_COUNT)
4386                 return RAS_EVENT_INVALID_ID;
4387
4388         switch (type) {
4389         case RAS_EVENT_TYPE_FATAL:
4390         case RAS_EVENT_TYPE_POISON_CREATION:
4391         case RAS_EVENT_TYPE_POISON_CONSUMPTION:
4392                 event_mgr = __get_ras_event_mgr(adev);
4393                 if (!event_mgr)
4394                         return RAS_EVENT_INVALID_ID;
4395
4396                 id = event_mgr->event_state[type].last_seqno;
4397                 break;
4398         case RAS_EVENT_TYPE_INVALID:
4399         default:
4400                 id = RAS_EVENT_INVALID_ID;
4401                 break;
4402         }
4403
4404         return id;
4405 }
4406
4407 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
4408 {
4409         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
4410                 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4411                 enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
4412                 u64 event_id;
4413
4414                 if (amdgpu_ras_mark_ras_event(adev, type))
4415                         return;
4416
4417                 event_id = amdgpu_ras_acquire_event_id(adev, type);
4418
4419                 RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error "
4420                               "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
4421
4422                 amdgpu_ras_set_fed(adev, true);
4423                 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4424                 amdgpu_ras_reset_gpu(adev);
4425         }
4426 }
4427
4428 bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
4429 {
4430         if (adev->asic_type == CHIP_VEGA20 &&
4431             adev->pm.fw_version <= 0x283400) {
4432                 return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
4433                                 amdgpu_ras_intr_triggered();
4434         }
4435
4436         return false;
4437 }
4438
4439 void amdgpu_release_ras_context(struct amdgpu_device *adev)
4440 {
4441         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4442
4443         if (!con)
4444                 return;
4445
4446         if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
4447                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
4448                 amdgpu_ras_set_context(adev, NULL);
4449                 kfree(con);
4450         }
4451 }
4452
4453 #ifdef CONFIG_X86_MCE_AMD
4454 static struct amdgpu_device *find_adev(uint32_t node_id)
4455 {
4456         int i;
4457         struct amdgpu_device *adev = NULL;
4458
4459         for (i = 0; i < mce_adev_list.num_gpu; i++) {
4460                 adev = mce_adev_list.devs[i];
4461
4462                 if (adev && adev->gmc.xgmi.connected_to_cpu &&
4463                     adev->gmc.xgmi.physical_node_id == node_id)
4464                         break;
4465                 adev = NULL;
4466         }
4467
4468         return adev;
4469 }
4470
4471 #define GET_MCA_IPID_GPUID(m)   (((m) >> 44) & 0xF)
4472 #define GET_UMC_INST(m)         (((m) >> 21) & 0x7)
4473 #define GET_CHAN_INDEX(m)       ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
4474 #define GPU_ID_OFFSET           8
4475
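/*
 * For reference, the macros above decode fields of the MCA IPID value
 * (m->ipid): GET_MCA_IPID_GPUID() extracts bits [47:44], GET_UMC_INST()
 * extracts bits [23:21], and GET_CHAN_INDEX() combines bits [13:12] with
 * bit 20 moved down to bit position 2.
 */
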
4476 static int amdgpu_bad_page_notifier(struct notifier_block *nb,
4477                                     unsigned long val, void *data)
4478 {
4479         struct mce *m = (struct mce *)data;
4480         struct amdgpu_device *adev = NULL;
4481         uint32_t gpu_id = 0;
4482         uint32_t umc_inst = 0, ch_inst = 0;
4483
4484         /*
4485          * Only process the error if it was generated in UMC_V2, which
4486          * belongs to GPU UMCs, and occurred in DramECC (extended error
4487          * code = 0); otherwise bail out.
4488          */
4489         if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
4490                     (XEC(m->status, 0x3f) == 0x0)))
4491                 return NOTIFY_DONE;
4492
4493         /*
4494          * If it is a correctable error, return.
4495          */
4496         if (mce_is_correctable(m))
4497                 return NOTIFY_OK;
4498
4499         /*
4500          * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
4501          */
4502         gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
4503
4504         adev = find_adev(gpu_id);
4505         if (!adev) {
4506                 DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
4507                                                                 gpu_id);
4508                 return NOTIFY_DONE;
4509         }
4510
4511         /*
4512          * If it is an uncorrectable error, find out the UMC instance and
4513          * channel index.
4514          */
4515         umc_inst = GET_UMC_INST(m->ipid);
4516         ch_inst = GET_CHAN_INDEX(m->ipid);
4517
4518         dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
4519                              umc_inst, ch_inst);
4520
4521         if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
4522                 return NOTIFY_OK;
4523         else
4524                 return NOTIFY_DONE;
4525 }
4526
4527 static struct notifier_block amdgpu_bad_page_nb = {
4528         .notifier_call  = amdgpu_bad_page_notifier,
4529         .priority       = MCE_PRIO_UC,
4530 };
4531
4532 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
4533 {
4534         /*
4535          * Add the adev to the mce_adev_list.
4536          * During mode2 reset, amdgpu device is temporarily
4537          * removed from the mgpu_info list which can cause
4538          * page retirement to fail.
4539          * Use this list instead of mgpu_info to find the amdgpu
4540          * device on which the UMC error was reported.
4541          */
4542         mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
4543
4544         /*
4545          * Register the x86 notifier only once
4546          * with MCE subsystem.
4547          */
4548         if (!notifier_registered) {
4549                 mce_register_decode_chain(&amdgpu_bad_page_nb);
4550                 notifier_registered = true;
4551         }
4552 }
4553 #endif
4554
4555 struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
4556 {
4557         if (!adev)
4558                 return NULL;
4559
4560         return adev->psp.ras_context.ras;
4561 }
4562
4563 int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
4564 {
4565         if (!adev)
4566                 return -EINVAL;
4567
4568         adev->psp.ras_context.ras = ras_con;
4569         return 0;
4570 }
4571
4572 /* check if ras is supported on block, say, sdma, gfx */
4573 int amdgpu_ras_is_supported(struct amdgpu_device *adev,
4574                 unsigned int block)
4575 {
4576         int ret = 0;
4577         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4578
4579         if (block >= AMDGPU_RAS_BLOCK_COUNT)
4580                 return 0;
4581
4582         ret = ras && (adev->ras_enabled & (1 << block));
4583
4584         /* For the special asics with mem ecc enabled but sram ecc
4585          * disabled, a ras block that is not set in .ras_enabled can
4586          * still be considered to support the ras function, provided
4587          * the asic supports poison mode and the block has a ras
4588          * configuration.
4589          */
4590         if (!ret &&
4591             (block == AMDGPU_RAS_BLOCK__GFX ||
4592              block == AMDGPU_RAS_BLOCK__SDMA ||
4593              block == AMDGPU_RAS_BLOCK__VCN ||
4594              block == AMDGPU_RAS_BLOCK__JPEG) &&
4595             (amdgpu_ras_mask & (1 << block)) &&
4596             amdgpu_ras_is_poison_mode_supported(adev) &&
4597             amdgpu_ras_get_ras_block(adev, block, 0))
4598                 ret = 1;
4599
4600         return ret;
4601 }
4602
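/*
 * Illustrative caller sketch (not from the driver source): a per-IP RAS
 * path would normally gate its work on this helper, mirroring the check
 * made for each registered block in amdgpu_ras_late_init() above, e.g.
 *
 *     if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
 *             return 0;
 */
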
4603 int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
4604 {
4605         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
4606
4607         /* mode1 reset is the only option when the device is in RMA status */
4608         if (amdgpu_ras_is_rma(adev)) {
4609                 ras->gpu_reset_flags = 0;
4610                 ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
4611         }
4612
4613         if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
4614                 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
4615                 int hive_ras_recovery = 0;
4616
4617                 if (hive) {
4618                         hive_ras_recovery = atomic_read(&hive->ras_recovery);
4619                         amdgpu_put_xgmi_hive(hive);
4620                 }
4621                 /* In the case of multiple GPUs, once one GPU has started
4622                  * resetting all GPUs on the hive, the other GPUs do not need
4623                  * to trigger a GPU reset again.
4624                  */
4625                 if (!hive_ras_recovery)
4626                         amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4627                 else
4628                         atomic_set(&ras->in_recovery, 0);
4629         } else {
4630                 flush_work(&ras->recovery_work);
4631                 amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
4632         }
4633
4634         return 0;
4635 }
4636
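/*
 * A caller typically selects the reset mode before scheduling recovery,
 * as amdgpu_ras_global_ras_isr() above does:
 *
 *     ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
 *     amdgpu_ras_reset_gpu(adev);
 */
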
4637 int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
4638 {
4639         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4640         int ret = 0;
4641
4642         if (con) {
4643                 ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4644                 if (!ret)
4645                         con->is_aca_debug_mode = enable;
4646         }
4647
4648         return ret;
4649 }
4650
4651 int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
4652 {
4653         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4654         int ret = 0;
4655
4656         if (con) {
4657                 if (amdgpu_aca_is_enabled(adev))
4658                         ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
4659                 else
4660                         ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
4661                 if (!ret)
4662                         con->is_aca_debug_mode = enable;
4663         }
4664
4665         return ret;
4666 }
4667
4668 bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
4669 {
4670         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4671         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4672         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4673
4674         if (!con)
4675                 return false;
4676
4677         if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
4678             (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
4679                 return con->is_aca_debug_mode;
4680         else
4681                 return true;
4682 }
4683
4684 bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
4685                                      unsigned int *error_query_mode)
4686 {
4687         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
4688         const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
4689         const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
4690
4691         if (!con) {
4692                 *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
4693                 return false;
4694         }
4695
4696         if (amdgpu_sriov_vf(adev)) {
4697                 *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
4698         } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
4699                 *error_query_mode =
4700                         (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
4701         } else {
4702                 *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
4703         }
4704
4705         return true;
4706 }
4707
4708 /* Register each ip ras block into amdgpu ras */
4709 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
4710                 struct amdgpu_ras_block_object *ras_block_obj)
4711 {
4712         struct amdgpu_ras_block_list *ras_node;
4713         if (!adev || !ras_block_obj)
4714                 return -EINVAL;
4715
4716         ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
4717         if (!ras_node)
4718                 return -ENOMEM;
4719
4720         INIT_LIST_HEAD(&ras_node->node);
4721         ras_node->ras_obj = ras_block_obj;
4722         list_add_tail(&ras_node->node, &adev->ras_list);
4723
4724         return 0;
4725 }
4726
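/*
 * Registration sketch with hypothetical names (foo_ras, foo_ras_hw_ops),
 * shown only to illustrate the expected shape: an IP block fills in an
 * amdgpu_ras_block_object and registers it, after which
 * amdgpu_ras_late_init()/amdgpu_ras_fini() above drive its callbacks.
 *
 *     static struct amdgpu_ras_block_object foo_ras = {
 *             .ras_comm = {
 *                     .block = AMDGPU_RAS_BLOCK__SDMA,
 *                     .name = "sdma",
 *             },
 *             .hw_ops = &foo_ras_hw_ops,
 *             .ras_late_init = amdgpu_ras_block_late_init,
 *     };
 *
 *     amdgpu_ras_register_ras_block(adev, &foo_ras);
 */
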
4727 void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
4728 {
4729         if (!err_type_name)
4730                 return;
4731
4732         switch (err_type) {
4733         case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
4734                 sprintf(err_type_name, "correctable");
4735                 break;
4736         case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
4737                 sprintf(err_type_name, "uncorrectable");
4738                 break;
4739         default:
4740                 sprintf(err_type_name, "unknown");
4741                 break;
4742         }
4743 }
4744
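/*
 * The caller provides the output buffer; the longest string written is
 * "uncorrectable" (13 characters plus the terminator), so a 16-byte
 * buffer such as the one used in amdgpu_ras_inst_query_ras_error_count()
 * below is sufficient:
 *
 *     char err_type_name[16];
 *
 *     amdgpu_ras_get_error_type_name(err_type, err_type_name);
 */
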
4745 bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
4746                                          const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4747                                          uint32_t instance,
4748                                          uint32_t *memory_id)
4749 {
4750         uint32_t err_status_lo_data, err_status_lo_offset;
4751
4752         if (!reg_entry)
4753                 return false;
4754
4755         err_status_lo_offset =
4756                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4757                                             reg_entry->seg_lo, reg_entry->reg_lo);
4758         err_status_lo_data = RREG32(err_status_lo_offset);
4759
4760         if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
4761             !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
4762                 return false;
4763
4764         *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
4765
4766         return true;
4767 }
4768
4769 bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
4770                                        const struct amdgpu_ras_err_status_reg_entry *reg_entry,
4771                                        uint32_t instance,
4772                                        unsigned long *err_cnt)
4773 {
4774         uint32_t err_status_hi_data, err_status_hi_offset;
4775
4776         if (!reg_entry)
4777                 return false;
4778
4779         err_status_hi_offset =
4780                 AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
4781                                             reg_entry->seg_hi, reg_entry->reg_hi);
4782         err_status_hi_data = RREG32(err_status_hi_offset);
4783
4784         if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
4785             !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
4786                 /* keep the check here in case we need to refer to the result later */
4787                 dev_dbg(adev->dev, "Invalid err_info field\n");
4788
4789         /* read err count */
4790         *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
4791
4792         return true;
4793 }
4794
4795 void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
4796                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
4797                                            uint32_t reg_list_size,
4798                                            const struct amdgpu_ras_memory_id_entry *mem_list,
4799                                            uint32_t mem_list_size,
4800                                            uint32_t instance,
4801                                            uint32_t err_type,
4802                                            unsigned long *err_count)
4803 {
4804         uint32_t memory_id;
4805         unsigned long err_cnt;
4806         char err_type_name[16];
4807         uint32_t i, j;
4808
4809         for (i = 0; i < reg_list_size; i++) {
4810                 /* query memory_id from err_status_lo */
4811                 if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
4812                                                          instance, &memory_id))
4813                         continue;
4814
4815                 /* query err_cnt from err_status_hi */
4816                 if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
4817                                                        instance, &err_cnt) ||
4818                     !err_cnt)
4819                         continue;
4820
4821                 *err_count += err_cnt;
4822
4823                 /* log the errors */
4824                 amdgpu_ras_get_error_type_name(err_type, err_type_name);
4825                 if (!mem_list) {
4826                         /* memory_list is not supported */
4827                         dev_info(adev->dev,
4828                                  "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
4829                                  err_cnt, err_type_name,
4830                                  reg_list[i].block_name,
4831                                  instance, memory_id);
4832                 } else {
4833                         for (j = 0; j < mem_list_size; j++) {
4834                                 if (memory_id == mem_list[j].memory_id) {
4835                                         dev_info(adev->dev,
4836                                                  "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
4837                                                  err_cnt, err_type_name,
4838                                                  reg_list[i].block_name,
4839                                                  instance, mem_list[j].name);
4840                                         break;
4841                                 }
4842                         }
4843                 }
4844         }
4845 }
4846
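/*
 * Caller sketch with a hypothetical register table (foo_err_regs), shown
 * only to illustrate the expected call shape: an IP block that keeps an
 * array of amdgpu_ras_err_status_reg_entry entries and has no memory-id
 * list would query one instance as
 *
 *     unsigned long err_count = 0;
 *
 *     amdgpu_ras_inst_query_ras_error_count(adev, foo_err_regs,
 *                                           ARRAY_SIZE(foo_err_regs),
 *                                           NULL, 0, instance,
 *                                           AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
 *                                           &err_count);
 */
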
4847 void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
4848                                            const struct amdgpu_ras_err_status_reg_entry *reg_list,
4849                                            uint32_t reg_list_size,
4850                                            uint32_t instance)
4851 {
4852         uint32_t err_status_lo_offset, err_status_hi_offset;
4853         uint32_t i;
4854
4855         for (i = 0; i < reg_list_size; i++) {
4856                 err_status_lo_offset =
4857                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4858                                                     reg_list[i].seg_lo, reg_list[i].reg_lo);
4859                 err_status_hi_offset =
4860                         AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
4861                                                     reg_list[i].seg_hi, reg_list[i].reg_hi);
4862                 WREG32(err_status_lo_offset, 0);
4863                 WREG32(err_status_hi_offset, 0);
4864         }
4865 }
4866
4867 int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
4868 {
4869         memset(err_data, 0, sizeof(*err_data));
4870
4871         INIT_LIST_HEAD(&err_data->err_node_list);
4872
4873         return 0;
4874 }
4875
4876 static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
4877 {
4878         if (!err_node)
4879                 return;
4880
4881         list_del(&err_node->node);
4882         kvfree(err_node);
4883 }
4884
4885 void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
4886 {
4887         struct ras_err_node *err_node, *tmp;
4888
4889         list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
4890                 amdgpu_ras_error_node_release(err_node);
4891 }
4892
4893 static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
4894                                                              struct amdgpu_smuio_mcm_config_info *mcm_info)
4895 {
4896         struct ras_err_node *err_node;
4897         struct amdgpu_smuio_mcm_config_info *ref_id;
4898
4899         if (!err_data || !mcm_info)
4900                 return NULL;
4901
4902         for_each_ras_error(err_node, err_data) {
4903                 ref_id = &err_node->err_info.mcm_info;
4904
4905                 if (mcm_info->socket_id == ref_id->socket_id &&
4906                     mcm_info->die_id == ref_id->die_id)
4907                         return err_node;
4908         }
4909
4910         return NULL;
4911 }
4912
4913 static struct ras_err_node *amdgpu_ras_error_node_new(void)
4914 {
4915         struct ras_err_node *err_node;
4916
4917         err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
4918         if (!err_node)
4919                 return NULL;
4920
4921         INIT_LIST_HEAD(&err_node->node);
4922
4923         return err_node;
4924 }
4925
4926 static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
4927 {
4928         struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
4929         struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
4930         struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
4931         struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
4932
4933         if (unlikely(infoa->socket_id != infob->socket_id))
4934                 return infoa->socket_id - infob->socket_id;
4935         else
4936                 return infoa->die_id - infob->die_id;
4937
4938         return 0;
4939 }
4940
4941 static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
4942                                 struct amdgpu_smuio_mcm_config_info *mcm_info)
4943 {
4944         struct ras_err_node *err_node;
4945
4946         err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
4947         if (err_node)
4948                 return &err_node->err_info;
4949
4950         err_node = amdgpu_ras_error_node_new();
4951         if (!err_node)
4952                 return NULL;
4953
4954         memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
4955
4956         err_data->err_list_count++;
4957         list_add_tail(&err_node->node, &err_data->err_node_list);
4958         list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
4959
4960         return &err_node->err_info;
4961 }
4962
4963 int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
4964                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4965                                         u64 count)
4966 {
4967         struct ras_err_info *err_info;
4968
4969         if (!err_data || !mcm_info)
4970                 return -EINVAL;
4971
4972         if (!count)
4973                 return 0;
4974
4975         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4976         if (!err_info)
4977                 return -EINVAL;
4978
4979         err_info->ue_count += count;
4980         err_data->ue_count += count;
4981
4982         return 0;
4983 }
4984
4985 int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
4986                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
4987                                         u64 count)
4988 {
4989         struct ras_err_info *err_info;
4990
4991         if (!err_data || !mcm_info)
4992                 return -EINVAL;
4993
4994         if (!count)
4995                 return 0;
4996
4997         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
4998         if (!err_info)
4999                 return -EINVAL;
5000
5001         err_info->ce_count += count;
5002         err_data->ce_count += count;
5003
5004         return 0;
5005 }
5006
5007 int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
5008                                         struct amdgpu_smuio_mcm_config_info *mcm_info,
5009                                         u64 count)
5010 {
5011         struct ras_err_info *err_info;
5012
5013         if (!err_data || !mcm_info)
5014                 return -EINVAL;
5015
5016         if (!count)
5017                 return 0;
5018
5019         err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
5020         if (!err_info)
5021                 return -EINVAL;
5022
5023         err_info->de_count += count;
5024         err_data->de_count += count;
5025
5026         return 0;
5027 }
5028
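/*
 * Illustrative lifecycle of the ras_err_data helpers above (not taken from
 * the driver source); counts are keyed per socket/die via
 * amdgpu_smuio_mcm_config_info:
 *
 *     struct ras_err_data err_data;
 *     struct amdgpu_smuio_mcm_config_info mcm_info = {
 *             .socket_id = 0,
 *             .die_id = 0,
 *     };
 *
 *     amdgpu_ras_error_data_init(&err_data);
 *     amdgpu_ras_error_statistic_ue_count(&err_data, &mcm_info, 1);
 *     amdgpu_ras_error_statistic_ce_count(&err_data, &mcm_info, 2);
 *     ... consume err_data.ue_count / err_data.ce_count ...
 *     amdgpu_ras_error_data_fini(&err_data);
 */
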
5029 #define mmMP0_SMN_C2PMSG_92     0x1609C
5030 #define mmMP0_SMN_C2PMSG_126    0x160BE
5031 static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
5032                                                  u32 instance)
5033 {
5034         u32 socket_id, aid_id, hbm_id;
5035         u32 fw_status;
5036         u32 boot_error;
5037         u64 reg_addr;
5038
5039         /* The smn addressing pattern on other SOCs may differ from the
5040          * aqua_vanjaram one. Revisit this code if the pattern changes;
5041          * in that case, replace the aqua_vanjaram implementation with
5042          * a more common helper. */
5043         reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5044                    aqua_vanjaram_encode_ext_smn_addressing(instance);
5045         fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5046
5047         reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
5048                    aqua_vanjaram_encode_ext_smn_addressing(instance);
5049         boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5050
5051         socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
5052         aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
5053         hbm_id = ((AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error) == 1) ? 0 : 1);
5054
5055         if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
5056                 dev_info(adev->dev,
5057                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
5058                          socket_id, aid_id, hbm_id, fw_status);
5059
5060         if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
5061                 dev_info(adev->dev,
5062                          "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
5063                          socket_id, aid_id, fw_status);
5064
5065         if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
5066                 dev_info(adev->dev,
5067                          "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
5068                          socket_id, aid_id, fw_status);
5069
5070         if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
5071                 dev_info(adev->dev,
5072                          "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
5073                          socket_id, aid_id, fw_status);
5074
5075         if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
5076                 dev_info(adev->dev,
5077                          "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
5078                          socket_id, aid_id, fw_status);
5079
5080         if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
5081                 dev_info(adev->dev,
5082                          "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
5083                          socket_id, aid_id, fw_status);
5084
5085         if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
5086                 dev_info(adev->dev,
5087                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
5088                          socket_id, aid_id, hbm_id, fw_status);
5089
5090         if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
5091                 dev_info(adev->dev,
5092                          "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
5093                          socket_id, aid_id, hbm_id, fw_status);
5094
5095         if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
5096                 dev_info(adev->dev,
5097                          "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
5098                          socket_id, aid_id, fw_status);
5099
5100         if (AMDGPU_RAS_GPU_ERR_UNKNOWN(boot_error))
5101                 dev_info(adev->dev,
5102                          "socket: %d, aid: %d, fw_status: 0x%x, unknown boot time errors\n",
5103                          socket_id, aid_id, fw_status);
5104 }
5105
5106 static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
5107                                            u32 instance)
5108 {
5109         u64 reg_addr;
5110         u32 reg_data;
5111         int retry_loop;
5112
5113         reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
5114                    aqua_vanjaram_encode_ext_smn_addressing(instance);
5115
5116         for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
5117                 reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
5118                 if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
5119                         return false;
5120                 else
5121                         msleep(1);
5122         }
5123
5124         return true;
5125 }
5126
5127 void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
5128 {
5129         u32 i;
5130
5131         for (i = 0; i < num_instances; i++) {
5132                 if (amdgpu_ras_boot_error_detected(adev, i))
5133                         amdgpu_ras_boot_time_error_reporting(adev, i);
5134         }
5135 }
5136
5137 int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
5138 {
5139         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5140         struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
5141         uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
5142         int ret = 0;
5143
5144         mutex_lock(&con->page_rsv_lock);
5145         ret = amdgpu_vram_mgr_query_page_status(mgr, start);
5146         if (ret == -ENOENT)
5147                 ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
5148         mutex_unlock(&con->page_rsv_lock);
5149
5150         return ret;
5151 }
5152
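/*
 * Note that pfn is a GPU page frame number, i.e. a retired byte address
 * right-shifted by AMDGPU_GPU_PAGE_SHIFT; the helper shifts it back before
 * querying and, if needed, reserving the VRAM range.
 */
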
5153 void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
5154                                 const char *fmt, ...)
5155 {
5156         struct va_format vaf;
5157         va_list args;
5158
5159         va_start(args, fmt);
5160         vaf.fmt = fmt;
5161         vaf.va = &args;
5162
5163         if (RAS_EVENT_ID_IS_VALID(event_id))
5164                 dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
5165         else
5166                 dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
5167
5168         va_end(args);
5169 }
5170
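/*
 * This is presumably the backend of the RAS_EVENT_LOG() macro used in
 * amdgpu_ras_global_ras_isr() above: messages carrying a valid event id
 * are prefixed with "{<seqno>}", all others are printed as-is.
 */
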
5171 bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
5172 {
5173         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5174
5175         if (!con)
5176                 return false;
5177
5178         return con->is_rma;
5179 }