/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "nbio_v4_3.h"
#include "nbio_v7_9.h"
#include "atom.h"
#include "amdgpu_reset.h"

#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>

static bool notifier_registered;
#endif

static const char *RAS_FS_NAME = "ras";

const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};

struct amdgpu_ras_block_list {
	/* ras block link */
	struct list_head node;

	struct amdgpu_ras_block_object *ras_obj;
};

const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}

#define ras_block_str(_BLOCK_) \
	(((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")

#define ras_err_str(i) (ras_error_string[ffs(i)])
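
/*
 * Illustrative mapping (assuming the usual one-hot AMDGPU_RAS_ERROR__*
 * values): ffs(AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE == 0x4) is 3, which
 * indexes "multi_uncorrectable" in ras_error_string[] above.
 */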

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER		(100 * 1024 * 1024ULL)
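
/*
 * Worked example (illustrative, assuming the default bad-page threshold is
 * derived as vram_size / RAS_BAD_PAGE_COVER): a 16 GiB board would tolerate
 * roughly 16 GiB / 100 MiB = ~163 retired pages under this heuristic.
 */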

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
	struct amdgpu_device *devs[MAX_GPU_INSTANCE];
	int num_gpu;
};
static struct mce_notifier_adev_list mce_adev_list;
#endif

void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}

static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}

static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}

static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};

static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_string[i]) == 0)
			return 0;
	}
	return -EINVAL;
}

static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}

static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
				struct ras_debug_if *data)
{
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t mask, inst_mask = data->inject.instance_mask;

	/* no need to set instance mask if there is only one instance */
	if (num_xcc <= 1 && inst_mask) {
		data->inject.instance_mask = 0;
		dev_dbg(adev->dev,
			"RAS inject mask(0x%x) isn't supported and force it to 0.\n",
			inst_mask);

		return;
	}

	switch (data->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		mask = GENMASK(num_xcc - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		mask = GENMASK(adev->sdma.num_instances - 1, 0);
		break;
	case AMDGPU_RAS_BLOCK__VCN:
	case AMDGPU_RAS_BLOCK__JPEG:
		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
		break;
	default:
		mask = inst_mask;
		break;
	}

	/* remove invalid bits in instance mask */
	data->inject.instance_mask &= mask;
	if (inst_mask != data->inject.instance_mask)
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
}
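
/*
 * Worked example (illustrative): on a part with num_xcc == 8, GENMASK(7, 0)
 * is 0xff, so a user-supplied GFX inject mask of 0x1ff would be trimmed to
 * 0xff and the adjustment reported via dev_dbg above.
 */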

/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * The control interface accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members, they are block, type, sub_block_index, name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has three more members than head, they are address, value and mask.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control interface.
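 *
 * A minimal user-space sketch (illustrative only, not part of the driver;
 * it assumes a local definition of struct ras_debug_if matching the
 * kernel's layout, and card 0 as the target). op 1 enables RAS on the
 * block, as described above:
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = { 0 };
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	data.op = 1;
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	if (write(fd, &data, sizeof(data)) != sizeof(data))
 *		perror("ras_ctrl");
 *	close(fd);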
 *
 * From shell
 *
 * .. code-block:: bash
 *
 *	echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *	echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
 *
 * Where N is the card you want to affect.
 *
 * "disable" requires only the block.
 * "enable" requires the block and error type.
 * "inject" requires the block, error type, address, and value.
 *
 * The block is one of: umc, sdma, gfx, etc.
 *	see ras_block_string[] for details
 *
 * The error type is one of: ue, ce, where,
 *	ue is multi-uncorrectable
 *	ce is single-correctable
 *
 * The sub-block is the sub-block index, pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and its default value is 0x1.
 *
 * For instance,
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result of the operation?
 *
 * To check disable/enable, see "ras" features at,
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * To check inject, see the corresponding error count at,
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 *
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
				 "already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}

/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram.  This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 *
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}

static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};

/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Some examples:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 *
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
}

/* obj begin */

#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)

static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
}
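
/*
 * Note (descriptive): con->objs[] holds one ras_manager per ras block, with
 * the MCA sub-blocks appended after the regular blocks, i.e. its length is
 * AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT, which is why the MCA
 * lookups below index at AMDGPU_RAS_BLOCK__LAST + sub_block_index.
 */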

/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return NULL;

		obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
	} else
		obj = &con->objs[head->block];

	/* already exist. return obj? */
	if (alive_obj(obj))
		return NULL;

	if (amdgpu_ras_error_data_init(&obj->err_data))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}

/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_enabled || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		if (head->block == AMDGPU_RAS_BLOCK__MCA) {
			if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
				return NULL;

			obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
		} else
			obj = &con->objs[head->block];

		if (alive_obj(obj))
			return obj;
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj))
				return obj;
		}
	}

	return NULL;
}
/* obj end */

/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	return adev->ras_hw_enabled & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
					 struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}

/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware supports ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}

/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	/* For non-gfx ip, do not enable ras feature if it is not allowed */
	/* For gfx ip, regardless of feature support status, */
	/* Force issue enable or disable ras feature commands */
	if (head->block != AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_ras_is_feature_allowed(adev, head))
		goto out;

	/* Only enable gfx ras feature from host side */
	if (head->block == AMDGPU_RAS_BLOCK__GFX &&
	    !amdgpu_sriov_vf(adev) &&
	    !amdgpu_ras_intr_triggered()) {
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable":"disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);
out:
	return 0;
}

/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
			     struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must send to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}

static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp.
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}

static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int i;
	const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};

		if (i == AMDGPU_RAS_BLOCK__MCA)
			continue;

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
		struct ras_common_if head = {
			.block = AMDGPU_RAS_BLOCK__MCA,
			.type = default_ras_type,
			.sub_block_index = i,
		};

		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us.
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */

static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
		enum amdgpu_ras_block block)
{
	if (!block_obj)
		return -EINVAL;

	if (block_obj->ras_comm.block == block)
		return 0;

	return -EINVAL;
}

static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
					enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;

	if (block >= AMDGPU_RAS_BLOCK__LAST)
		return NULL;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_block_match) {
			if (obj->ras_block_match(obj, block, sub_block_index) == 0)
				return obj;
		} else {
			if (amdgpu_ras_block_match_default(obj, block) == 0)
				return obj;
		}
	}

	return NULL;
}

static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}

static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
					      struct ras_manager *ras_mgr,
					      struct ras_err_data *err_data,
					      const char *blk_name,
					      bool is_ue)
{
	struct amdgpu_smuio_mcm_config_info *mcm_info;
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new uncorrectable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ue_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld uncorrectable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ce_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new correctable hardware errors detected in %s block, "
					 "no user action is needed\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ce_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld correctable hardware errors detected in total in %s block, "
				 "no user action is needed\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ce_count, blk_name);
		}
	}
}

static inline bool err_data_has_source_info(struct ras_err_data *data)
{
	return !list_empty(&data->err_node_list);
}

static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
					     struct ras_query_if *query_if,
					     struct ras_err_data *err_data)
{
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block, no user "
				 "action is needed.\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ce_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block, no user "
				 "action is needed.\n",
				 ras_mgr->err_data.ce_count,
				 blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, blk_name, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ue_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ue_count,
				 blk_name);
		}
	}
}

static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
{
	struct ras_err_node *err_node;
	struct ras_err_info *err_info;

	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;

			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for the legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
	}
}

static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
						struct ras_query_if *info,
						struct ras_err_data *err_data,
						unsigned int error_query_mode)
{
	enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
	struct amdgpu_ras_block_object *block_obj = NULL;

	if (blk == AMDGPU_RAS_BLOCK_COUNT)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
		return -EINVAL;

	if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
			amdgpu_ras_get_ecc_info(adev, err_data);
		} else {
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
				return -EINVAL;
			}

			if (block_obj->hw_ops->query_ras_error_count)
				block_obj->hw_ops->query_ras_error_count(adev, err_data);

			if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
			    (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
				if (block_obj->hw_ops->query_ras_error_status)
					block_obj->hw_ops->query_ras_error_status(adev);
			}
		}
	} else {
		/* FIXME: add code to check return value later */
		amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
		amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
	}

	return 0;
}
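
/*
 * Note (descriptive): there are two query paths above.
 * AMDGPU_RAS_DIRECT_ERROR_QUERY reads the counters straight from the IP (or
 * via the UMC ECC-info path), while the other path asks the SMU/MCA firmware
 * to log UE and CE errors into err_data instead.
 */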

/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
		ret = -EINVAL;
		goto out_fini_err_data;
	}

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}

int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
	struct amdgpu_hive_info *hive;
	int hive_ras_recovery = 0;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
		return -EOPNOTSUPP;
	}

	if (!amdgpu_ras_is_supported(adev, block) ||
	    !amdgpu_ras_get_mca_debug_mode(adev))
		return -EOPNOTSUPP;

	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	/* skip ras error reset in gpu reset */
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
	    hive_ras_recovery) &&
	    mca_funcs && mca_funcs->mca_set_debug_mode)
		return -EOPNOTSUPP;

	if (block_obj->hw_ops->reset_ras_error_count)
		block_obj->hw_ops->reset_ras_error_count(adev);

	return 0;
}

int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);

	if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
		return 0;

	if ((block == AMDGPU_RAS_BLOCK__GFX) ||
	    (block == AMDGPU_RAS_BLOCK__MMHUB)) {
		if (block_obj->hw_ops->reset_ras_error_status)
			block_obj->hw_ops->reset_ras_error_status(adev);
	}

	return 0;
}

/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);

	/* inject on guest isn't allowed, return success directly */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (!obj)
		return -EINVAL;

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
						info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);

	return ret;
}

/**
 * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 for query success or do nothing, otherwise return an error
 * on failure
 */
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	*ce_count += query_info->ce_count;
	*ue_count += query_info->ue_count;

	/* some hardware/IP supports read to clear,
	 * no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
	}

	return 0;
}

/**
 * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
 * @adev: pointer to AMD GPU device
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support query ras error counters/status
 *
 * If set, @ce_count or @ue_count, count and return the corresponding
 * error counts in those integer pointers. Return 0 if the device
 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
 */
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
				 unsigned long *ce_count,
				 unsigned long *ue_count,
				 struct ras_query_if *query_info)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	unsigned long ce, ue;
	int ret;

	if (!adev->ras_enabled || !con)
		return -EOPNOTSUPP;

	/* Don't count since no reporting.
	 */
	if (!ce_count && !ue_count)
		return 0;

	ce = 0;
	ue = 0;
	if (!query_info) {
		/* query all the ip blocks that support ras query interface */
		list_for_each_entry(obj, &con->head, node) {
			struct ras_query_if info = {
				.head = obj->head,
			};

			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
		}
	} else {
		/* query specific ip block */
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
	}

	if (ret)
		return ret;

	if (ce_count)
		*ce_count = ce;

	if (ue_count)
		*ue_count = ue;

	return 0;
}
/* query/inject/cure end */

/* sysfs begin */

static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}

/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and cannot be used.
 *
 * P: pending for reserve, this gpu page is marked as bad, and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. this gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 *
 */
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}
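
/*
 * Note (descriptive): every line emitted above is exactly element_size bytes
 * (the length of the "0xabcdabcd : 0x12345678 : R\n" template), so a read at
 * byte offset ppos starts at the first record whose line begins at or after
 * ppos (start is rounded up) and stops before end (rounded down), letting
 * user space read the table in fixed-size chunks without splitting records.
 */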

static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}

static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, version_attr);
	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
}

static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, schema_attr);
	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
}

static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	if (adev->dev->kobj.sd)
		sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}

int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		"%s_err_count", head->name);

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
			.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}

int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	if (adev->dev->kobj.sd)
		sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_dev_attr_node(adev);

	return 0;
}
/* sysfs end */

/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 *
 */
/* debugfs begin */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *dir;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will
	 * be scheduled. But due to the known problem in GPU recovery failing
	 * to bring the GPU back, the interface below provides the user a
	 * direct way to reboot the system automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated; the normal GPU recovery
	 * routine will never be called in that case.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * User could set this not to clean up hardware's error count register
	 * of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
	return dir;
}

static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}

void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
		    (obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
					get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}

	amdgpu_mca_smu_debugfs_init(adev, dir);
}

/* debugfs end */

/* ras fs */
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		amdgpu_ras_sysfs_features_read, NULL);
static DEVICE_ATTR(version, 0444,
		amdgpu_ras_sysfs_version_show, NULL);
static DEVICE_ATTR(schema, 0444,
		amdgpu_ras_sysfs_schema_show, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		&con->version_attr.attr,
		&con->schema_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	group.attrs = attrs;

	/* add features entry */
	con->features_attr = dev_attr_features;
	sysfs_attr_init(attrs[0]);

	/* add version entry */
	con->version_attr = dev_attr_version;
	sysfs_attr_init(attrs[1]);

	/* add schema entry */
	con->schema_attr = dev_attr_schema;
	sysfs_attr_init(attrs[2]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}

static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
/* ras fs end */

/* ih begin */

/* For the hardware that cannot enable the bif ring for both ras_controller_irq
 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
 * register to check whether the interrupt is triggered or not, and properly
 * ack the interrupt if it is there
 */
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on host side */
	if (amdgpu_sriov_vf(adev))
		return;

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_controller_intr_no_bifring)
		adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

	if (adev->nbio.ras &&
	    adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
		adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
}

static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	bool poison_stat = false;
	struct amdgpu_device *adev = obj->adev;
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);

	if (!block_obj)
		return;

	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need a poison
	 * consumption handler
	 */
	if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
		poison_stat = block_obj->hw_ops->query_poison_status(adev);
		if (!poison_stat) {
			/* Not poison consumption interrupt, no need to handle it */
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
					block_obj->ras_comm.name);

			return;
		}
	}

	amdgpu_umc_poison_handler(adev, false);

	if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
		poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);

	/* gpu reset is fallback for failed and default cases */
	if (poison_stat) {
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
				block_obj->ras_comm.name);
		amdgpu_ras_reset_gpu(adev);
	} else {
		amdgpu_gfx_poison_consumption_handler(adev, entry);
	}
}

static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	dev_info(obj->adev->dev,
		"Poison is created, no user action is needed.\n");
}

static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
				struct amdgpu_iv_entry *entry)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct ras_err_data err_data;
	int ret;

	if (!data->cb)
		return;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return;

	/* Let IP handle its data, we may need to get the output
	 * from the callback to update the error type/count, etc
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* ue will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave that recovery to the IP; here we just dispatch
	 * the error.
	 */
	if (ret == AMDGPU_RAS_SUCCESS) {
		/* these counts could be left as 0 if
		 * some blocks do not count error number
		 */
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
	}

	amdgpu_ras_error_data_fini(&err_data);
}

static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;

	while (data->rptr != data->wptr) {
		rmb();
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		wmb();
		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
			else
				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
		} else {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_umc_handler(obj, &entry);
			else
				dev_warn(obj->adev->dev,
					"No RAS interrupt handler for non-UMC block with poison disabled.\n");
		}
	}
}

static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	wmb();
	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}
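
/*
 * Note (descriptive): the IH ring above is a fixed-size byte ring of 64
 * aligned entries. rptr/wptr advance in units of aligned_element_size modulo
 * ring_size; e.g. if the aligned element size were 96 bytes, wptr would step
 * 0, 96, 192, ... and wrap at 64 * 96. There is no overflow check, hence the
 * "Might be overflow" comment above.
 */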

int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}

int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
	struct ras_ih_data *data;
	struct amdgpu_ras_block_object *ras_obj;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, head);
		if (!obj)
			return -EINVAL;
	} else
		get_obj(obj);

	ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = ras_obj->ras_cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);

	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	/* IH is ready */
	data->inuse = 1;

	return 0;
}

static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
	}

	return 0;
}
/* ih end */

/* traverse all IPs except NBIO to query the error counter */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		/*
		 * The PCIE_BIF IP has a different isr by ras controller
		 * interrupt; the specific ras counter query will be
		 * done in that isr. So skip such blocks from the common
		 * sync flood interrupt isr calling.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		/*
		 * this is a workaround for aldebaran: skip sending the msg
		 * to smu to get the ecc_info table, since the smu handling
		 * of the ecc_info table query fails temporarily.
		 * should be removed once smu fixes the ecc_info table handling.
		 */
		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
		     IP_VERSION(13, 0, 2)))
			continue;

		amdgpu_ras_query_error_status(adev, &info);

		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(11, 0, 2) &&
		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(11, 0, 4) &&
		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(13, 0, 0)) {
			if (amdgpu_ras_reset_error_status(adev, info.head.block))
				dev_warn(adev->dev, "Failed to reset error counter and error status");
		}
	}
}

/* Parse RdRspStatus and WrRspStatus */
static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
					  struct ras_query_if *info)
{
	struct amdgpu_ras_block_object *block_obj;
	/*
	 * Only two blocks need to query read/write
	 * RspStatus at the current state
	 */
	if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
	    (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
		return;

	block_obj = amdgpu_ras_get_ras_block(adev,
					     info->head.block,
					     info->head.sub_block_index);

	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return;
	}

	if (block_obj->hw_ops->query_ras_error_status)
		block_obj->hw_ops->query_ras_error_status(adev);
}

static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_enabled || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		amdgpu_ras_error_status_query(adev, &info);
	}
}

/* recovery begin */

/* return 0 on success.
 * the caller needs to free bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = 0;
	int ret = 0, status;

	if (!con || !con->eh_data || !bps || !count)
		return -EINVAL;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {
		*bps = NULL;
		ret = -EINVAL;
		goto out;
	}

	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
	if (!*bps) {
		ret = -ENOMEM;
		goto out;
	}

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,
			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
		};
		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
				data->bps[i].retired_page);
		if (status == -EBUSY)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
		else if (status == -ENOENT)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
	}

	*count = data->count;
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}

static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);
	struct amdgpu_device *remote_adev = NULL;
	struct amdgpu_device *adev = ras->adev;
	struct list_head device_list, *device_list_handle = NULL;
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

	if (hive)
		atomic_set(&hive->ras_recovery, 1);
	if (!ras->disable_ras_err_cnt_harvest) {

		/* Build list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
			device_list_handle = &hive->device_list;
		} else {
			INIT_LIST_HEAD(&device_list);
			list_add_tail(&adev->gmc.xgmi.head, &device_list);
			device_list_handle = &device_list;
		}

		list_for_each_entry(remote_adev,
				device_list_handle, gmc.xgmi.head) {
			amdgpu_ras_query_err_status(remote_adev);
			amdgpu_ras_log_on_err_counter(remote_adev);
		}

	}

	if (amdgpu_device_should_recover_gpu(ras->adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;

		/* Perform full reset in fatal error mode */
		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		else {
			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
				reset_context.method = AMD_RESET_METHOD_MODE2;
			}

			/* Fatal error occurs in poison mode, mode1 reset is used to
			 * recover gpu.
			 */
			if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
				ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

				psp_fatal_error_recovery_quirk(&adev->psp);
			}
		}

		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
	}
	atomic_set(&ras->in_recovery, 0);
	if (hive) {
		atomic_set(&hive->ras_recovery, 0);
		amdgpu_put_xgmi_hive(hive);
	}
}
/* alloc/realloc the bps array, growing in 512-entry chunks */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);

	if (!bps)
		return -ENOMEM;

	if (data->bps) {
		memcpy(bps, data->bps,
				data->count * sizeof(*data->bps));
		kfree(data->bps);
	}

	data->bps = bps;
	data->space_left += align_space - old_space;
	return 0;
}
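/* Illustrative sizing for the helper above: with data->count == 600,
 * space_left == 0 and a request for 256 more slots, old_space = 600,
 * new_space = 856 and align_space = ALIGN(856, 512) = 1024, so
 * space_left grows by 1024 - 600 = 424 slots.
 */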
/* This deals with VRAM retirement only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int ret = 0;
	uint32_t i;

	if (!con || !con->eh_data || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	for (i = 0; i < pages; i++) {
		if (amdgpu_ras_check_bad_page_unlock(con,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
			continue;

		if (!data->space_left &&
			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
			ret = -ENOMEM;
			goto out;
		}

		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
			AMDGPU_GPU_PAGE_SIZE);

		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
		data->count++;
		data->space_left--;
	}
out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}
/*
 * Write the error record array to EEPROM; updates to the bad-page data
 * are serialized via con->recovery_lock.
 * new_cnt: number of newly added UE records, excluding reserved bad
 * pages; may be NULL.
 */
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
			unsigned long *new_cnt)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;
	int save_count;

	if (!con || !con->eh_data) {
		if (new_cnt)
			*new_cnt = 0;

		return 0;
	}

	mutex_lock(&con->recovery_lock);
	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->ras_num_recs;
	mutex_unlock(&con->recovery_lock);

	if (new_cnt)
		*new_cnt = save_count / adev->umc.retire_unit;

	/* only new entries are saved */
	if (save_count > 0) {
		if (amdgpu_ras_eeprom_append(control,
					     &data->bps[control->ras_num_recs],
					     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
	}

	return 0;
}
/*
 * Read the error record array from EEPROM and reserve enough space for
 * storing new bad pages.
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras_eeprom_control *control =
		&adev->psp.ras_context.ras->eeprom_control;
	struct eeprom_table_record *bps;
	int ret;

	/* no bad page record, skip eeprom access */
	if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
		return 0;

	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
	if (ret)
		dev_err(adev->dev, "Failed to load EEPROM table records!");
	else
		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);

	kfree(bps);
	return ret;
}
static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
		uint64_t addr)
{
	struct ras_err_handler_data *data = con->eh_data;
	int i;

	addr >>= AMDGPU_GPU_PAGE_SHIFT;
	for (i = 0; i < data->count; i++)
		if (addr == data->bps[i].retired_page)
			return true;
	return false;
}
/*
 * Check whether an address belongs to a bad page.
 *
 * Note: this check is only for the UMC block.
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool ret = false;

	if (!con || !con->eh_data)
		return ret;

	mutex_lock(&con->recovery_lock);
	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
	mutex_unlock(&con->recovery_lock);
	return ret;
}
static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
					  uint32_t max_count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	/*
	 * Justification of the bad_page_cnt_threshold value in the ras
	 * structure.
	 *
	 * Generally, 0 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom or amdgpu_bad_page_threshold == -2, giving two
	 * scenarios:
	 *
	 * Bad page retirement enabled:
	 *    - If amdgpu_bad_page_threshold = -2, bad_page_cnt_threshold
	 *      is derived from the VRAM size by the formula below.
	 *    - If 0 < amdgpu_bad_page_threshold < max record length in
	 *      eeprom, the user value is used directly.
	 *
	 * Bad page retirement disabled:
	 *    - If amdgpu_bad_page_threshold = 0, bad page retirement
	 *      is disabled and bad_page_cnt_threshold is invalid.
	 */

	if (amdgpu_bad_page_threshold < 0) {
		u64 val = adev->gmc.mc_vram_size;

		do_div(val, RAS_BAD_PAGE_COVER);
		con->bad_page_cnt_threshold = min(lower_32_bits(val),
						  max_count);
	} else {
		con->bad_page_cnt_threshold = min_t(int, max_count,
						    amdgpu_bad_page_threshold);
	}
}
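/* Illustrative default for the formula above: a 64 GiB VRAM part gives
 * 68719476736 / RAS_BAD_PAGE_COVER = 655, so roughly 655 retired pages
 * (capped at max_count) are tolerated before the threshold trips.
 */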
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
	u32 max_eeprom_records_count = 0;
	bool exc_err_limit = false;
	int ret;

	if (!con || amdgpu_sriov_vf(adev))
		return 0;

	/* Allow access to RAS EEPROM via debugfs, when the ASIC
	 * supports RAS and debugfs is enabled, but when
	 * adev->ras_enabled is unset, i.e. when "ras_enable"
	 * module parameter is set to 0.
	 */
	con->adev = adev;

	if (!adev->ras_enabled)
		return 0;

	data = &con->eh_data;
	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!*data) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->eeprom_control.bad_channel_bitmap = 0;

	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);

	/* Todo: During test the SMU might fail to read the eeprom through I2C
	 * when the GPU is pending on XGMI reset during probe time
	 * (Mostly after second bus reset), skip it now
	 */
	if (adev->gmc.xgmi.pending_reset)
		return 0;
	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This call fails either when exc_err_limit is true or
	 * when ret != 0.
	 */
	if (exc_err_limit || ret)
		goto free;

	if (con->eeprom_control.ras_num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;

		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

		if (con->update_channel_flag == true) {
			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
			con->update_channel_flag = false;
		}
	}

#ifdef CONFIG_X86_MCE_AMD
	if ((adev->asic_type == CHIP_ALDEBARAN) &&
	    (adev->gmc.xgmi.connected_to_cpu))
		amdgpu_register_bad_pages_mca_notifier(adev);
#endif
	return 0;

free:
	kfree((*data)->bps);
	kfree(*data);
	con->eh_data = NULL;
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);

	/*
	 * Except for the error-threshold-exceeded case, other failures in
	 * this function do not fail amdgpu driver init.
	 */
	if (!exc_err_limit)
		ret = 0;
	else
		ret = -EINVAL;

	return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init never set it up, so there is nothing to tear down */
	if (!data)
		return 0;

	cancel_work_sync(&con->recovery_work);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
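/* Whitelist of ASICs/IP versions with RAS support; anything that does not
 * match skips RAS initialization entirely.
 */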
static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
		case IP_VERSION(13, 0, 2):
		case IP_VERSION(13, 0, 6):
			return true;
		default:
			return false;
		}
	}

	if (adev->asic_type == CHIP_IP_DISCOVERY) {
		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
		case IP_VERSION(13, 0, 0):
		case IP_VERSION(13, 0, 6):
		case IP_VERSION(13, 0, 10):
			return true;
		default:
			return false;
		}
	}

	return adev->asic_type == CHIP_VEGA10 ||
		adev->asic_type == CHIP_VEGA20 ||
		adev->asic_type == CHIP_ARCTURUS ||
		adev->asic_type == CHIP_ALDEBARAN ||
		adev->asic_type == CHIP_SIENNA_CICHLID;
}
/*
 * This is a workaround for the Vega20 workstation SKU: force-enable GFX
 * RAS and ignore the VBIOS GFX RAS flag, since GC EDC cannot be written.
 */
static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;

	if (!ctx)
		return;

	if (strnstr(ctx->vbios_pn, "D16406",
		    sizeof(ctx->vbios_pn)) ||
	    strnstr(ctx->vbios_pn, "D36002",
		    sizeof(ctx->vbios_pn)))
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
}
/*
 * Check the hardware's RAS capability, which is saved in hw_supported.
 * If the hardware does not support RAS, some RAS initialization can be
 * skipped and RAS operations from IPs can be forbidden.
 * If software itself (say, a boot parameter) limits the RAS capability,
 * IPs must still be allowed some limited operations, like disable. In
 * that case RAS is initialized as normal, but each function has to check
 * whether the operation is allowed.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
{
	adev->ras_hw_enabled = adev->ras_enabled = 0;

	if (!amdgpu_ras_asic_supported(adev))
		return;

	if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
		if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
			dev_info(adev->dev, "MEM ECC is active.\n");
			adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
						 1 << AMDGPU_RAS_BLOCK__DF);
		} else {
			dev_info(adev->dev, "MEM ECC is not present.\n");
		}

		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
			dev_info(adev->dev, "SRAM ECC is active.\n");
			if (!amdgpu_sriov_vf(adev))
				adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
							  1 << AMDGPU_RAS_BLOCK__DF);
			else
				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
							 1 << AMDGPU_RAS_BLOCK__SDMA |
							 1 << AMDGPU_RAS_BLOCK__GFX);

			/* VCN/JPEG RAS can be supported on both bare metal and
			 * SRIOV environments
			 */
			if (amdgpu_ip_version(adev, VCN_HWIP, 0) ==
				    IP_VERSION(2, 6, 0) ||
			    amdgpu_ip_version(adev, VCN_HWIP, 0) ==
				    IP_VERSION(4, 0, 0) ||
			    amdgpu_ip_version(adev, VCN_HWIP, 0) ==
				    IP_VERSION(4, 0, 3))
				adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
							 1 << AMDGPU_RAS_BLOCK__JPEG);
			else
				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
							  1 << AMDGPU_RAS_BLOCK__JPEG);

			/*
			 * XGMI RAS is not supported if xgmi num physical nodes
			 * is zero
			 */
			if (!adev->gmc.xgmi.num_physical_nodes)
				adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
		} else {
			dev_info(adev->dev, "SRAM ECC is not present.\n");
		}
	} else {
		/* The driver only manages the RAS feature of a few IP blocks
		 * when the GPU is connected to the CPU through XGMI */
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
					 1 << AMDGPU_RAS_BLOCK__SDMA |
					 1 << AMDGPU_RAS_BLOCK__MMHUB);
	}

	amdgpu_ras_get_quirks(adev);

	/* hw_supported needs to be aligned with RAS block mask. */
	adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;

	adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
		adev->ras_hw_enabled & amdgpu_ras_mask;
}
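/* Delayed work that refreshes the cached CE/UE counters read back through
 * sysfs; the device is woken via runtime PM before the query.
 */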
static void amdgpu_ras_counte_dw(struct work_struct *work)
{
	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
					      ras_counte_delay_work.work);
	struct amdgpu_device *adev = con->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long ce_count, ue_count;
	int res;

	res = pm_runtime_get_sync(dev->dev);
	if (res < 0)
		goto Out;

	/* Cache the new values. */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	pm_runtime_mark_last_busy(dev->dev);
Out:
	pm_runtime_put_autosuspend(dev->dev);
}
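/* Cache whether poison mode is supported: on CPU-connected parts it is
 * assumed on; otherwise both DF and UMC must report poison mode.
 */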
static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool df_poison, umc_poison;

	/* poison setting is useless on SRIOV guest */
	if (amdgpu_sriov_vf(adev) || !con)
		return;

	/* Init poison supported flag, the default value is false */
	if (adev->gmc.xgmi.connected_to_cpu ||
	    adev->gmc.is_app_apu) {
		/* enabled by default when GPU is connected to CPU */
		con->poison_supported = true;
	} else if (adev->df.funcs &&
	    adev->df.funcs->query_ras_poison_mode &&
	    adev->umc.ras &&
	    adev->umc.ras->query_ras_poison_mode) {
		df_poison =
			adev->df.funcs->query_ras_poison_mode(adev);
		umc_poison =
			adev->umc.ras->query_ras_poison_mode(adev);

		/* Only if poison is set in both DF and UMC can we support it */
		if (df_poison && umc_poison)
			con->poison_supported = true;
		else if (df_poison != umc_poison)
			dev_warn(adev->dev,
				 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
				 df_poison, umc_poison);
	}
}
static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
{
	/* Parentheses matter here: '|' binds tighter than '?:', so without
	 * them the poison case would drop the CE/UE/parity bits.
	 */
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
			AMDGPU_RAS_ERROR__PARITY;
}
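/* Early RAS init: allocate the per-device context, probe hardware support,
 * and wire up the NBIO fatal-error interrupts before any other RAS block
 * initializes.
 */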
int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;

	if (con)
		return 0;

	con = kzalloc(sizeof(*con) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
			sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
			GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	con->adev = adev;
	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
	atomic_set(&con->ras_ce_count, 0);
	atomic_set(&con->ras_ue_count, 0);

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev);

	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
		/* set gfx block ras context feature for VEGA20 Gaming
		 * send ras disable cmd to ras ta during ras late init.
		 */
		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);

			return 0;
		}

		r = 0;
		goto release_con;
	}

	con->update_channel_flag = false;
	con->features = 0;
	INIT_LIST_HEAD(&con->head);
	/* Might need get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	/* initialize nbio ras function ahead of any other
	 * ras functions so hardware fatal error interrupt
	 * can be enabled as early as possible */
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->nbio.ras = &nbio_v7_4_ras;
		break;
	case IP_VERSION(4, 3, 0):
		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* unlike other generations of nbio ras,
			 * nbio v4_3 only supports a fatal error interrupt
			 * to inform software that DF is frozen due to
			 * a system fatal error event. The driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS. */
			adev->nbio.ras = &nbio_v4_3_ras;
		break;
	case IP_VERSION(7, 9, 0):
		if (!adev->gmc.is_app_apu)
			adev->nbio.ras = &nbio_v7_9_ras;
		break;
	default:
		/* nbio ras is not available */
		break;
	}

	/* nbio ras block needs to be enabled ahead of other ras blocks
	 * to handle fatal errors */
	r = amdgpu_nbio_ras_sw_init(adev);
	if (r)
		return r;

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_controller_interrupt) {
		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	amdgpu_ras_query_poison_mode(adev);

	/* Get RAS schema for particular SOC */
	con->schema = amdgpu_get_ras_schema(adev);

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 adev->ras_hw_enabled, adev->ras_enabled);

	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return r;
}
int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
	if (adev->gmc.xgmi.connected_to_cpu ||
	    adev->gmc.is_app_apu)
		return 1;
	return 0;
}

static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
					struct ras_common_if *ras_block)
{
	struct ras_query_if info = {
		.head = *ras_block,
	};

	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		return 0;

	if (amdgpu_ras_query_error_status(adev, &info) != 0)
		DRM_WARN("RAS init harvest failure");

	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
		DRM_WARN("RAS init harvest reset failure");

	return 0;
}
bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;
	return con->poison_supported;
}
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
			       struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_query_if *query_info;
	unsigned long ue_count, ce_count;
	int r;

	/* disable RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in resume phase, if ras cannot be enabled,
			 * clean up all ras fs nodes and disable ras */
			goto cleanup;
		} else
			return r;
	}

	/* check for errors on warm reset for EDC-persistent-capable ASICs */
	amdgpu_persistent_edc_harvesting(adev, ras_block);

	/* in resume phase, no need to create ras fs node */
	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_poison_status ||
	    ras_obj->hw_ops->handle_poison_consumption))) {
		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
		if (r)
			goto cleanup;
	}

	if (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_ras_error_count ||
	     ras_obj->hw_ops->query_ras_error_status)) {
		r = amdgpu_ras_sysfs_create(adev, ras_block);
		if (r)
			goto interrupt;

		/* Those are the cached values at init. */
		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
		if (!query_info)
			return -ENOMEM;
		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));

		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
			atomic_set(&con->ras_ce_count, ce_count);
			atomic_set(&con->ras_ue_count, ue_count);
		}

		kfree(query_info);
	}

	return 0;

interrupt:
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}
static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
					      struct ras_common_if *ras_block)
{
	return amdgpu_ras_block_late_init(adev, ras_block);
}

/* helper function to remove ras fs node and interrupt handler */
void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
				struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj;

	if (!ras_block)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
}

static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
					       struct ras_common_if *ras_block)
{
	return amdgpu_ras_block_late_fini(adev, ras_block);
}
/* Do some init work after IP late init, as a dependency.
 * It runs in the resume / GPU-reset / boot-up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!adev->ras_enabled || !con) {
		/* clean ras context for VEGA20 Gaming after send ras disable cmd */
		amdgpu_release_ras_context(adev);

		return;
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky thing that each IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * it, ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them and one or more IPs
		 * may not be implemented yet. So we disable them on their
		 * behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should be no remaining references. */
				WARN_ON(alive_obj(obj));
			}
		}
	}
}
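/* Counterpart of amdgpu_ras_resume(): disable all RAS features before the
 * device suspends.
 */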
void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return;

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}
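/* Walk the registered RAS blocks and run each block's ras_late_init hook,
 * or the default helper when a block does not provide one.
 */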
int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;
	int r;

	/* Guest side doesn't need to init ras features */
	if (amdgpu_sriov_vf(adev))
		return 0;

	amdgpu_ras_set_mca_debug_mode(adev, false);

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}

		obj = node->ras_obj;
		if (obj->ras_late_init) {
			r = obj->ras_late_init(adev, &obj->ras_comm);
			if (r) {
				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
					obj->ras_comm.name, r);
				return r;
			}
		} else
			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
	}

	return 0;
}
/* Do some fini work before IP fini, as a dependency. */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}
int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *ras_node, *tmp;
	struct amdgpu_ras_block_object *obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
		if (ras_node->ras_obj) {
			obj = ras_node->ras_obj;
			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
			    obj->ras_fini)
				obj->ras_fini(adev, &obj->ras_comm);
			else
				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
		}

		/* Clear ras blocks from ras_list and free ras block list node */
		list_del(&ras_node->node);
		kfree(ras_node);
	}

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	cancel_delayed_work_sync(&con->ras_counte_delay_work);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}
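/* Fatal-error interrupt entry point: latch the global in-interrupt flag
 * once and schedule a mode1 reset to recover the GPU.
 */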
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

		dev_info(adev->dev, "uncorrectable hardware error "
			 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");

		ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
		amdgpu_ras_reset_gpu(adev);
	}
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
		       amdgpu_ras_intr_triggered();
	}

	return false;
}
void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
	}
}
#ifdef CONFIG_X86_MCE_AMD
static struct amdgpu_device *find_adev(uint32_t node_id)
{
	int i;
	struct amdgpu_device *adev = NULL;

	for (i = 0; i < mce_adev_list.num_gpu; i++) {
		adev = mce_adev_list.devs[i];

		if (adev && adev->gmc.xgmi.connected_to_cpu &&
		    adev->gmc.xgmi.physical_node_id == node_id)
			break;
		adev = NULL;
	}

	return adev;
}
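/* Field decodes for the MCA_IPID register of a UMC bank: GPU node id
 * (biased by GPU_ID_OFFSET), UMC instance, and channel index.
 */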
#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET		8
static int amdgpu_bad_page_notifier(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct amdgpu_device *adev = NULL;
	uint32_t gpu_id = 0;
	uint32_t umc_inst = 0, ch_inst = 0;

	/*
	 * Only process the error if it was generated in UMC_V2, which
	 * belongs to GPU UMCs, and occurred in DramECC (extended error
	 * code = 0); otherwise bail out.
	 */
	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
		    (XEC(m->status, 0x3f) == 0x0)))
		return NOTIFY_DONE;

	/* If it is a correctable error, return. */
	if (mce_is_correctable(m))
		return NOTIFY_OK;

	/* GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register. */
	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

	adev = find_adev(gpu_id);
	if (!adev) {
		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
			 gpu_id);
		return NOTIFY_DONE;
	}

	/*
	 * For an uncorrectable error, find out the UMC instance and
	 * channel index.
	 */
	umc_inst = GET_UMC_INST(m->ipid);
	ch_inst = GET_CHAN_INDEX(m->ipid);

	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
		 umc_inst, ch_inst);

	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
		return NOTIFY_OK;
	else
		return NOTIFY_DONE;
}
static struct notifier_block amdgpu_bad_page_nb = {
	.notifier_call	= amdgpu_bad_page_notifier,
	.priority	= MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
	/*
	 * Add the adev to the mce_adev_list.
	 * During mode2 reset, the amdgpu device is temporarily
	 * removed from the mgpu_info list, which can cause
	 * page retirement to fail.
	 * Use this list instead of mgpu_info to find the amdgpu
	 * device on which the UMC error was reported.
	 */
	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

	/*
	 * Register the x86 notifier only once
	 * with the MCE subsystem.
	 */
	if (notifier_registered == false) {
		mce_register_decode_chain(&amdgpu_bad_page_nb);
		notifier_registered = true;
	}
}
#endif
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
	if (!adev)
		return NULL;

	return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
	if (!adev)
		return -EINVAL;

	adev->psp.ras_context.ras = ras_con;
	return 0;
}
/* check if ras is supported on block, say, sdma, gfx */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
		unsigned int block)
{
	int ret = 0;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (block >= AMDGPU_RAS_BLOCK_COUNT)
		return 0;

	ret = ras && (adev->ras_enabled & (1 << block));

	/* For the special ASICs with MEM ECC enabled but SRAM ECC
	 * not enabled: even if the ras block is not flagged in
	 * .ras_enabled, the block can be considered to support the RAS
	 * function as long as the ASIC supports poison mode and the
	 * block has a RAS configuration.
	 */
	if (!ret &&
	    (block == AMDGPU_RAS_BLOCK__GFX ||
	     block == AMDGPU_RAS_BLOCK__SDMA ||
	     block == AMDGPU_RAS_BLOCK__VCN ||
	     block == AMDGPU_RAS_BLOCK__JPEG) &&
	    amdgpu_ras_is_poison_mode_supported(adev) &&
	    amdgpu_ras_get_ras_block(adev, block, 0))
		ret = 1;

	return ret;
}
int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
	return 0;
}
int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (con) {
		ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
		if (!ret)
			con->is_mca_debug_mode = enable;
	}

	return ret;
}

bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!con)
		return false;

	if (mca_funcs && mca_funcs->mca_set_debug_mode)
		return con->is_mca_debug_mode;
	else
		return true;
}
bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
				     unsigned int *error_query_mode)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;

	if (!con) {
		*error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
		return false;
	}

	if (mca_funcs && mca_funcs->mca_set_debug_mode)
		*error_query_mode =
			(con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
	else
		*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;

	return true;
}
/* Register each ip ras block into amdgpu ras */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
		struct amdgpu_ras_block_object *ras_block_obj)
{
	struct amdgpu_ras_block_list *ras_node;

	if (!adev || !ras_block_obj)
		return -EINVAL;

	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
	if (!ras_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&ras_node->node);
	ras_node->ras_obj = ras_block_obj;
	list_add_tail(&ras_node->node, &adev->ras_list);

	return 0;
}
void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
{
	if (!err_type_name)
		return;

	switch (err_type) {
	case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
		sprintf(err_type_name, "correctable");
		break;
	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
		sprintf(err_type_name, "uncorrectable");
		break;
	default:
		sprintf(err_type_name, "unknown");
		break;
	}
}
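/* Read one instance's *_ERR_STATUS_LO register and, when the entry is
 * flagged valid, extract the MEMORY_ID field from it.
 */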
bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
					 const struct amdgpu_ras_err_status_reg_entry *reg_entry,
					 uint32_t instance,
					 uint32_t *memory_id)
{
	uint32_t err_status_lo_data, err_status_lo_offset;

	if (!reg_entry)
		return false;

	err_status_lo_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_lo, reg_entry->reg_lo);
	err_status_lo_data = RREG32(err_status_lo_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
	    !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
		return false;

	*memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);

	return true;
}

bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
				       const struct amdgpu_ras_err_status_reg_entry *reg_entry,
				       uint32_t instance,
				       unsigned long *err_cnt)
{
	uint32_t err_status_hi_data, err_status_hi_offset;

	if (!reg_entry)
		return false;

	err_status_hi_offset =
		AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
					    reg_entry->seg_hi, reg_entry->reg_hi);
	err_status_hi_data = RREG32(err_status_hi_offset);

	if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
	    !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
		/* keep the check here in case we need to refer to the result later */
		dev_dbg(adev->dev, "Invalid err_info field\n");

	/* read err count */
	*err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);

	return true;
}
void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   const struct amdgpu_ras_memory_id_entry *mem_list,
					   uint32_t mem_list_size,
					   uint32_t instance,
					   uint32_t err_type,
					   unsigned long *err_count)
{
	uint32_t memory_id;
	unsigned long err_cnt;
	char err_type_name[16];
	uint32_t i, j;

	for (i = 0; i < reg_list_size; i++) {
		/* query memory_id from err_status_lo */
		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
							 instance, &memory_id))
			continue;

		/* query err_cnt from err_status_hi */
		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
						       instance, &err_cnt) ||
		    !err_cnt)
			continue;

		*err_count += err_cnt;

		/* log the errors */
		amdgpu_ras_get_error_type_name(err_type, err_type_name);
		if (!mem_list) {
			/* memory_list is not supported */
			dev_info(adev->dev,
				 "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
				 err_cnt, err_type_name,
				 reg_list[i].block_name,
				 instance, memory_id);
		} else {
			for (j = 0; j < mem_list_size; j++) {
				if (memory_id == mem_list[j].memory_id) {
					dev_info(adev->dev,
						 "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
						 err_cnt, err_type_name,
						 reg_list[i].block_name,
						 instance, mem_list[j].name);
					break;
				}
			}
		}
	}
}
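/* Write zero to each instance's error status register pair, clearing the
 * latched error counts.
 */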
void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
					   const struct amdgpu_ras_err_status_reg_entry *reg_list,
					   uint32_t reg_list_size,
					   uint32_t instance)
{
	uint32_t err_status_lo_offset, err_status_hi_offset;
	uint32_t i;

	for (i = 0; i < reg_list_size; i++) {
		err_status_lo_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_lo, reg_list[i].reg_lo);
		err_status_hi_offset =
			AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
						    reg_list[i].seg_hi, reg_list[i].reg_hi);
		WREG32(err_status_lo_offset, 0);
		WREG32(err_status_hi_offset, 0);
	}
}
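/* ras_err_data keeps per-(socket, die) error statistics in a sorted list
 * of ras_err_node entries; the helpers below manage that list's lifetime.
 */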
int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
{
	memset(err_data, 0, sizeof(*err_data));

	INIT_LIST_HEAD(&err_data->err_node_list);

	return 0;
}

static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
{
	if (!err_node)
		return;

	list_del(&err_node->node);
	kvfree(err_node);
}

void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
{
	struct ras_err_node *err_node, *tmp;

	list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
		amdgpu_ras_error_node_release(err_node);
}
static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
		struct amdgpu_smuio_mcm_config_info *mcm_info)
{
	struct ras_err_node *err_node;
	struct amdgpu_smuio_mcm_config_info *ref_id;

	if (!err_data || !mcm_info)
		return NULL;

	for_each_ras_error(err_node, err_data) {
		ref_id = &err_node->err_info.mcm_info;

		if (mcm_info->socket_id == ref_id->socket_id &&
		    mcm_info->die_id == ref_id->die_id)
			return err_node;
	}

	return NULL;
}

static struct ras_err_node *amdgpu_ras_error_node_new(void)
{
	struct ras_err_node *err_node;

	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
	if (!err_node)
		return NULL;

	INIT_LIST_HEAD(&err_node->node);
	return err_node;
}

static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
	struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
	struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
	struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
	struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;

	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;
	return infoa->die_id - infob->die_id;
}

static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
		struct amdgpu_smuio_mcm_config_info *mcm_info,
		struct ras_err_addr *err_addr)
{
	struct ras_err_node *err_node;

	err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
	if (err_node)
		return &err_node->err_info;

	err_node = amdgpu_ras_error_node_new();
	if (!err_node)
		return NULL;

	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
	if (err_addr)
		memcpy(&err_node->err_info.err_addr, err_addr, sizeof(*err_addr));

	err_data->err_list_count++;
	list_add_tail(&err_node->node, &err_data->err_node_list);
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);

	return &err_node->err_info;
}
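/* Accumulate uncorrectable/correctable error counts, both in the matching
 * per-(socket, die) entry and in the ras_err_data totals.
 */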
int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
		struct amdgpu_smuio_mcm_config_info *mcm_info,
		struct ras_err_addr *err_addr, u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;
	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
	if (!err_info)
		return -EINVAL;

	err_info->ue_count += count;
	err_data->ue_count += count;

	return 0;
}

int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
		struct amdgpu_smuio_mcm_config_info *mcm_info,
		struct ras_err_addr *err_addr, u64 count)
{
	struct ras_err_info *err_info;

	if (!err_data || !mcm_info)
		return -EINVAL;
	if (!count)
		return 0;

	err_info = amdgpu_ras_error_get_info(err_data, mcm_info, err_addr);
	if (!err_info)
		return -EINVAL;

	err_info->ce_count += count;
	err_data->ce_count += count;

	return 0;
}