2 * Copyright 2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/debugfs.h>
25 #include <linux/list.h>
26 #include <linux/module.h>
27 #include <linux/uaccess.h>
28 #include <linux/reboot.h>
29 #include <linux/syscalls.h>
30 #include <linux/pm_runtime.h>
33 #include "amdgpu_ras.h"
34 #include "amdgpu_atomfirmware.h"
35 #include "amdgpu_xgmi.h"
36 #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
38 #include "amdgpu_reset.h"
40 #ifdef CONFIG_X86_MCE_AMD
43 static bool notifier_registered;
45 static const char *RAS_FS_NAME = "ras";
47 const char *ras_error_string[] = {
51 "multi_uncorrectable",
55 const char *ras_block_string[] = {
75 const char *ras_mca_block_string[] = {
82 struct amdgpu_ras_block_list {
84 struct list_head node;
86 struct amdgpu_ras_block_object *ras_obj;
89 const char *get_ras_block_str(struct ras_common_if *ras_block)
94 if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
95 return "OUT OF RANGE";
97 if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
98 return ras_mca_block_string[ras_block->sub_block_index];
100 return ras_block_string[ras_block->block];
103 #define ras_block_str(_BLOCK_) \
104 (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
106 #define ras_err_str(i) (ras_error_string[ffs(i)])
108 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
110 /* inject address is 52 bits */
111 #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
113 /* typical ECC bad page rate is 1 bad page per 100MB VRAM */
114 #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
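/* Illustrative arithmetic (not from the original source): at one bad page
 * per 100 MB, a card with 32 GB of vram gets a default threshold of
 * 32 GB / 100 MB = 327 bad pages (integer division); see
 * amdgpu_ras_validate_threshold() below.
 */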
116 enum amdgpu_ras_retire_page_reservation {
117 AMDGPU_RAS_RETIRE_PAGE_RESERVED,
118 AMDGPU_RAS_RETIRE_PAGE_PENDING,
119 AMDGPU_RAS_RETIRE_PAGE_FAULT,
122 atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
124 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
126 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
128 #ifdef CONFIG_X86_MCE_AMD
129 static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
130 struct mce_notifier_adev_list {
131 struct amdgpu_device *devs[MAX_GPU_INSTANCE];
134 static struct mce_notifier_adev_list mce_adev_list;
137 void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
139 if (adev && amdgpu_ras_get_context(adev))
140 amdgpu_ras_get_context(adev)->error_query_ready = ready;
143 static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
145 if (adev && amdgpu_ras_get_context(adev))
146 return amdgpu_ras_get_context(adev)->error_query_ready;
151 static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
153 struct ras_err_data err_data = {0, 0, 0, NULL};
154 struct eeprom_table_record err_rec;
156 if ((address >= adev->gmc.mc_vram_size) ||
157 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
159 "RAS WARN: input address 0x%llx is invalid.\n",
164 if (amdgpu_ras_check_bad_page(adev, address)) {
166 "RAS WARN: 0x%llx has already been marked as bad page!\n",
171 memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
172 err_data.err_addr = &err_rec;
173 amdgpu_umc_fill_error_record(&err_data, address,
174 (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0);
176 if (amdgpu_bad_page_threshold != 0) {
177 amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
178 err_data.err_addr_cnt);
179 amdgpu_ras_save_bad_pages(adev);
182 dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
183 dev_warn(adev->dev, "Clear EEPROM:\n");
184 dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
189 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
190 size_t size, loff_t *pos)
192 struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
193 struct ras_query_if info = {
199 if (amdgpu_ras_query_error_status(obj->adev, &info))
202 /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
203 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
204 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
205 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
206 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
209 s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
211 "ce", info.ce_count);
216 s = min_t(u64, s, size);
219 if (copy_to_user(buf, &val[*pos], s))
227 static const struct file_operations amdgpu_ras_debugfs_ops = {
228 .owner = THIS_MODULE,
229 .read = amdgpu_ras_debugfs_read,
231 .llseek = default_llseek
234 static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
238 for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
240 if (strcmp(name, ras_block_string[i]) == 0)
246 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
247 const char __user *buf, size_t size,
248 loff_t *pos, struct ras_debug_if *data)
250 ssize_t s = min_t(u64, 64, size);
263 memset(str, 0, sizeof(str));
264 memset(data, 0, sizeof(*data));
266 if (copy_from_user(str, buf, s))
269 if (sscanf(str, "disable %32s", block_name) == 1)
271 else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
273 else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
275 else if (strstr(str, "retire_page") != NULL)
277 else if (str[0] && str[1] && str[2] && str[3])
278 /* ascii string, but commands are not matched. */
283 if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
284 sscanf(str, "%*s %llu", &address) != 1)
288 data->inject.address = address;
293 if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
296 data->head.block = block_id;
297 /* only ue and ce errors are supported */
298 if (!memcmp("ue", err, 2))
299 data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
300 else if (!memcmp("ce", err, 2))
301 data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
308 if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
309 &sub_block, &address, &value) != 3 &&
310 sscanf(str, "%*s %*s %*s %u %llu %llu",
311 &sub_block, &address, &value) != 3)
313 data->head.sub_block_index = sub_block;
314 data->inject.address = address;
315 data->inject.value = value;
318 if (size < sizeof(*data))
321 if (copy_from_user(data, buf, sizeof(*data)))
329 * DOC: AMDGPU RAS debugfs control interface
331 * The control interface accepts struct ras_debug_if which has two members.
333 * First member: ras_debug_if::head or ras_debug_if::inject.
335 * head is used to indicate which IP block will be under control.
337 * head has four members: block, type, sub_block_index, and name.
338 * block: which IP will be under control.
339 * type: what kind of error will be enabled/disabled/injected.
340 * sub_block_index: some IPs have subcomponents; say, GFX, SDMA.
341 * name: the name of the IP.
343 * inject has two more members than head: address and value.
344 * As their names indicate, the inject operation will write the
345 * value to the address.
347 * The second member: struct ras_debug_if::op.
348 * It selects one of three operations:
350 * - 0: disable RAS on the block. Take ::head as its data.
351 * - 1: enable RAS on the block. Take ::head as its data.
352 * - 2: inject errors on the block. Take ::inject as its data.
354 * How to use the interface?
358 * Copy the struct ras_debug_if in your code and initialize it.
359 * Write the struct to the control interface.
363 * .. code-block:: bash
365 * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
366 * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
367 * echo "inject <block> <error> <sub-block> <address> <value>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
369 * Where <N> is the card you want to affect.
371 * "disable" requires only the block.
372 * "enable" requires the block and error type.
373 * "inject" requires the block, error type, address, and value.
375 * The block is one of: umc, sdma, gfx, etc.
376 * See ras_block_string[] for the full list.
378 * The error type is one of: ue, ce, where
379 * ue is multi-uncorrectable and
380 * ce is single-correctable.
382 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
383 * The address and value are hexadecimal numbers, leading 0x is optional.
387 * .. code-block:: bash
389 * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
390 * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
391 * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
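 *
 * A bad page can also be retired directly through the same file; an
 * illustrative example, based on the "retire_page" parsing in
 * amdgpu_ras_debugfs_ctrl_parse_data() below (the address is made up):
 *
 * .. code-block:: bash
 *
 *	echo "retire_page 0x1000" > /sys/kernel/debug/dri/0/ras/ras_ctrl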
393 * How to check the result of the operation?
395 * To check disable/enable, see "ras" features at,
396 * /sys/class/drm/card[0/1/2...]/device/ras/features
398 * To check inject, see the corresponding error count at,
399 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
402 * Operations are only allowed on blocks which are supported.
403 * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
404 * to see which blocks support RAS on a particular asic.
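 *
 * For example (illustrative commands for card/minor 0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/features
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 *	cat /sys/module/amdgpu/parameters/ras_mask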
407 static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
408 const char __user *buf,
409 size_t size, loff_t *pos)
411 struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
412 struct ras_debug_if data;
415 if (!amdgpu_ras_get_error_query_ready(adev)) {
416 dev_warn(adev->dev, "RAS WARN: error injection "
417 "currently inaccessible\n");
421 ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
426 ret = amdgpu_reserve_page_direct(adev, data.inject.address);
433 if (!amdgpu_ras_is_supported(adev, data.head.block))
438 ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
441 ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
444 if ((data.inject.address >= adev->gmc.mc_vram_size) ||
445 (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
446 dev_warn(adev->dev, "RAS WARN: input address "
447 "0x%llx is invalid.",
448 data.inject.address);
453 /* umc ce/ue error injection for a bad page is not allowed */
454 if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
455 amdgpu_ras_check_bad_page(adev, data.inject.address)) {
456 dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
457 "already been marked as bad!\n",
458 data.inject.address);
462 /* data.inject.address is an offset rather than an absolute gpu address */
463 ret = amdgpu_ras_error_inject(adev, &data.inject);
477 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
479 * Some boards contain an EEPROM which is used to persistently store a list of
480 * bad pages which experienced ECC errors in vram. This interface provides
481 * a way to reset the EEPROM, e.g., after testing error injection.
485 * .. code-block:: bash
487 * echo 1 > ../ras/ras_eeprom_reset
489 * will reset the EEPROM table to 0 entries.
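 *
 * The table contents can then be inspected through the ras_eeprom_table
 * debugfs file created below; an illustrative read, assuming minor 0:
 *
 * .. code-block:: bash
 *
 *	cat /sys/kernel/debug/dri/0/ras/ras_eeprom_table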
492 static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
493 const char __user *buf,
494 size_t size, loff_t *pos)
496 struct amdgpu_device *adev =
497 (struct amdgpu_device *)file_inode(f)->i_private;
500 ret = amdgpu_ras_eeprom_reset_table(
501 &(amdgpu_ras_get_context(adev)->eeprom_control));
504 /* Something was written to EEPROM.
506 amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
513 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
514 .owner = THIS_MODULE,
516 .write = amdgpu_ras_debugfs_ctrl_write,
517 .llseek = default_llseek
520 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
521 .owner = THIS_MODULE,
523 .write = amdgpu_ras_debugfs_eeprom_write,
524 .llseek = default_llseek
528 * DOC: AMDGPU RAS sysfs Error Count Interface
530 * It allows the user to read the error count for each IP block on the gpu through
531 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
533 * It outputs multiple lines which report the uncorrected (ue) and corrected
536 * The format of one line is below,
542 * .. code-block:: bash
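 *
 *	ue: 0
 *	ce: 1
 *
 * (The counts here are illustrative; the format matches the sysfs_emit()
 * call in amdgpu_ras_sysfs_read() below.)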
548 static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
549 struct device_attribute *attr, char *buf)
551 struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
552 struct ras_query_if info = {
556 if (!amdgpu_ras_get_error_query_ready(obj->adev))
557 return sysfs_emit(buf, "Query currently inaccessible\n");
559 if (amdgpu_ras_query_error_status(obj->adev, &info))
562 if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
563 obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
564 if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
565 dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
568 return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
569 "ce", info.ce_count);
574 #define get_obj(obj) do { (obj)->use++; } while (0)
575 #define alive_obj(obj) ((obj)->use)
577 static inline void put_obj(struct ras_manager *obj)
579 if (obj && (--obj->use == 0))
580 list_del(&obj->node);
581 if (obj && (obj->use < 0))
582 DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
585 /* make one obj and return it. */
586 static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
587 struct ras_common_if *head)
589 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
590 struct ras_manager *obj;
592 if (!adev->ras_enabled || !con)
595 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
598 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
599 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
602 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
604 obj = &con->objs[head->block];
606 /* already exists, return it? */
612 list_add(&obj->node, &con->head);
618 /* return an obj equal to head, or the first when head is NULL */
619 struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
620 struct ras_common_if *head)
622 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
623 struct ras_manager *obj;
626 if (!adev->ras_enabled || !con)
630 if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
633 if (head->block == AMDGPU_RAS_BLOCK__MCA) {
634 if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
637 obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
639 obj = &con->objs[head->block];
644 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
655 /* feature ctl begin */
656 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
657 struct ras_common_if *head)
659 return adev->ras_hw_enabled & BIT(head->block);
662 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
663 struct ras_common_if *head)
665 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
667 return con->features & BIT(head->block);
671 * if obj is not created, then create one.
672 * set feature enable flag.
674 static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
675 struct ras_common_if *head, int enable)
677 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
678 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
680 /* If hardware does not support ras, then do not create obj.
681 * But if hardware supports ras, we can create the obj.
682 * The ras framework checks con->hw_supported to see if it needs to do
683 * the corresponding initialization.
684 * An IP checks con->support to see if it needs to disable ras.
686 if (!amdgpu_ras_is_feature_allowed(adev, head))
691 obj = amdgpu_ras_create_obj(adev, head);
695 /* In case we create obj somewhere else */
698 con->features |= BIT(head->block);
700 if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
701 con->features &= ~BIT(head->block);
709 /* wrapper of psp_ras_enable_features */
710 int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
711 struct ras_common_if *head, bool enable)
713 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
714 union ta_ras_cmd_input *info;
720 if (head->block == AMDGPU_RAS_BLOCK__GFX) {
721 info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
726 info->disable_features = (struct ta_ras_disable_features_input) {
727 .block_id = amdgpu_ras_block_to_ta(head->block),
728 .error_type = amdgpu_ras_error_to_ta(head->type),
731 info->enable_features = (struct ta_ras_enable_features_input) {
732 .block_id = amdgpu_ras_block_to_ta(head->block),
733 .error_type = amdgpu_ras_error_to_ta(head->type),
738 /* Do not enable if it is not allowed. */
739 WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
741 /* Only enable ras feature operation handle on host side */
742 if (head->block == AMDGPU_RAS_BLOCK__GFX &&
743 !amdgpu_sriov_vf(adev) &&
744 !amdgpu_ras_intr_triggered()) {
745 ret = psp_ras_enable_features(&adev->psp, info, enable);
747 dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
748 enable ? "enable":"disable",
749 get_ras_block_str(head),
750 amdgpu_ras_is_poison_mode_supported(adev), ret);
756 __amdgpu_ras_feature_enable(adev, head, enable);
759 if (head->block == AMDGPU_RAS_BLOCK__GFX)
764 /* Only used in device probe stage and called only once. */
765 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
766 struct ras_common_if *head, bool enable)
768 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
774 if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
776 /* There is no harm in issuing a ras TA cmd regardless of
777 * the current ras state.
778 * If current state == target state, it will do nothing.
779 * But sometimes it requests the driver to reset and repost
780 * with error code -EAGAIN.
782 ret = amdgpu_ras_feature_enable(adev, head, 1);
783 /* With an old ras TA, we might fail to enable ras.
784 * Log it and just set up the object.
785 * TODO: remove this WA in the future.
787 if (ret == -EINVAL) {
788 ret = __amdgpu_ras_feature_enable(adev, head, 1);
791 "RAS INFO: %s setup object\n",
792 get_ras_block_str(head));
795 /* set up the object, then issue a ras TA disable cmd. */
796 ret = __amdgpu_ras_feature_enable(adev, head, 1);
800 /* the gfx block ras disable cmd must be sent to the ras-ta */
801 if (head->block == AMDGPU_RAS_BLOCK__GFX)
802 con->features |= BIT(head->block);
804 ret = amdgpu_ras_feature_enable(adev, head, 0);
806 /* clean gfx block ras features flag */
807 if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
808 con->features &= ~BIT(head->block);
811 ret = amdgpu_ras_feature_enable(adev, head, enable);
816 static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
819 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
820 struct ras_manager *obj, *tmp;
822 list_for_each_entry_safe(obj, tmp, &con->head, node) {
824 * aka just release the obj and corresponding flags
827 if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
830 if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
835 return con->features;
838 static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
841 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
843 const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
845 for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
846 struct ras_common_if head = {
848 .type = default_ras_type,
849 .sub_block_index = 0,
852 if (i == AMDGPU_RAS_BLOCK__MCA)
857 * bypass psp. vbios enables ras for us,
858 * so just create the obj
860 if (__amdgpu_ras_feature_enable(adev, &head, 1))
863 if (amdgpu_ras_feature_enable(adev, &head, 1))
868 for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
869 struct ras_common_if head = {
870 .block = AMDGPU_RAS_BLOCK__MCA,
871 .type = default_ras_type,
872 .sub_block_index = i,
877 * bypass psp. vbios enables ras for us,
878 * so just create the obj
880 if (__amdgpu_ras_feature_enable(adev, &head, 1))
883 if (amdgpu_ras_feature_enable(adev, &head, 1))
888 return con->features;
890 /* feature ctl end */
892 static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
893 enum amdgpu_ras_block block)
898 if (block_obj->ras_comm.block == block)
904 static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
905 enum amdgpu_ras_block block, uint32_t sub_block_index)
907 struct amdgpu_ras_block_list *node, *tmp;
908 struct amdgpu_ras_block_object *obj;
910 if (block >= AMDGPU_RAS_BLOCK__LAST)
913 if (!amdgpu_ras_is_supported(adev, block))
916 list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
917 if (!node->ras_obj) {
918 dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
923 if (obj->ras_block_match) {
924 if (obj->ras_block_match(obj, block, sub_block_index) == 0)
927 if (amdgpu_ras_block_match_default(obj, block) == 0)
935 static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
937 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
941 * choose the right query method according to
942 * whether the smu supports querying error information
944 ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
945 if (ret == -EOPNOTSUPP) {
946 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
947 adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
948 adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
950 /* umc query_ras_error_address is also responsible for clearing
953 if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
954 adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
955 adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
958 adev->umc.ras->ecc_info_query_ras_error_count)
959 adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
962 adev->umc.ras->ecc_info_query_ras_error_address)
963 adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
967 /* query/inject/cure begin */
968 int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
969 struct ras_query_if *info)
971 struct amdgpu_ras_block_object *block_obj = NULL;
972 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
973 struct ras_err_data err_data = {0, 0, 0, NULL};
978 if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
979 amdgpu_ras_get_ecc_info(adev, &err_data);
981 block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
982 if (!block_obj || !block_obj->hw_ops) {
983 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
984 get_ras_block_str(&info->head));
988 if (block_obj->hw_ops->query_ras_error_count)
989 block_obj->hw_ops->query_ras_error_count(adev, &err_data);
991 if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
992 (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
993 (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
994 if (block_obj->hw_ops->query_ras_error_status)
995 block_obj->hw_ops->query_ras_error_status(adev);
999 obj->err_data.ue_count += err_data.ue_count;
1000 obj->err_data.ce_count += err_data.ce_count;
1002 info->ue_count = obj->err_data.ue_count;
1003 info->ce_count = obj->err_data.ce_count;
1005 if (err_data.ce_count) {
1006 if (adev->smuio.funcs &&
1007 adev->smuio.funcs->get_socket_id &&
1008 adev->smuio.funcs->get_die_id) {
1009 dev_info(adev->dev, "socket: %d, die: %d "
1010 "%ld correctable hardware errors "
1011 "detected in %s block, no user "
1012 "action is needed.\n",
1013 adev->smuio.funcs->get_socket_id(adev),
1014 adev->smuio.funcs->get_die_id(adev),
1015 obj->err_data.ce_count,
1016 get_ras_block_str(&info->head));
1018 dev_info(adev->dev, "%ld correctable hardware errors "
1019 "detected in %s block, no user "
1020 "action is needed.\n",
1021 obj->err_data.ce_count,
1022 get_ras_block_str(&info->head));
1025 if (err_data.ue_count) {
1026 if (adev->smuio.funcs &&
1027 adev->smuio.funcs->get_socket_id &&
1028 adev->smuio.funcs->get_die_id) {
1029 dev_info(adev->dev, "socket: %d, die: %d "
1030 "%ld uncorrectable hardware errors "
1031 "detected in %s block\n",
1032 adev->smuio.funcs->get_socket_id(adev),
1033 adev->smuio.funcs->get_die_id(adev),
1034 obj->err_data.ue_count,
1035 get_ras_block_str(&info->head));
1037 dev_info(adev->dev, "%ld uncorrectable hardware errors "
1038 "detected in %s block\n",
1039 obj->err_data.ue_count,
1040 get_ras_block_str(&info->head));
1047 int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
1048 enum amdgpu_ras_block block)
1050 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1052 if (!amdgpu_ras_is_supported(adev, block))
1055 if (!block_obj || !block_obj->hw_ops) {
1056 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1057 ras_block_str(block));
1061 if (block_obj->hw_ops->reset_ras_error_count)
1062 block_obj->hw_ops->reset_ras_error_count(adev);
1064 if ((block == AMDGPU_RAS_BLOCK__GFX) ||
1065 (block == AMDGPU_RAS_BLOCK__MMHUB)) {
1066 if (block_obj->hw_ops->reset_ras_error_status)
1067 block_obj->hw_ops->reset_ras_error_status(adev);
1073 /* wrapper of psp_ras_trigger_error */
1074 int amdgpu_ras_error_inject(struct amdgpu_device *adev,
1075 struct ras_inject_if *info)
1077 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1078 struct ta_ras_trigger_error_input block_info = {
1079 .block_id = amdgpu_ras_block_to_ta(info->head.block),
1080 .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
1081 .sub_block_index = info->head.sub_block_index,
1082 .address = info->address,
1083 .value = info->value,
1086 struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
1088 info->head.sub_block_index);
1093 if (!block_obj || !block_obj->hw_ops) {
1094 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1095 get_ras_block_str(&info->head));
1099 /* Calculate XGMI relative offset */
1100 if (adev->gmc.xgmi.num_physical_nodes > 1) {
1101 block_info.address =
1102 amdgpu_xgmi_get_relative_phy_addr(adev,
1103 block_info.address);
1106 if (info->head.block == AMDGPU_RAS_BLOCK__GFX) {
1107 if (block_obj->hw_ops->ras_error_inject)
1108 ret = block_obj->hw_ops->ras_error_inject(adev, info);
1110 /* If a special ras_error_inject is defined (e.g. xgmi), use it */
1111 if (block_obj->hw_ops->ras_error_inject)
1112 ret = block_obj->hw_ops->ras_error_inject(adev, &block_info);
1113 else /* if .ras_error_inject is not defined, use the default psp_ras_trigger_error */
1114 ret = psp_ras_trigger_error(&adev->psp, &block_info);
1118 dev_err(adev->dev, "ras inject %s failed %d\n",
1119 get_ras_block_str(&info->head), ret);
1125 * amdgpu_ras_query_error_count -- Get error counts of all IPs
1126 * @adev: pointer to AMD GPU device
1127 * @ce_count: pointer to an integer to be set to the count of correctable errors.
1128 * @ue_count: pointer to an integer to be set to the count of uncorrectable
1131 * If @ce_count or @ue_count is set, count and return the corresponding
1132 * error counts in those integer pointers. Return 0 if the device
1133 * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
1135 int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
1136 unsigned long *ce_count,
1137 unsigned long *ue_count)
1139 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1140 struct ras_manager *obj;
1141 unsigned long ce, ue;
1143 if (!adev->ras_enabled || !con)
1146 /* Don't count since no reporting.
1148 if (!ce_count && !ue_count)
1153 list_for_each_entry(obj, &con->head, node) {
1154 struct ras_query_if info = {
1159 res = amdgpu_ras_query_error_status(adev, &info);
1163 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1164 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
1165 if (amdgpu_ras_reset_error_status(adev, info.head.block))
1166 dev_warn(adev->dev, "Failed to reset error counter and error status");
1169 ce += info.ce_count;
1170 ue += info.ue_count;
1181 /* query/inject/cure end */
1186 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1187 struct ras_badpage **bps, unsigned int *count);
1189 static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
1192 case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
1194 case AMDGPU_RAS_RETIRE_PAGE_PENDING:
1196 case AMDGPU_RAS_RETIRE_PAGE_FAULT:
1203 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
1205 * It allows the user to read the bad pages of vram on the gpu through
1206 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
1208 * It outputs multiple lines, and each line stands for one gpu page.
1210 * The format of one line is below,
1211 * gpu pfn : gpu page size : flags
1213 * gpu pfn and gpu page size are printed in hex format.
1214 * flags can be one of the characters below:
1216 * R: reserved, this gpu page is reserved and not able to be used.
1218 * P: pending for reserve, this gpu page is marked as bad and will be reserved
1219 * in the next window of page_reserve.
1221 * F: unable to reserve, this gpu page can't be reserved for some reason.
1225 * .. code-block:: bash
1227 * 0x00000001 : 0x00001000 : R
1228 * 0x00000002 : 0x00001000 : P
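 *
 * Such lines can be read with, e.g.:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages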
1232 static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
1233 struct kobject *kobj, struct bin_attribute *attr,
1234 char *buf, loff_t ppos, size_t count)
1236 struct amdgpu_ras *con =
1237 container_of(attr, struct amdgpu_ras, badpages_attr);
1238 struct amdgpu_device *adev = con->adev;
1239 const unsigned int element_size =
1240 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
1241 unsigned int start = div64_ul(ppos + element_size - 1, element_size);
1242 unsigned int end = div64_ul(ppos + count - 1, element_size);
1244 struct ras_badpage *bps = NULL;
1245 unsigned int bps_count = 0;
1247 memset(buf, 0, count);
1249 if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
1252 for (; start < end && start < bps_count; start++)
1253 s += scnprintf(&buf[s], element_size + 1,
1254 "0x%08x : 0x%08x : %1s\n",
1257 amdgpu_ras_badpage_flags_str(bps[start].flags));
1264 static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
1265 struct device_attribute *attr, char *buf)
1267 struct amdgpu_ras *con =
1268 container_of(attr, struct amdgpu_ras, features_attr);
1270 return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1273 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
1275 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1277 sysfs_remove_file_from_group(&adev->dev->kobj,
1278 &con->badpages_attr.attr,
1282 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
1284 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1285 struct attribute *attrs[] = {
1286 &con->features_attr.attr,
1289 struct attribute_group group = {
1290 .name = RAS_FS_NAME,
1294 sysfs_remove_group(&adev->dev->kobj, &group);
1299 int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
1300 struct ras_common_if *head)
1302 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1304 if (!obj || obj->attr_inuse)
1309 snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
1310 "%s_err_count", head->name);
1312 obj->sysfs_attr = (struct device_attribute){
1314 .name = obj->fs_data.sysfs_name,
1317 .show = amdgpu_ras_sysfs_read,
1319 sysfs_attr_init(&obj->sysfs_attr.attr);
1321 if (sysfs_add_file_to_group(&adev->dev->kobj,
1322 &obj->sysfs_attr.attr,
1328 obj->attr_inuse = 1;
1333 int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
1334 struct ras_common_if *head)
1336 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1338 if (!obj || !obj->attr_inuse)
1341 sysfs_remove_file_from_group(&adev->dev->kobj,
1342 &obj->sysfs_attr.attr,
1344 obj->attr_inuse = 0;
1350 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
1352 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1353 struct ras_manager *obj, *tmp;
1355 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1356 amdgpu_ras_sysfs_remove(adev, &obj->head);
1359 if (amdgpu_bad_page_threshold != 0)
1360 amdgpu_ras_sysfs_remove_bad_page_node(adev);
1362 amdgpu_ras_sysfs_remove_feature_node(adev);
1369 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
1371 * Normally when there is an uncorrectable error, the driver will reset
1372 * the GPU to recover. However, in the event of an unrecoverable error,
1373 * the driver provides an interface to reboot the system automatically
1376 * The following file in debugfs provides that interface:
1377 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
1381 * .. code-block:: bash
1383 * echo true > .../ras/auto_reboot
1387 static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
1389 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1390 struct drm_minor *minor = adev_to_drm(adev)->primary;
1393 dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
1394 debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
1395 &amdgpu_ras_debugfs_ctrl_ops);
1396 debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
1397 &amdgpu_ras_debugfs_eeprom_ops);
1398 debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
1399 &con->bad_page_cnt_threshold);
1400 debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
1401 debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
1402 debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
1403 &amdgpu_ras_debugfs_eeprom_size_ops);
1404 con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
1406 &amdgpu_ras_debugfs_eeprom_table_ops);
1407 amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1410 * After one uncorrectable error happens, usually GPU recovery will
1411 * be scheduled. But due to a known problem where GPU recovery can fail
1412 * to bring the GPU back, the interface below provides a direct way for
1413 * the user to reboot the system automatically in such a case, when an
1414 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery routine
1415 * will never be called then.
1417 debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1420 * The user can set this to skip cleaning up hardware's error count registers
1421 * of RAS IPs during ras recovery.
1423 debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
1424 &con->disable_ras_err_cnt_harvest);
1428 static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
1429 struct ras_fs_if *head,
1432 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1439 memcpy(obj->fs_data.debugfs_name,
1441 sizeof(obj->fs_data.debugfs_name));
1443 debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
1444 obj, &amdgpu_ras_debugfs_ops);
1447 void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
1449 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1451 struct ras_manager *obj;
1452 struct ras_fs_if fs_info;
1455 * it won't be called in the resume path, so there is no need to check
1456 * the suspend and gpu reset status
1458 if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
1461 dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
1463 list_for_each_entry(obj, &con->head, node) {
1464 if (amdgpu_ras_is_supported(adev, obj->head.block) &&
1465 (obj->attr_inuse == 1)) {
1466 sprintf(fs_info.debugfs_name, "%s_err_inject",
1467 get_ras_block_str(&obj->head));
1468 fs_info.head = obj->head;
1469 amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1477 static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
1478 amdgpu_ras_sysfs_badpages_read, NULL, 0);
1479 static DEVICE_ATTR(features, S_IRUGO,
1480 amdgpu_ras_sysfs_features_read, NULL);
1481 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
1483 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1484 struct attribute_group group = {
1485 .name = RAS_FS_NAME,
1487 struct attribute *attrs[] = {
1488 &con->features_attr.attr,
1491 struct bin_attribute *bin_attrs[] = {
1497 /* add features entry */
1498 con->features_attr = dev_attr_features;
1499 group.attrs = attrs;
1500 sysfs_attr_init(attrs[0]);
1502 if (amdgpu_bad_page_threshold != 0) {
1503 /* add bad_page_features entry */
1504 bin_attr_gpu_vram_bad_pages.private = NULL;
1505 con->badpages_attr = bin_attr_gpu_vram_bad_pages;
1506 bin_attrs[0] = &con->badpages_attr;
1507 group.bin_attrs = bin_attrs;
1508 sysfs_bin_attr_init(bin_attrs[0]);
1511 r = sysfs_create_group(&adev->dev->kobj, &group);
1513 dev_err(adev->dev, "Failed to create RAS sysfs group!");
1518 static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
1520 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1521 struct ras_manager *con_obj, *ip_obj, *tmp;
1523 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1524 list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
1525 ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
1531 amdgpu_ras_sysfs_remove_all(adev);
1538 /* For the hardware that cannot enable the bif ring for both ras_controller_irq
1539 * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
1540 * register to check whether the interrupt is triggered or not, and properly
1541 * ack the interrupt if it is there
1543 void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
1545 /* Fatal error events are handled on host side */
1546 if (amdgpu_sriov_vf(adev) ||
1547 !amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF))
1550 if (adev->nbio.ras &&
1551 adev->nbio.ras->handle_ras_controller_intr_no_bifring)
1552 adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
1554 if (adev->nbio.ras &&
1555 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
1556 adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
1559 static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
1560 struct amdgpu_iv_entry *entry)
1562 bool poison_stat = false;
1563 struct amdgpu_device *adev = obj->adev;
1564 struct amdgpu_ras_block_object *block_obj =
1565 amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
1567 if (!block_obj || !block_obj->hw_ops)
1570 /* both query_poison_status and handle_poison_consumption are optional,
1571 * but at least one of them should be implemented if we need a poison
1572 * consumption handler
1574 if (block_obj->hw_ops->query_poison_status) {
1575 poison_stat = block_obj->hw_ops->query_poison_status(adev);
1577 /* Not poison consumption interrupt, no need to handle it */
1578 dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
1579 block_obj->ras_comm.name);
1585 if (!adev->gmc.xgmi.connected_to_cpu)
1586 amdgpu_umc_poison_handler(adev, false);
1588 if (block_obj->hw_ops->handle_poison_consumption)
1589 poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
1591 /* gpu reset is fallback for failed and default cases */
1593 dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
1594 block_obj->ras_comm.name);
1595 amdgpu_ras_reset_gpu(adev);
1599 static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
1600 struct amdgpu_iv_entry *entry)
1602 dev_info(obj->adev->dev,
1603 "Poison is created, no user action is needed.\n");
1606 static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
1607 struct amdgpu_iv_entry *entry)
1609 struct ras_ih_data *data = &obj->ih_data;
1610 struct ras_err_data err_data = {0, 0, 0, NULL};
1616 /* Let the IP handle its data; maybe we need to get the output
1617 * from the callback to update the error type/count, etc.
1619 ret = data->cb(obj->adev, &err_data, entry);
1620 /* ue will trigger an interrupt, and in that case
1621 * we need to do a reset to recover the whole system.
1622 * But leave it to the IP to do that recovery; here we just dispatch
1625 if (ret == AMDGPU_RAS_SUCCESS) {
1626 /* these counts could be left as 0 if
1627 * some blocks do not count error number
1629 obj->err_data.ue_count += err_data.ue_count;
1630 obj->err_data.ce_count += err_data.ce_count;
1634 static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
1636 struct ras_ih_data *data = &obj->ih_data;
1637 struct amdgpu_iv_entry entry;
1639 while (data->rptr != data->wptr) {
1641 memcpy(&entry, &data->ring[data->rptr],
1642 data->element_size);
1645 data->rptr = (data->aligned_element_size +
1646 data->rptr) % data->ring_size;
1648 if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
1649 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1650 amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
1652 amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
1654 if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
1655 amdgpu_ras_interrupt_umc_handler(obj, &entry);
1657 dev_warn(obj->adev->dev,
1658 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
1663 static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
1665 struct ras_ih_data *data =
1666 container_of(work, struct ras_ih_data, ih_work);
1667 struct ras_manager *obj =
1668 container_of(data, struct ras_manager, ih_data);
1670 amdgpu_ras_interrupt_handler(obj);
1673 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
1674 struct ras_dispatch_if *info)
1676 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
1677 struct ras_ih_data *data = &obj->ih_data;
1682 if (data->inuse == 0)
1685 /* Might be overflow... */
1686 memcpy(&data->ring[data->wptr], info->entry,
1687 data->element_size);
1690 data->wptr = (data->aligned_element_size +
1691 data->wptr) % data->ring_size;
1693 schedule_work(&data->ih_work);
1698 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
1699 struct ras_common_if *head)
1701 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1702 struct ras_ih_data *data;
1707 data = &obj->ih_data;
1708 if (data->inuse == 0)
1711 cancel_work_sync(&data->ih_work);
1714 memset(data, 0, sizeof(*data));
1720 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
1721 struct ras_common_if *head)
1723 struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
1724 struct ras_ih_data *data;
1725 struct amdgpu_ras_block_object *ras_obj;
1728 /* in case we register the IH before enabling the ras feature */
1729 obj = amdgpu_ras_create_obj(adev, head);
1735 ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
1737 data = &obj->ih_data;
1738 /* add the callback, etc. */
1739 *data = (struct ras_ih_data) {
1741 .cb = ras_obj->ras_cb,
1742 .element_size = sizeof(struct amdgpu_iv_entry),
1747 INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
1749 data->aligned_element_size = ALIGN(data->element_size, 8);
1750 /* the ring can store 64 iv entries. */
1751 data->ring_size = 64 * data->aligned_element_size;
1752 data->ring = kmalloc(data->ring_size, GFP_KERNEL);
1764 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
1766 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1767 struct ras_manager *obj, *tmp;
1769 list_for_each_entry_safe(obj, tmp, &con->head, node) {
1770 amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
1777 /* traverse all IPs except NBIO to query error counters */
1778 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
1780 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1781 struct ras_manager *obj;
1783 if (!adev->ras_enabled || !con)
1786 list_for_each_entry(obj, &con->head, node) {
1787 struct ras_query_if info = {
1792 * The PCIE_BIF IP has a different isr for the ras controller
1793 * interrupt; the specific ras counter query will be
1794 * done in that isr. So skip such blocks from the common
1795 * sync flood interrupt isr calling.
1797 if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
1801 * this is a workaround for aldebaran: skip sending the msg to
1802 * smu to get the ecc_info table, since smu's handling of the
1803 * get-ecc-info request temporarily fails.
1804 * it should be removed once smu fixes ecc_info table handling.
1806 if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
1807 (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))
1810 amdgpu_ras_query_error_status(adev, &info);
1812 if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
1813 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
1814 adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
1815 if (amdgpu_ras_reset_error_status(adev, info.head.block))
1816 dev_warn(adev->dev, "Failed to reset error counter and error status");
1821 /* Parse RdRspStatus and WrRspStatus */
1822 static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
1823 struct ras_query_if *info)
1825 struct amdgpu_ras_block_object *block_obj;
1827 * Only two blocks need to query read/write
1828 * RspStatus at the current state
1830 if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
1831 (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
1834 block_obj = amdgpu_ras_get_ras_block(adev,
1836 info->head.sub_block_index);
1838 if (!block_obj || !block_obj->hw_ops) {
1839 dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
1840 get_ras_block_str(&info->head));
1844 if (block_obj->hw_ops->query_ras_error_status)
1845 block_obj->hw_ops->query_ras_error_status(adev);
1849 static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
1851 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1852 struct ras_manager *obj;
1854 if (!adev->ras_enabled || !con)
1857 list_for_each_entry(obj, &con->head, node) {
1858 struct ras_query_if info = {
1862 amdgpu_ras_error_status_query(adev, &info);
1866 /* recovery begin */
1868 /* return 0 on success.
1869 * caller needs to free bps.
1871 static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
1872 struct ras_badpage **bps, unsigned int *count)
1874 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1875 struct ras_err_handler_data *data;
1877 int ret = 0, status;
1879 if (!con || !con->eh_data || !bps || !count)
1882 mutex_lock(&con->recovery_lock);
1883 data = con->eh_data;
1884 if (!data || data->count == 0) {
1890 *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
1896 for (; i < data->count; i++) {
1897 (*bps)[i] = (struct ras_badpage){
1898 .bp = data->bps[i].retired_page,
1899 .size = AMDGPU_GPU_PAGE_SIZE,
1900 .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
1902 status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
1903 data->bps[i].retired_page);
1904 if (status == -EBUSY)
1905 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
1906 else if (status == -ENOENT)
1907 (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
1910 *count = data->count;
1912 mutex_unlock(&con->recovery_lock);
1916 static void amdgpu_ras_do_recovery(struct work_struct *work)
1918 struct amdgpu_ras *ras =
1919 container_of(work, struct amdgpu_ras, recovery_work);
1920 struct amdgpu_device *remote_adev = NULL;
1921 struct amdgpu_device *adev = ras->adev;
1922 struct list_head device_list, *device_list_handle = NULL;
1924 if (!ras->disable_ras_err_cnt_harvest) {
1925 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
1927 /* Build list of devices to query RAS related errors */
1928 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
1929 device_list_handle = &hive->device_list;
1931 INIT_LIST_HEAD(&device_list);
1932 list_add_tail(&adev->gmc.xgmi.head, &device_list);
1933 device_list_handle = &device_list;
1936 list_for_each_entry(remote_adev,
1937 device_list_handle, gmc.xgmi.head) {
1938 amdgpu_ras_query_err_status(remote_adev);
1939 amdgpu_ras_log_on_err_counter(remote_adev);
1942 amdgpu_put_xgmi_hive(hive);
1945 if (amdgpu_device_should_recover_gpu(ras->adev)) {
1946 struct amdgpu_reset_context reset_context;
1947 memset(&reset_context, 0, sizeof(reset_context));
1949 reset_context.method = AMD_RESET_METHOD_NONE;
1950 reset_context.reset_req_dev = adev;
1952 /* Perform full reset in fatal error mode */
1953 if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
1954 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1956 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
1958 amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
1960 atomic_set(&ras->in_recovery, 0);
1963 /* alloc/realloc bps array */
1964 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
1965 struct ras_err_handler_data *data, int pages)
1967 unsigned int old_space = data->count + data->space_left;
1968 unsigned int new_space = old_space + pages;
1969 unsigned int align_space = ALIGN(new_space, 512);
1970 void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
1977 memcpy(bps, data->bps,
1978 data->count * sizeof(*data->bps));
1983 data->space_left += align_space - old_space;
1987 /* it deals with vram only. */
1988 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
1989 struct eeprom_table_record *bps, int pages)
1991 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
1992 struct ras_err_handler_data *data;
1996 if (!con || !con->eh_data || !bps || pages <= 0)
1999 mutex_lock(&con->recovery_lock);
2000 data = con->eh_data;
2004 for (i = 0; i < pages; i++) {
2005 if (amdgpu_ras_check_bad_page_unlock(con,
2006 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2009 if (!data->space_left &&
2010 amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
2015 amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
2016 bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
2017 AMDGPU_GPU_PAGE_SIZE);
2019 memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
2024 mutex_unlock(&con->recovery_lock);
2030 * write the error record array to eeprom; the function should be
2031 * protected by recovery_lock
2033 int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
2035 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2036 struct ras_err_handler_data *data;
2037 struct amdgpu_ras_eeprom_control *control;
2040 if (!con || !con->eh_data)
2043 mutex_lock(&con->recovery_lock);
2044 control = &con->eeprom_control;
2045 data = con->eh_data;
2046 save_count = data->count - control->ras_num_recs;
2047 mutex_unlock(&con->recovery_lock);
2048 /* only new entries are saved */
2049 if (save_count > 0) {
2050 if (amdgpu_ras_eeprom_append(control,
2051 &data->bps[control->ras_num_recs],
2053 dev_err(adev->dev, "Failed to save EEPROM table data!");
2057 dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
2064 * read error record array in eeprom and reserve enough space for
2065 * storing new bad pages
2067 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
2069 struct amdgpu_ras_eeprom_control *control =
2070 &adev->psp.ras_context.ras->eeprom_control;
2071 struct eeprom_table_record *bps;
2074 /* no bad page record, skip eeprom access */
2075 if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
2078 bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
2082 ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
2084 dev_err(adev->dev, "Failed to load EEPROM table records!");
2086 ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);
2092 static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
2095 struct ras_err_handler_data *data = con->eh_data;
2098 addr >>= AMDGPU_GPU_PAGE_SHIFT;
2099 for (i = 0; i < data->count; i++)
2100 if (addr == data->bps[i].retired_page)
2107 * check if an address belongs to a bad page
2109 * Note: this check is only for the umc block
2111 static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
2114 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2117 if (!con || !con->eh_data)
2120 mutex_lock(&con->recovery_lock);
2121 ret = amdgpu_ras_check_bad_page_unlock(con, addr);
2122 mutex_unlock(&con->recovery_lock);
2126 static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
2129 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2132 * Justification of value bad_page_cnt_threshold in ras structure
2134 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
2135 * in eeprom, and introduce two scenarios accordingly.
2137 * Bad page retirement enablement:
2138 * - If amdgpu_bad_page_threshold = -1,
2139 * bad_page_cnt_threshold = typical value by formula.
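 *
 * (For example, with illustrative numbers: a 64 GB card gets
 * bad_page_cnt_threshold = min(64 GB / 100 MB, max_count) =
 * min(655, max_count), as computed below.)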
2141 * - When the value from user is 0 < amdgpu_bad_page_threshold <
2142 * max record length in eeprom, use it directly.
2144 * Bad page retirement disablement:
2145 * - If amdgpu_bad_page_threshold = 0, bad page retirement
2146 * functionality is disabled, and bad_page_cnt_threshold will
2150 if (amdgpu_bad_page_threshold < 0) {
2151 u64 val = adev->gmc.mc_vram_size;
2153 do_div(val, RAS_BAD_PAGE_COVER);
2154 con->bad_page_cnt_threshold = min(lower_32_bits(val),
2157 con->bad_page_cnt_threshold = min_t(int, max_count,
2158 amdgpu_bad_page_threshold);
2162 int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
2164 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2165 struct ras_err_handler_data **data;
2166 u32 max_eeprom_records_count = 0;
2167 bool exc_err_limit = false;
2170 if (!con || amdgpu_sriov_vf(adev))
2173 /* Allow access to RAS EEPROM via debugfs, when the ASIC
2174 * supports RAS and debugfs is enabled, but when
2175 * adev->ras_enabled is unset, i.e. when "ras_enable"
2176 * module parameter is set to 0.
2180 if (!adev->ras_enabled)
2183 data = &con->eh_data;
2184 *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
2190 mutex_init(&con->recovery_lock);
2191 INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
2192 atomic_set(&con->in_recovery, 0);
2193 con->eeprom_control.bad_channel_bitmap = 0;
2195 max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count();
2196 amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2198 /* Todo: During testing the SMU might fail to read the eeprom through I2C
2199 * when the GPU is pending an XGMI reset during probe time
2200 * (mostly after the second bus reset); skip it for now
2202 if (adev->gmc.xgmi.pending_reset)
2204 ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
2206 * This call fails when exc_err_limit is true or
2209 if (exc_err_limit || ret)
2212 if (con->eeprom_control.ras_num_recs) {
2213 ret = amdgpu_ras_load_bad_pages(adev);
2217 amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2219 if (con->update_channel_flag == true) {
2220 amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2221 con->update_channel_flag = false;
2225 #ifdef CONFIG_X86_MCE_AMD
2226 if ((adev->asic_type == CHIP_ALDEBARAN) &&
2227 (adev->gmc.xgmi.connected_to_cpu))
2228 amdgpu_register_bad_pages_mca_notifier(adev);
2233 kfree((*data)->bps);
2235 con->eh_data = NULL;
2237 dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2240 * Except for the error-threshold-exceeded case, other failure cases in this
2241 * function will not fail the amdgpu driver init.
2251 static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
2253 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
2254 struct ras_err_handler_data *data = con->eh_data;
2256 /* recovery_init failed to init it, fini is useless */
2260 cancel_work_sync(&con->recovery_work);
2262 mutex_lock(&con->recovery_lock);
2263 con->eh_data = NULL;
2266 mutex_unlock(&con->recovery_lock);
2272 static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
2274 if (amdgpu_sriov_vf(adev)) {
2275 switch (adev->ip_versions[MP0_HWIP][0]) {
2276 case IP_VERSION(13, 0, 2):
2283 if (adev->asic_type == CHIP_IP_DISCOVERY) {
2284 switch (adev->ip_versions[MP0_HWIP][0]) {
2285 case IP_VERSION(13, 0, 0):
2286 case IP_VERSION(13, 0, 10):
2293 return adev->asic_type == CHIP_VEGA10 ||
2294 adev->asic_type == CHIP_VEGA20 ||
2295 adev->asic_type == CHIP_ARCTURUS ||
2296 adev->asic_type == CHIP_ALDEBARAN ||
2297 adev->asic_type == CHIP_SIENNA_CICHLID;
2301 * this is a workaround for the vega20 workstation sku:
2302 * force enable gfx ras and ignore the vbios gfx ras flag,
2303 * since GC EDC can not be written
2305 static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
2307 struct atom_context *ctx = adev->mode_info.atom_context;
2312 if (strnstr(ctx->vbios_version, "D16406",
2313 sizeof(ctx->vbios_version)) ||
2314 strnstr(ctx->vbios_version, "D36002",
2315 sizeof(ctx->vbios_version)))
2316 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
2320 * check the hardware's ras ability, which will be saved in hw_supported.
2321 * if the hardware does not support ras, we can skip some ras initialization and
2322 * forbid some ras operations from IPs.
2323 * if software itself, say a boot parameter, limits the ras ability, we still
2324 * need to allow IPs to do some limited operations, like disable. in such cases,
2325 * we have to initialize ras as normal, but need to check whether an operation is
2326 * allowed or not in each function.
2328 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
2330 adev->ras_hw_enabled = adev->ras_enabled = 0;
2332 if (!adev->is_atom_fw ||
2333 !amdgpu_ras_asic_supported(adev))
2336 if (!adev->gmc.xgmi.connected_to_cpu) {
2337 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
2338 dev_info(adev->dev, "MEM ECC is active.\n");
2339 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
2340 1 << AMDGPU_RAS_BLOCK__DF);
2342 dev_info(adev->dev, "MEM ECC is not presented.\n");
2345 if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
2346 dev_info(adev->dev, "SRAM ECC is active.\n");
2347 if (!amdgpu_sriov_vf(adev)) {
2348 adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
2349 1 << AMDGPU_RAS_BLOCK__DF);
2351 if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
2352 adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
2353 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
2354 1 << AMDGPU_RAS_BLOCK__JPEG);
2356 adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
2357 1 << AMDGPU_RAS_BLOCK__JPEG);
2359 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
2360 1 << AMDGPU_RAS_BLOCK__SDMA |
2361 1 << AMDGPU_RAS_BLOCK__GFX);
2364 dev_info(adev->dev, "SRAM ECC is not presented.\n");
2367 /* driver only manages a few IP blocks RAS feature
2368 * when GPU is connected cpu through XGMI */
2369 adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
2370 1 << AMDGPU_RAS_BLOCK__SDMA |
2371 1 << AMDGPU_RAS_BLOCK__MMHUB);
2374 amdgpu_ras_get_quirks(adev);
2376 /* hw_supported needs to be aligned with RAS block mask. */
2377 adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
2379 adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
2380 adev->ras_hw_enabled & amdgpu_ras_mask;
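
/*
 * Illustrative summary (not from the original source) of how the module
 * parameters interact with the code above, assuming the amdgpu "ras_enable"
 * and "ras_mask" parameters back amdgpu_ras_enable and amdgpu_ras_mask:
 *
 *	modprobe amdgpu ras_enable=0	-> adev->ras_enabled ends up 0
 *	modprobe amdgpu ras_mask=0x1	-> adev->ras_enabled is limited to
 *					   block 0 (UMC), since otherwise
 *					   ras_enabled = ras_hw_enabled &
 *					   amdgpu_ras_mask
 */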
static void amdgpu_ras_counte_dw(struct work_struct *work)
{
	struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
					      ras_counte_delay_work.work);
	struct amdgpu_device *adev = con->adev;
	struct drm_device *dev = adev_to_drm(adev);
	unsigned long ce_count, ue_count;
	int res;

	res = pm_runtime_get_sync(dev->dev);
	if (res < 0)
		goto Out;

	/* Cache the new values. */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	pm_runtime_mark_last_busy(dev->dev);
Out:
	pm_runtime_put_autosuspend(dev->dev);
}
int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;
	bool df_poison, umc_poison;

	if (con)
		return 0;

	con = kmalloc(sizeof(struct amdgpu_ras) +
		      sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
		      sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
		      GFP_KERNEL | __GFP_ZERO);
	if (!con)
		return -ENOMEM;

	con->adev = adev;
	INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
	atomic_set(&con->ras_ce_count, 0);
	atomic_set(&con->ras_ue_count, 0);

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev);

	if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
		/* Set the gfx block ras context feature for VEGA20 Gaming:
		 * send the ras disable cmd to the ras ta during ras late init.
		 */
		if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
			return 0;
		}

		r = 0;
		goto release_con;
	}

	con->update_channel_flag = false;
	con->features = 0;
	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	/* Initialize the nbio ras function ahead of any other ras functions,
	 * so the hardware fatal error interrupt can be enabled as early as
	 * possible. */
	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		if (!adev->gmc.xgmi.connected_to_cpu) {
			adev->nbio.ras = &nbio_v7_4_ras;
			amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block);
			adev->nbio.ras_if = &adev->nbio.ras->ras_block.ras_comm;
		}
		break;
	default:
		/* nbio ras is not available */
		break;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_controller_interrupt) {
		r = adev->nbio.ras->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.ras &&
	    adev->nbio.ras->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	/* Init the poison_supported flag; the default value is false. */
	if (adev->gmc.xgmi.connected_to_cpu) {
		/* enabled by default when the GPU is connected to the CPU */
		con->poison_supported = true;
	} else if (adev->df.funcs &&
		   adev->df.funcs->query_ras_poison_mode &&
		   adev->umc.ras &&
		   adev->umc.ras->query_ras_poison_mode) {
		df_poison = adev->df.funcs->query_ras_poison_mode(adev);
		umc_poison = adev->umc.ras->query_ras_poison_mode(adev);
		/* Only when poison is set in both DF and UMC can we support it. */
		if (df_poison && umc_poison)
			con->poison_supported = true;
		else if (df_poison != umc_poison)
			dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
				 df_poison, umc_poison);
	}
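
	/*
	 * Condensed restatement (illustrative, not from the original source)
	 * of the poison-mode decision above:
	 *
	 *	con->poison_supported = adev->gmc.xgmi.connected_to_cpu ||
	 *				(df_poison && umc_poison);
	 *
	 * with a warning when DF and UMC disagree; the zeroed allocation of
	 * con leaves the flag false by default.
	 */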

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 adev->ras_hw_enabled, adev->ras_enabled);
	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);
	return -EINVAL;
}
int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
{
	if (adev->gmc.xgmi.connected_to_cpu)
		return 1;
	return 0;
}

static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
					    struct ras_common_if *ras_block)
{
	struct ras_query_if info = {
		.head = *ras_block,
	};

	if (!amdgpu_persistent_edc_harvesting_supported(adev))
		return 0;
	if (amdgpu_ras_query_error_status(adev, &info) != 0)
		DRM_WARN("RAS init harvest failure");
	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
		DRM_WARN("RAS init harvest reset failure");
	return 0;
}
bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return false;
	return con->poison_supported;
}
/* Helper function to handle common stuff in the ip late init phase. */
int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
			       struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned long ue_count, ce_count;
	int r;

	/* disable the RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* In the resume phase, if we fail to enable ras,
			 * clean up all ras fs nodes and disable ras. */
			goto cleanup;
		} else
			return r;
	}

	/* check for errors on warm reset for edc-persistent-supported ASICs */
	amdgpu_persistent_edc_harvesting(adev, ras_block);

	/* in the resume phase, no need to create ras fs nodes */
	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb || (ras_obj->hw_ops &&
	    (ras_obj->hw_ops->query_poison_status ||
	     ras_obj->hw_ops->handle_poison_consumption))) {
		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
		if (r)
			goto cleanup;
	}

	r = amdgpu_ras_sysfs_create(adev, ras_block);
	if (r)
		goto interrupt;

	/* These are the values cached at init. */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	return 0;

interrupt:
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}
static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
					      struct ras_common_if *ras_block)
{
	return amdgpu_ras_block_late_init(adev, ras_block);
}

/* Helper function to remove the ras fs node and interrupt handler. */
void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
				struct ras_common_if *ras_block)
{
	struct amdgpu_ras_block_object *ras_obj;

	if (!ras_block)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
}

static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
					       struct ras_common_if *ras_block)
{
	amdgpu_ras_block_late_fini(adev, ras_block);
}
/* Do some init work after the IP late init, as a dependency.
 * This runs in the resume / gpu reset / boot-up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!adev->ras_enabled || !con) {
		/* clean the ras context for VEGA20 Gaming after the ras
		 * disable cmd was sent */
		amdgpu_release_ras_context(adev);
		return;
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. The tricky
		 * part is that an IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * that type, ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them, and one or more IPs
		 * may not be implemented yet. So we disable them on their
		 * behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference left */
				WARN_ON(alive_obj(obj));
			}
		}
	}
}
void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return;
	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}
int amdgpu_ras_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *node, *tmp;
	struct amdgpu_ras_block_object *obj;
	int r;

	/* the guest side doesn't need to init the ras feature */
	if (amdgpu_sriov_vf(adev))
		return 0;

	list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
		if (!node->ras_obj) {
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
			continue;
		}
		obj = node->ras_obj;
		if (obj->ras_late_init) {
			r = obj->ras_late_init(adev, &obj->ras_comm);
			if (r) {
				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
					obj->ras_comm.name, r);
				return r;
			}
		} else
			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
	}

	return 0;
}
/* Do some fini work before the IP fini, as a dependency. */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	/* Need to disable ras on all IPs here before the ip [hw/sw]fini. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}
int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras_block_list *ras_node, *tmp;
	struct amdgpu_ras_block_object *obj = NULL;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_enabled || !con)
		return 0;

	list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
		if (ras_node->ras_obj) {
			obj = ras_node->ras_obj;
			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
			    obj->ras_fini)
				obj->ras_fini(adev, &obj->ras_comm);
			else
				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
		}
		/* Clear ras blocks from ras_list and free the ras block list node. */
		list_del(&ras_node->node);
		kfree(ras_node);
	}

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	cancel_delayed_work_sync(&con->ras_counte_delay_work);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);
	return 0;
}
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	amdgpu_ras_check_supported(adev);
	if (!adev->ras_hw_enabled)
		return;

	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
		amdgpu_ras_reset_gpu(adev);
	}
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
		       amdgpu_ras_intr_triggered();
	}

	return false;
}
void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;
	if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
	}
}
#ifdef CONFIG_X86_MCE_AMD
static struct amdgpu_device *find_adev(uint32_t node_id)
{
	int i;
	struct amdgpu_device *adev = NULL;

	for (i = 0; i < mce_adev_list.num_gpu; i++) {
		adev = mce_adev_list.devs[i];
		if (adev && adev->gmc.xgmi.connected_to_cpu &&
		    adev->gmc.xgmi.physical_node_id == node_id)
			break;
		adev = NULL;
	}

	return adev;
}

#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET		8
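
/*
 * Worked example (illustrative only; the register value is made up): for a
 * hypothetical MCA_IPID value m = 0x900000301000ULL,
 *
 *	GET_MCA_IPID_GPUID(m) = 0x9, so gpu_id = 0x9 - GPU_ID_OFFSET = 1
 *	GET_UMC_INST(m)       = 0x1
 *	GET_CHAN_INDEX(m)     = 0x1 | 0x4 = 0x5
 *
 * i.e. the error would map to XGMI physical node 1, UMC instance 1,
 * channel index 5.
 */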
static int amdgpu_bad_page_notifier(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct mce *m = (struct mce *)data;
	struct amdgpu_device *adev = NULL;
	uint32_t gpu_id = 0;
	uint32_t umc_inst = 0, ch_inst = 0;

	/*
	 * If the error was generated in UMC_V2, which belongs to the GPU
	 * UMCs, and the error occurred in DramECC (extended error code = 0),
	 * then process the error; otherwise bail out.
	 */
	if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
		    (XEC(m->status, 0x3f) == 0x0)))
		return NOTIFY_DONE;

	/* If it is a correctable error, return. */
	if (mce_is_correctable(m))
		return NOTIFY_OK;

	/* The GPU Id is offset by GPU_ID_OFFSET in the MCA_IPID_UMC register. */
	gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;

	adev = find_adev(gpu_id);
	if (!adev) {
		DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
			 gpu_id);
		return NOTIFY_DONE;
	}

	/*
	 * For an uncorrectable error, find out the UMC instance and
	 * channel index.
	 */
	umc_inst = GET_UMC_INST(m->ipid);
	ch_inst = GET_CHAN_INDEX(m->ipid);

	dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
		 umc_inst, ch_inst);

	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
		return NOTIFY_OK;
	else
		return NOTIFY_DONE;
}
static struct notifier_block amdgpu_bad_page_nb = {
	.notifier_call	= amdgpu_bad_page_notifier,
	.priority	= MCE_PRIO_UC,
};

static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
{
	/*
	 * Add the adev to the mce_adev_list.
	 * During a mode2 reset, the amdgpu device is temporarily removed
	 * from the mgpu_info list, which can cause page retirement to fail.
	 * Use this list instead of mgpu_info to find the amdgpu device on
	 * which the UMC error was reported.
	 */
	mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;

	/* Register the x86 notifier with the MCE subsystem only once. */
	if (notifier_registered == false) {
		mce_register_decode_chain(&amdgpu_bad_page_nb);
		notifier_registered = true;
	}
}
#endif
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
{
	if (!adev)
		return NULL;
	return adev->psp.ras_context.ras;
}

int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
{
	if (!adev)
		return -EINVAL;
	adev->psp.ras_context.ras = ras_con;
	return 0;
}

/* Check whether ras is supported on a block, say, sdma or gfx. */
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
			    unsigned int block)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (block >= AMDGPU_RAS_BLOCK_COUNT)
		return 0;
	return ras && (adev->ras_enabled & (1 << block));
}
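
/*
 * Usage sketch (illustrative, not from the original source): IP code is
 * expected to gate its RAS paths on this helper, e.g.
 *
 *	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
 *		amdgpu_ras_feature_enable(adev, ras_block, 1);
 *
 * Note that it tests adev->ras_enabled (the software-limited mask) rather
 * than ras_hw_enabled, so boot-parameter restrictions are honored.
 */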
int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
	return 0;
}
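
/*
 * Note (explanatory, not from the original source): the atomic_cmpxchg
 * above makes the reset request idempotent. Only the caller that flips
 * in_recovery from 0 to 1 schedules recovery_work; the recovery path is
 * expected to clear in_recovery again once the reset completes, so a burst
 * of fatal-error interrupts results in a single GPU reset.
 */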
/* Register each ip's ras block into amdgpu ras. */
int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
				  struct amdgpu_ras_block_object *ras_block_obj)
{
	struct amdgpu_ras_block_list *ras_node;

	if (!adev || !ras_block_obj)
		return -EINVAL;
	if (!amdgpu_ras_asic_supported(adev))
		return 0;

	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
	if (!ras_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&ras_node->node);
	ras_node->ras_obj = ras_block_obj;
	list_add_tail(&ras_node->node, &adev->ras_list);
	return 0;
}