 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
static const char *RAS_FS_NAME = "ras";
const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};

const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
};
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

/* typical ECC bad page rate (1 bad page per 100MB VRAM) */
#define RAS_BAD_PAGE_RATE		(100 * 1024 * 1024ULL)
enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,
};

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);

static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
	if (adev && amdgpu_ras_get_context(adev))
		amdgpu_ras_get_context(adev)->error_query_ready = ready;
}

static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
{
	if (adev && amdgpu_ras_get_context(adev))
		return amdgpu_ras_get_context(adev)->error_query_ready;

	return false;
}
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
			"ue", info.ue_count,
			"ce", info.ce_count);

	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}
static const struct file_operations amdgpu_ras_debugfs_ops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ras_debugfs_read,
	.write = NULL,
	.llseek = default_llseek
};
static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
		*block_id = i;
		if (strcmp(name, ras_block_str(i)) == 0)
			return 0;
	}
	return -EINVAL;
}
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue and ce errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
					   &sub_block, &address, &value) != 3)
					return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * It accepts struct ras_debug_if which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index, and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, say, GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two more members than head: address and value.
 * As their names indicate, the inject operation will write the
 * value to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In a program:
 *
 * Copy the struct ras_debug_if in your code and initialize it.
 * Write the struct to the control node.
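 *
 * A minimal sketch of such a program (assuming the process can open the
 * card0 debugfs node and mirrors the kernel's struct ras_debug_if layout):
 *
 * .. code-block:: c
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct ras_debug_if data = { 0 };
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
 *	data.op = 1; /* 1 == enable RAS on the block */
 *
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &data, sizeof(data));
 *		close(fd);
 *	}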
 *
 * From a shell:
 *
 * .. code-block:: bash
 *
 *	echo op block [error [sub_block address value]] > .../ras/ras_ctrl
 *
 * Where,
 *
 * op: disable, enable, inject
 *	disable: only block is needed
 *	enable: block and error are needed
 *	inject: error, address, value are needed
 * block: umc, sdma, gfx, .........
 *	see ras_block_string[] for details
 * error: ue, ce
 *	ue: multi_uncorrectable
 *	ce: single_correctable
 * sub_block_index: sub block index, pass 0 if there is no sub block
 *
 * Here are some examples for bash commands:
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result?
 *
 * For disable/enable, please check the ras features at
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * For inject, please check the corresponding err count at
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * .. note::
 *	Operations are only allowed on blocks which are supported.
 *	Please check the ras mask at /sys/module/amdgpu/parameters/ras_mask
 *	to see which blocks support RAS on a particular asic.
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
		const char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev, "RAS WARN: error injection "
				"currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev, "RAS WARN: input address "
					"0x%llx is invalid.\n",
					data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
					"as bad before error injection!\n",
					data.inject.address);
			break;
		}

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}
/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
		const char __user *buf, size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
			&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (ret == 1) {
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return -EIO;
	}
}
static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_ctrl_write,
	.llseek = default_llseek
};

static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
	.owner = THIS_MODULE,
	.read = NULL,
	.write = amdgpu_ras_debugfs_eeprom_write,
	.llseek = default_llseek
};
/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
 *
 * The format of one line is below,
 *
 * [ce|ue]: count
 *
 * Example:
 *
 * .. code-block:: bash
 *
 *	ue: 0
 *	ce: 1
 */
static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
	struct ras_query_if info = {
		.head = obj->head,
	};

	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			"ce", info.ce_count);
}
#define get_obj(obj) do { (obj)->use++; } while (0)
#define alive_obj(obj) ((obj)->use)
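/* Reference counting for ras_manager objects: get_obj()/put_obj() bump and
 * drop ->use; when the count reaches zero the object is unlinked from
 * con->head, and a negative count flags an unbalanced put.
 */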
static inline void put_obj(struct ras_manager *obj)
{
	if (obj && (--obj->use == 0))
		list_del(&obj->node);
	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", obj->head.name);
}
/* make one obj and return it. */
static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_features || !con)
		return NULL;

	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return NULL;

	obj = &con->objs[head->block];
	/* already exist. return obj? */
	if (alive_obj(obj))
		return NULL;

	obj->head = *head;
	obj->adev = adev;
	list_add(&obj->node, &con->head);
	get_obj(obj);

	return obj;
}
/* return an obj equal to head, or the first when head is NULL */
struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	int i;

	if (!adev->ras_features || !con)
		return NULL;

	if (head) {
		if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
			return NULL;

		obj = &con->objs[head->block];

		if (alive_obj(obj)) {
			WARN_ON(head->block != obj->head.block);
			return obj;
		}
	} else {
		for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
			obj = &con->objs[i];
			if (alive_obj(obj)) {
				WARN_ON(i != obj->head.block);
				return obj;
			}
		}
	}

	return NULL;
}
static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
					 const char *invoke_type,
					 const char *block_name,
					 enum ta_ras_status ret)
{
	switch (ret) {
	case TA_RAS_STATUS__SUCCESS:
		return;
	case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
		dev_warn(adev->dev,
			"RAS WARN: %s %s currently unavailable\n",
			invoke_type,
			block_name);
		break;
	default:
		dev_err(adev->dev,
			"RAS ERROR: %s %s error failed ret 0x%X\n",
			invoke_type,
			block_name,
			ret);
	}
}
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->hw_supported & BIT(head->block);
}

static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	return con->features & BIT(head->block);
}
/*
 * if obj is not created, then create one.
 * set feature enable flag.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			/* skip clean gfx ras context feature for VEGA20 Gaming */
			if (!(!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)))
				con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	union ta_ras_cmd_input *info;
	int ret;

	if (!con)
		return -EINVAL;

	info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	if (!enable) {
		info->disable_features = (struct ta_ras_disable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	} else {
		info->enable_features = (struct ta_ras_enable_features_input) {
			.block_id = amdgpu_ras_block_to_ta(head->block),
			.error_type = amdgpu_ras_error_to_ta(head->type),
		};
	}

	/* Do not enable if it is not allowed. */
	WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
	/* Are we already in the state we are going to set? */
	if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
		ret = 0;
		goto out;
	}

	if (!amdgpu_ras_intr_triggered()) {
		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			amdgpu_ras_parse_status_code(adev,
					enable ? "enable" : "disable",
					ras_block_str(head->block),
					(enum ta_ras_status)ret);
			if (ret == TA_RAS_STATUS__RESET_NEEDED)
				ret = -EAGAIN;
			else
				ret = -EINVAL;
			goto out;
		}
	}

	/* setup the obj */
	__amdgpu_ras_feature_enable(adev, head, enable);
	ret = 0;
out:
	kfree(info);
	return ret;
}
/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
		struct ras_common_if *head, bool enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret;

	if (!con)
		return -EINVAL;

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		if (enable) {
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing,
			 * but sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just setup the object.
			 * TODO: remove this WA in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						"RAS INFO: %s setup object\n",
						ras_block_str(head->block));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd. */
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);

	return ret;
}
static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		/* bypass psp,
		 * aka just release the obj and corresponding flags
		 */
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		}
	}

	return con->features;
}
static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
		bool bypass)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
	int i;
	const enum amdgpu_ras_error_type default_ras_type =
		AMDGPU_RAS_ERROR__NONE;

	for (i = 0; i < ras_block_count; i++) {
		struct ras_common_if head = {
			.block = i,
			.type = default_ras_type,
			.sub_block_index = 0,
		};
		strcpy(head.name, ras_block_str(i));
		if (bypass) {
			/*
			 * bypass psp. vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		}
	}

	return con->features;
}
/* feature ctl end */
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
		struct ras_query_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data = {0, 0, 0, NULL};
	int i;

	if (!obj)
		return -EINVAL;

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__UMC:
		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->query_ras_error_count)
			adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras_funcs &&
		    adev->umc.ras_funcs->query_ras_error_address)
			adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		if (adev->sdma.funcs->query_ras_error_count) {
			for (i = 0; i < adev->sdma.num_instances; i++)
				adev->sdma.funcs->query_ras_error_count(adev, i,
									&err_data);
		}
		break;
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, &err_data);

		if (adev->gfx.funcs->query_ras_error_status)
			adev->gfx.funcs->query_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.funcs->query_ras_error_count)
			adev->mmhub.funcs->query_ras_error_count(adev, &err_data);

		if (adev->mmhub.funcs->query_ras_error_status)
			adev->mmhub.funcs->query_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		if (adev->nbio.ras_funcs &&
		    adev->nbio.ras_funcs->query_ras_error_count)
			adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
		break;
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
		if (adev->gmc.xgmi.ras_funcs &&
		    adev->gmc.xgmi.ras_funcs->query_ras_error_count)
			adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
		break;
	default:
		break;
	}

	obj->err_data.ue_count += err_data.ue_count;
	obj->err_data.ce_count += err_data.ce_count;

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;

	if (err_data.ce_count) {
		dev_info(adev->dev, "%ld correctable hardware errors "
			"detected in %s block, no user "
			"action is needed.\n",
			obj->err_data.ce_count,
			ras_block_str(info->head.block));
	}
	if (err_data.ue_count) {
		dev_info(adev->dev, "%ld uncorrectable hardware errors "
			"detected in %s block\n",
			obj->err_data.ue_count,
			ras_block_str(info->head.block));
	}

	return 0;
}
int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
		enum amdgpu_ras_block block)
{
	if (!amdgpu_ras_is_supported(adev, block))
		return -EINVAL;

	switch (block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->reset_ras_error_count)
			adev->gfx.funcs->reset_ras_error_count(adev);

		if (adev->gfx.funcs->reset_ras_error_status)
			adev->gfx.funcs->reset_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.funcs->reset_ras_error_count)
			adev->mmhub.funcs->reset_ras_error_count(adev);
		break;
	case AMDGPU_RAS_BLOCK__SDMA:
		if (adev->sdma.funcs->reset_ras_error_count)
			adev->sdma.funcs->reset_ras_error_count(adev);
		break;
	default:
		break;
	}

	return 0;
}
/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
				 struct ta_ras_trigger_error_input *block_info)
{
	int ret;

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
		dev_warn(adev->dev, "Failed to disallow XGMI power down");

	ret = psp_ras_trigger_error(&adev->psp, block_info);

	if (amdgpu_ras_intr_triggered())
		return ret;

	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
		dev_warn(adev->dev, "Failed to allow XGMI power down");

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");

	return ret;
}
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
		struct ras_inject_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = 0;

	if (!obj)
		return -EINVAL;

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->ras_error_inject)
			ret = adev->gfx.funcs->ras_error_inject(adev, info);
		else
			ret = -EINVAL;
		break;
	case AMDGPU_RAS_BLOCK__UMC:
	case AMDGPU_RAS_BLOCK__SDMA:
	case AMDGPU_RAS_BLOCK__MMHUB:
	case AMDGPU_RAS_BLOCK__PCIE_BIF:
		ret = psp_ras_trigger_error(&adev->psp, &block_info);
		break;
	case AMDGPU_RAS_BLOCK__XGMI_WAFL:
		ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
		break;
	default:
		dev_info(adev->dev, "%s error injection is not supported yet\n",
			 ras_block_str(info->head.block));
		ret = -EINVAL;
	}

	amdgpu_ras_parse_status_code(adev,
				     "inject",
				     ras_block_str(info->head.block),
				     (enum ta_ras_status)ret);

	return ret;
}
/* get the total error counts on all IPs */
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
		bool is_ce)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;
	struct ras_err_data data = {0, 0};

	if (!adev->ras_features || !con)
		return 0;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		if (amdgpu_ras_query_error_status(adev, &info))
			return 0;

		data.ce_count += info.ce_count;
		data.ue_count += info.ue_count;
	}

	return is_ce ? data.ce_count : data.ue_count;
}
/* query/inject/cure end */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count);

static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
}
/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of vram on the gpu through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one gpu page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below,
 *
 * R: reserved, this gpu page is reserved and not able to be used.
 *
 * P: pending for reserve, this gpu page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve. This gpu page can't be reserved for some reason.
 *
 * Examples:
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
 */
static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
		struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t ppos, size_t count)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, badpages_attr);
	struct amdgpu_device *adev = con->adev;
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
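	/* Each bad page renders as one fixed-width 28-byte line, so the
	 * requested byte range [ppos, ppos + count) maps to the record
	 * index range [start, end); e.g. a 64-byte read at offset 0
	 * returns records 0 and 1.
	 */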
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
				"0x%08x : 0x%08x : %1s\n",
				bps[start].bp,
				bps[start].size,
				amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
}
static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct amdgpu_ras *con =
		container_of(attr, struct amdgpu_ras, features_attr);

	return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
}
static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&con->badpages_attr.attr,
				RAS_FS_NAME);
}

static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct attribute_group group = {
		.name = RAS_FS_NAME,
		.attrs = attrs,
	};

	sysfs_remove_group(&adev->dev->kobj, &group);

	return 0;
}
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
		struct ras_fs_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || obj->attr_inuse)
		return -EINVAL;

	get_obj(obj);

	memcpy(obj->fs_data.sysfs_name,
			head->sysfs_name,
			sizeof(obj->fs_data.sysfs_name));

	obj->sysfs_attr = (struct device_attribute){
		.attr = {
			.name = obj->fs_data.sysfs_name,
			.mode = S_IRUGO,
		},
		.show = amdgpu_ras_sysfs_read,
	};
	sysfs_attr_init(&obj->sysfs_attr.attr);

	if (sysfs_add_file_to_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME)) {
		put_obj(obj);
		return -EINVAL;
	}

	obj->attr_inuse = 1;

	return 0;
}
int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
		struct ras_common_if *head)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	if (!obj || !obj->attr_inuse)
		return -EINVAL;

	sysfs_remove_file_from_group(&adev->dev->kobj,
				&obj->sysfs_attr.attr,
				RAS_FS_NAME);
	obj->attr_inuse = 0;
	put_obj(obj);

	return 0;
}

static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		amdgpu_ras_sysfs_remove(adev, &obj->head);
	}

	if (amdgpu_bad_page_threshold != 0)
		amdgpu_ras_sysfs_remove_bad_page_node(adev);

	amdgpu_ras_sysfs_remove_feature_node(adev);

	return 0;
}
/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
 */
static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct drm_minor *minor = adev_to_drm(adev)->primary;

	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);

	/*
	 * After one uncorrectable error happens, usually GPU recovery will
	 * be scheduled. But due to the known problem of GPU recovery failing
	 * to bring the GPU back, the interface below provides the user a
	 * direct way to reboot the system automatically when an
	 * ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
	 * routine will never be called in that case.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);

	/*
	 * The user can set this to skip cleaning up the hardware's error
	 * count registers of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);

	return dir;
}
static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
				      struct ras_fs_if *head,
				      struct dentry *dir)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);

	if (!obj || !dir)
		return;

	get_obj(obj);

	memcpy(obj->fs_data.debugfs_name,
			head->debugfs_name,
			sizeof(obj->fs_data.debugfs_name));

	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
}
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct dentry *dir;
	struct ras_manager *obj;
	struct ras_fs_if fs_info;

	/*
	 * it won't be called in the resume path, no need to check
	 * suspend and gpu reset status
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
		return;

	dir = amdgpu_ras_debugfs_create_ctrl_node(adev);

	list_for_each_entry(obj, &con->head, node) {
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
			(obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
					ras_block_str(obj->head.block));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
		}
	}
}
static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
		amdgpu_ras_sysfs_badpages_read, NULL, 0);
static DEVICE_ATTR(features, S_IRUGO,
		amdgpu_ras_sysfs_features_read, NULL);
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct attribute_group group = {
		.name = RAS_FS_NAME,
	};
	struct attribute *attrs[] = {
		&con->features_attr.attr,
		NULL
	};
	struct bin_attribute *bin_attrs[] = {
		NULL,
		NULL,
	};
	int r;

	/* add features entry */
	con->features_attr = dev_attr_features;
	group.attrs = attrs;
	sysfs_attr_init(attrs[0]);

	if (amdgpu_bad_page_threshold != 0) {
		/* add bad_page_features entry */
		bin_attr_gpu_vram_bad_pages.private = NULL;
		con->badpages_attr = bin_attr_gpu_vram_bad_pages;
		bin_attrs[0] = &con->badpages_attr;
		group.bin_attrs = bin_attrs;
		sysfs_bin_attr_init(bin_attrs[0]);
	}

	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");

	return 0;
}
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *con_obj, *ip_obj, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
		}
	}

	amdgpu_ras_sysfs_remove_all(adev);
	return 0;
}
static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
{
	struct ras_ih_data *data = &obj->ih_data;
	struct amdgpu_iv_entry entry;
	int ret;
	struct ras_err_data err_data = {0, 0, 0, NULL};

	while (data->rptr != data->wptr) {
		memcpy(&entry, &data->ring[data->rptr],
				data->element_size);

		data->rptr = (data->aligned_element_size +
				data->rptr) % data->ring_size;

		/* Let the IP handle its data, maybe we need to get the output
		 * from the callback to update the error type/count, etc.
		 */
		if (data->cb) {
			ret = data->cb(obj->adev, &err_data, &entry);
			/* ue will trigger an interrupt, and in that case
			 * we need to do a reset to recover the whole system.
			 * But leave that recovery to the IP; here we just
			 * dispatch the error.
			 */
			if (ret == AMDGPU_RAS_SUCCESS) {
				/* these counts could be left as 0 if
				 * some blocks do not count error number
				 */
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
			}
		}
	}
}
static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
{
	struct ras_ih_data *data =
		container_of(work, struct ras_ih_data, ih_work);
	struct ras_manager *obj =
		container_of(data, struct ras_manager, ih_data);

	amdgpu_ras_interrupt_handler(obj);
}

int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
		struct ras_dispatch_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data = &obj->ih_data;

	if (!obj)
		return -EINVAL;

	if (data->inuse == 0)
		return 0;

	/* Might be overflow... */
	memcpy(&data->ring[data->wptr], info->entry,
			data->element_size);

	data->wptr = (data->aligned_element_size +
			data->wptr) % data->ring_size;

	schedule_work(&data->ih_work);

	return 0;
}
int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
		return 0;

	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
	memset(data, 0, sizeof(*data));
	put_obj(obj);

	return 0;
}
int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
		struct ras_ih_if *info)
{
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj) {
		/* in case we register the IH before enabling the ras feature */
		obj = amdgpu_ras_create_obj(adev, &info->head);
		if (!obj)
			return -EINVAL;
	} else
		get_obj(obj);

	data = &obj->ih_data;
	/* add the callback, etc. */
	*data = (struct ras_ih_data) {
		.inuse = 0,
		.cb = info->cb,
		.element_size = sizeof(struct amdgpu_iv_entry),
		.rptr = 0,
		.wptr = 0,
	};

	INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
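	/* Each ring slot is an iv entry rounded up to an 8-byte boundary,
	 * so the rptr/wptr modular arithmetic above always lands on a
	 * slot start.
	 */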
	data->aligned_element_size = ALIGN(data->element_size, 8);
	/* the ring can store 64 iv entries. */
	data->ring_size = 64 * data->aligned_element_size;
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
	if (!data->ring) {
		put_obj(obj);
		return -ENOMEM;
	}

	/* IH is ready */
	data->inuse = 1;

	return 0;
}
static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &con->head, node) {
		struct ras_ih_if info = {
			.head = obj->head,
		};
		amdgpu_ras_interrupt_remove_handler(adev, &info);
	}

	return 0;
}
/* traverse all IPs except NBIO to query error counters */
static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_features || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		/*
		 * The PCIE_BIF IP has a different isr for the ras controller
		 * interrupt; the specific ras counter query will be
		 * done in that isr. So skip such blocks from the common
		 * sync flood interrupt isr calling.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		amdgpu_ras_query_error_status(adev, &info);
	}
}
/* Parse RdRspStatus and WrRspStatus */
static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
		struct ras_query_if *info)
{
	/*
	 * Only two blocks need to query read/write
	 * RspStatus at the current state
	 */
	switch (info->head.block) {
	case AMDGPU_RAS_BLOCK__GFX:
		if (adev->gfx.funcs->query_ras_error_status)
			adev->gfx.funcs->query_ras_error_status(adev);
		break;
	case AMDGPU_RAS_BLOCK__MMHUB:
		if (adev->mmhub.funcs->query_ras_error_status)
			adev->mmhub.funcs->query_ras_error_status(adev);
		break;
	default:
		break;
	}
}

static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj;

	if (!adev->ras_features || !con)
		return;

	list_for_each_entry(obj, &con->head, node) {
		struct ras_query_if info = {
			.head = obj->head,
		};

		amdgpu_ras_error_status_query(adev, &info);
	}
}
/* recovery begin */

/* return 0 on success.
 * caller needs to free bps.
 */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
		struct ras_badpage **bps, unsigned int *count)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int i = 0;
	int ret = 0, status;

	if (!con || !con->eh_data || !bps || !count)
		return -EINVAL;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data || data->count == 0) {
		*bps = NULL;
		ret = -EINVAL;
		goto out;
	}

	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
	if (!*bps) {
		ret = -ENOMEM;
		goto out;
	}

	for (; i < data->count; i++) {
		(*bps)[i] = (struct ras_badpage){
			.bp = data->bps[i].retired_page,
			.size = AMDGPU_GPU_PAGE_SIZE,
			.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
		};
		status = amdgpu_vram_mgr_query_page_status(
				ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
				data->bps[i].retired_page);
		if (status == -EBUSY)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
		else if (status == -ENOENT)
			(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
	}

	*count = data->count;
out:
	mutex_unlock(&con->recovery_lock);
	return ret;
}
static void amdgpu_ras_do_recovery(struct work_struct *work)
{
	struct amdgpu_ras *ras =
		container_of(work, struct amdgpu_ras, recovery_work);
	struct amdgpu_device *remote_adev = NULL;
	struct amdgpu_device *adev = ras->adev;
	struct list_head device_list, *device_list_handle = NULL;

	if (!ras->disable_ras_err_cnt_harvest) {
		struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

		/* Build list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
			device_list_handle = &hive->device_list;
		} else {
			INIT_LIST_HEAD(&device_list);
			list_add_tail(&adev->gmc.xgmi.head, &device_list);
			device_list_handle = &device_list;
		}

		list_for_each_entry(remote_adev,
				device_list_handle, gmc.xgmi.head) {
			amdgpu_ras_query_err_status(remote_adev);
			amdgpu_ras_log_on_err_counter(remote_adev);
		}

		amdgpu_put_xgmi_hive(hive);
	}

	if (amdgpu_device_should_recover_gpu(ras->adev))
		amdgpu_device_gpu_recover(ras->adev, NULL);
	atomic_set(&ras->in_recovery, 0);
}
/* alloc/realloc bps array */
static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
		struct ras_err_handler_data *data, int pages)
{
	unsigned int old_space = data->count + data->space_left;
	unsigned int new_space = old_space + pages;
	unsigned int align_space = ALIGN(new_space, 512);
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
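	/* The array grows in 512-entry steps: e.g. the initial 256-page
	 * request from amdgpu_ras_add_bad_pages() allocates a 512-slot array.
	 */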
	if (!bps)
		return -ENOMEM;

	if (data->bps) {
		memcpy(bps, data->bps,
				data->count * sizeof(*data->bps));
		kfree(data->bps);
	}

	data->bps = bps;
	data->space_left += align_space - old_space;
	return 0;
}
/* it deals with vram only. */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	int ret = 0;
	uint32_t i;

	if (!con || !con->eh_data || !bps || pages <= 0)
		return 0;

	mutex_lock(&con->recovery_lock);
	data = con->eh_data;
	if (!data)
		goto out;

	for (i = 0; i < pages; i++) {
		if (amdgpu_ras_check_bad_page_unlock(con,
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
			continue;

		if (!data->space_left &&
			amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
			ret = -ENOMEM;
			goto out;
		}

		amdgpu_vram_mgr_reserve_range(
			ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
			bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
			AMDGPU_GPU_PAGE_SIZE);

		memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps));
		data->count++;
		data->space_left--;
	}
out:
	mutex_unlock(&con->recovery_lock);

	return ret;
}
/*
 * write error record array to eeprom, the function should be
 * protected by recovery_lock
 */
int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data;
	struct amdgpu_ras_eeprom_control *control;
	int save_count;

	if (!con || !con->eh_data)
		return 0;

	control = &con->eeprom_control;
	data = con->eh_data;
	save_count = data->count - control->num_recs;
	/* only new entries are saved */
	if (save_count > 0) {
		if (amdgpu_ras_eeprom_process_recods(control,
						     &data->bps[control->num_recs],
						     true,
						     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
	}

	return 0;
}
/*
 * read error record array in eeprom and reserve enough space for
 * storing new bad pages
 */
static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
{
	struct amdgpu_ras_eeprom_control *control =
		&adev->psp.ras.ras->eeprom_control;
	struct eeprom_table_record *bps = NULL;
	int ret = 0;

	/* no bad page record, skip eeprom access */
	if (!control->num_recs || (amdgpu_bad_page_threshold == 0))
		return ret;

	bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	if (amdgpu_ras_eeprom_process_recods(control, bps, false,
		control->num_recs)) {
		dev_err(adev->dev, "Failed to load EEPROM table records!");
		ret = -EIO;
		goto out;
	}

	ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs);

out:
	kfree(bps);
	return ret;
}
static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
				uint64_t addr)
{
	struct ras_err_handler_data *data = con->eh_data;
	int i;

	addr >>= AMDGPU_GPU_PAGE_SHIFT;
	for (i = 0; i < data->count; i++)
		if (addr == data->bps[i].retired_page)
			return true;

	return false;
}

/*
 * check if an address belongs to a bad page
 *
 * Note: this check is only for the umc block
 */
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
				uint64_t addr)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	bool ret = false;

	if (!con || !con->eh_data)
		return ret;

	mutex_lock(&con->recovery_lock);
	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
	mutex_unlock(&con->recovery_lock);
	return ret;
}
static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
		uint32_t max_length)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int tmp_threshold = amdgpu_bad_page_threshold;
	uint64_t val;

	/*
	 * Justification of value bad_page_cnt_threshold in ras structure
	 *
	 * Generally, -1 <= amdgpu_bad_page_threshold <= max record length
	 * in eeprom, and introduce two scenarios accordingly.
	 *
	 * Bad page retirement enablement:
	 *  - If amdgpu_bad_page_threshold = -1,
	 *    bad_page_cnt_threshold = typical value by formula.
	 *
	 *  - When the value from user is 0 < amdgpu_bad_page_threshold <
	 *    max record length in eeprom, use it directly.
	 *
	 * Bad page retirement disablement:
	 *  - If amdgpu_bad_page_threshold = 0, bad page retirement
	 *    functionality is disabled, and bad_page_cnt_threshold will
	 *    take no effect.
	 */
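	/*
	 * For example, with the default -1 and 16GB of VRAM, the formula
	 * below gives 16GB / 100MB = 163 as the bad page threshold, which
	 * is then capped by max_length, the EEPROM record capacity.
	 */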
	if (tmp_threshold < -1)
		tmp_threshold = -1;
	else if (tmp_threshold > max_length)
		tmp_threshold = max_length;

	if (tmp_threshold == -1) {
		val = adev->gmc.mc_vram_size;
		do_div(val, RAS_BAD_PAGE_RATE);
		con->bad_page_cnt_threshold = min(lower_32_bits(val),
						max_length);
	} else {
		con->bad_page_cnt_threshold = tmp_threshold;
	}
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data **data;
	uint32_t max_eeprom_records_len = 0;
	bool exc_err_limit = false;
	int ret;

	if (adev->ras_features && con)
		data = &con->eh_data;
	else
		return 0;

	*data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO);
	if (!*data) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_init(&con->recovery_lock);
	INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
	atomic_set(&con->in_recovery, 0);
	con->adev = adev;

	max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
	amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);

	/* Todo: During test the SMU might fail to read the eeprom through I2C
	 * when the GPU is pending on XGMI reset during probe time
	 * (Mostly after second bus reset), skip it now
	 */
	if (adev->gmc.xgmi.pending_reset)
		return 0;
	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This calling fails when exc_err_limit is true or
	 * ret != 0.
	 */
	if (exc_err_limit || ret)
		goto free;

	if (con->eeprom_control.num_recs) {
		ret = amdgpu_ras_load_bad_pages(adev);
		if (ret)
			goto free;
	}

	return 0;

free:
	kfree((*data)->bps);
	kfree(*data);
	con->eh_data = NULL;
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery!\n");

	/*
	 * Except the error threshold exceeding case, other failure cases in
	 * this function would not fail amdgpu driver init.
	 */
	if (!exc_err_limit)
		ret = 0;
	else
		ret = -EINVAL;

	return ret;
}
static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_err_handler_data *data = con->eh_data;

	/* recovery_init failed to init it, fini is useless */
	if (!data)
		return 0;

	cancel_work_sync(&con->recovery_work);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data);
	mutex_unlock(&con->recovery_lock);

	return 0;
}
/* return 0 if ras will reset gpu and repost. */
int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
		unsigned int block)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!ras)
		return -EINVAL;

	ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
	return 0;
}

static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
	return adev->asic_type == CHIP_VEGA10 ||
		adev->asic_type == CHIP_VEGA20 ||
		adev->asic_type == CHIP_ARCTURUS ||
		adev->asic_type == CHIP_ALDEBARAN ||
		adev->asic_type == CHIP_SIENNA_CICHLID;
}
/*
 * check the hardware's ras ability, which will be saved in hw_supported.
 * if the hardware does not support ras, we can skip some ras initialization
 * and forbid some ras operations from IPs.
 * if software itself, say a boot parameter, limits the ras ability, we still
 * need to allow IPs to do some limited operations, like disable. In such a
 * case, we have to initialize ras as normal, but need to check whether an
 * operation is allowed or not in each function.
 */
static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
		uint32_t *hw_supported, uint32_t *supported)
{
	*hw_supported = 0;
	*supported = 0;

	if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
	    !amdgpu_ras_asic_supported(adev))
		return;

	if (!adev->gmc.xgmi.connected_to_cpu) {
		if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
			dev_info(adev->dev, "MEM ECC is active.\n");
			*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
					1 << AMDGPU_RAS_BLOCK__DF);
		} else {
			dev_info(adev->dev, "MEM ECC is not present.\n");
		}

		if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
			dev_info(adev->dev, "SRAM ECC is active.\n");
			*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
					1 << AMDGPU_RAS_BLOCK__DF);
		} else {
			dev_info(adev->dev, "SRAM ECC is not present.\n");
		}
	} else {
		/* driver only manages a few IP blocks' RAS features
		 * when the GPU is connected to the cpu through XGMI */
		*hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
				1 << AMDGPU_RAS_BLOCK__SDMA |
				1 << AMDGPU_RAS_BLOCK__MMHUB);
	}

	/* hw_supported needs to be aligned with RAS block mask. */
	*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
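	/* The user-visible mask then filters what the hardware reports:
	 * e.g. booting with amdgpu.ras_mask=0x1 limits *supported (and so
	 * adev->ras_features) to the UMC block alone, and amdgpu.ras_enable=0
	 * disables RAS entirely.
	 */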
	*supported = amdgpu_ras_enable == 0 ?
			0 : *hw_supported & amdgpu_ras_mask;
	adev->ras_features = *supported;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int r;

	if (con)
		return 0;

	con = kmalloc(sizeof(struct amdgpu_ras) +
			sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
			GFP_KERNEL|__GFP_ZERO);
	if (!con)
		return -ENOMEM;

	con->objs = (struct ras_manager *)(con + 1);

	amdgpu_ras_set_context(adev, con);

	amdgpu_ras_check_supported(adev, &con->hw_supported,
			&con->supported);
	if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
		/* set gfx block ras context feature for VEGA20 Gaming
		 * send ras disable cmd to ras ta during ras late init.
		 */
		if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) {
			con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);

			return 0;
		}

		r = 0;
		goto release_con;
	}

	con->features = 0;
	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;

	/* initialize nbio ras function ahead of any other
	 * ras functions so the hardware fatal error interrupt
	 * can be enabled as early as possible */
	switch (adev->asic_type) {
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
		break;
	default:
		/* nbio ras is not available */
		break;
	}

	if (adev->nbio.ras_funcs &&
	    adev->nbio.ras_funcs->init_ras_controller_interrupt) {
		r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (adev->nbio.ras_funcs &&
	    adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
		r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
		if (r)
			goto release_con;
	}

	if (amdgpu_ras_fs_init(adev)) {
		r = -EINVAL;
		goto release_con;
	}

	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		"hardware ability[%x] ras_mask[%x]\n",
		con->hw_supported, con->supported);
	return 0;
release_con:
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return r;
}
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
			 struct ras_common_if *ras_block,
			 struct ras_fs_if *fs_info,
			 struct ras_ih_if *ih_info)
{
	int r;

	/* disable RAS feature per IP block if it is not supported */
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
		return 0;
	}

	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
	if (r) {
		if (r == -EAGAIN) {
			/* request gpu reset. will run again */
			amdgpu_ras_request_reset_on_boot(adev,
					ras_block->block);
			return 0;
		} else if (adev->in_suspend || amdgpu_in_reset(adev)) {
			/* in resume phase, if failing to enable ras,
			 * clean up all ras fs nodes, and disable ras */
			goto cleanup;
		} else
			return r;
	}

	/* in resume phase, no need to create ras fs node */
	if (adev->in_suspend || amdgpu_in_reset(adev))
		return 0;

	if (ih_info->cb) {
		r = amdgpu_ras_interrupt_add_handler(adev, ih_info);
		if (r)
			goto interrupt;
	}

	r = amdgpu_ras_sysfs_create(adev, fs_info);
	if (r)
		goto sysfs;

	return 0;
cleanup:
	amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
	if (ih_info->cb)
		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
	return r;
}
/* helper function to remove ras fs node and interrupt handler */
void amdgpu_ras_late_fini(struct amdgpu_device *adev,
			  struct ras_common_if *ras_block,
			  struct ras_ih_if *ih_info)
{
	if (!ras_block || !ih_info)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);
	if (ih_info->cb)
		amdgpu_ras_interrupt_remove_handler(adev, ih_info);
	amdgpu_ras_feature_enable(adev, ras_block, 0);
}
/* do some init work after IP late init as dependence,
 * and it runs in resume/gpu reset/booting up cases.
 */
void amdgpu_ras_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj, *tmp;

	if (!adev->ras_features || !con) {
		/* clean ras context for VEGA20 Gaming after sending the ras disable cmd */
		amdgpu_release_ras_context(adev);

		return;
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. There is a
		 * tricky thing that an IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but as the driver does not handle it,
		 * ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them and one or more IPs
		 * may not be implemented yet. So we disable them on behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should be no reference left. */
				WARN_ON(alive_obj(obj));
			}
		}
	}

	if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
		con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
		/* setup ras obj state as disabled.
		 * for the init_by_vbios case.
		 * if we want to enable ras, just enable it in a normal way.
		 * If we want to disable it, we need to setup the ras obj as
		 * enabled, then issue another TA disable cmd.
		 * See feature_enable_on_boot
		 */
		amdgpu_ras_disable_all_features(adev, 1);
		amdgpu_ras_reset_gpu(adev);
	}
}
void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_features || !con)
		return;

	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);
}
/* do some fini work before IP fini as dependence */
int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_features || !con)
		return 0;

	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
	amdgpu_ras_disable_all_features(adev, 0);
	amdgpu_ras_recovery_fini(adev);
	return 0;
}
int amdgpu_ras_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!adev->ras_features || !con)
		return 0;

	amdgpu_ras_fs_fini(adev);
	amdgpu_ras_interrupt_remove_all(adev);

	WARN(con->features, "Feature mask is not cleared");

	if (con->features)
		amdgpu_ras_disable_all_features(adev, 1);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);

	return 0;
}
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
	uint32_t hw_supported, supported;

	amdgpu_ras_check_supported(adev, &hw_supported, &supported);
	if (!hw_supported)
		return;

	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			"(ERREVENT_ATHUB_INTERRUPT) detected!\n");

		amdgpu_ras_reset_gpu(adev);
	}
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20 &&
	    adev->pm.fw_version <= 0x283400) {
		return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
				amdgpu_ras_intr_triggered();
	}

	return false;
}
void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	if (!con)
		return;

	if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
		con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
		amdgpu_ras_set_context(adev, NULL);
		kfree(con);
	}
}