/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
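
/* Query the UMC error counters and error addresses, hand any reported
 * bad pages over to the RAS bad page bookkeeping, and optionally trigger
 * a GPU reset when uncorrectable errors were found.
 */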
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		bool reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	int ret = 0;

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		/* the SMU does not provide ECC info, query the UMC registers directly */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
		}
	} else if (!ret) {
		/* the SMU reported ECC info, parse the cached data instead */
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev, "Failed to alloc memory for "
						"umc error address record!\n");

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable error needs gpu reset */
	if (err_data->ue_count) {
		dev_info(adev->dev, "%ld uncorrectable hardware errors "
				"detected in UMC block\n",
				err_data->ue_count);

		if ((amdgpu_bad_page_threshold != 0) &&
		    err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						 err_data->err_addr_cnt);
			amdgpu_ras_save_bad_pages(adev);

			amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

			if (con->update_channel_flag == true) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}

		if (reset)
			amdgpu_ras_reset_gpu(adev);
	}

	kfree(err_data->err_addr);
	return AMDGPU_RAS_SUCCESS;
}
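
/* Handle a UMC poison/ECC consumption event: run page retirement and
 * accumulate the resulting error counts on the UMC RAS object.
 */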
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
		void *ras_error_status,
		bool reset)
{
	int ret;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__UMC,
	};
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

	ret = amdgpu_umc_do_page_retirement(adev, ras_error_status, NULL, reset);

	if (ret == AMDGPU_RAS_SUCCESS && obj) {
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
	}

	return ret;
}
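
/* RAS data processing callback for the UMC block: retire bad pages and
 * allow a GPU reset for uncorrectable errors.
 */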
int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
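
/* Late RAS init for the UMC block: register the RAS node, enable the ECC
 * interrupt when supported and run the IP specific error counter init.
 */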
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}
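
/* ECC interrupt handler: forward the IV entry to the RAS interrupt
 * dispatcher for the UMC block.
 */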
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
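
/* Fill one eeprom_table_record for a retired page and bump the error
 * address count.
 */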
void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec =
		&err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;
}