/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "umc_v6_7.h"
#include "amdgpu_ras.h"

#define MAX_UMC_POISON_POLLING_TIME_SYNC	20  //ms
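
/* Dispatch a raw UMC error address to the IP-version-specific translator.
 * Only UMC v6.7 has a translation path here; all other versions report the
 * translation as unsupported and fail.
 */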
static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
				struct ras_err_data *err_data, uint64_t err_addr,
				uint32_t ch_inst, uint32_t umc_inst)
{
	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(6, 7, 0):
		umc_v6_7_convert_error_address(adev,
				err_data, err_addr, ch_inst, umc_inst);
		break;
	default:
		dev_warn(adev->dev,
			 "UMC address to Physical address translation is not supported\n");
		return AMDGPU_RAS_FAIL;
	}

	return AMDGPU_RAS_SUCCESS;
}
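
/* MCA-notifier entry point for page retirement: translate the reported
 * error address into retirable page records and persist them to the
 * bad-page store.
 */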
int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
{
	struct ras_err_data err_data;
	int ret;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	err_data.err_addr =
		kcalloc(adev->umc.max_ras_err_cnt_per_query,
			sizeof(struct eeprom_table_record), GFP_KERNEL);
	if (!err_data.err_addr) {
		dev_warn(adev->dev,
			"Failed to alloc memory for umc error record in MCA notifier!\n");
		ret = AMDGPU_RAS_FAIL;
		goto out_fini_err_data;
	}

	/* translate the UMC channel address to a physical address */
	ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
					ch_inst, umc_inst);
	if (ret)
		goto out_free_err_addr;

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

out_free_err_addr:
	kfree(err_data.err_addr);
out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);

	return ret;
}
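
/* Query the UMC RAS block for new error counts and addresses, then hand any
 * retirable pages to the RAS core. The firmware ECC-info path is used when
 * amdgpu_dpm_get_ecc_info() succeeds; otherwise the driver falls back to
 * direct hardware register queries through the ras_block hw_ops.
 */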
static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
			void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	unsigned int error_query_mode;
	int ret = 0;
	unsigned long err_count;

	amdgpu_ras_get_error_query_mode(adev, &error_query_mode);

	mutex_lock(&con->page_retirement_lock);
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
	if (ret == -EOPNOTSUPP &&
	    error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even when a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev,
					"Failed to alloc memory for umc error address record!\n");

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
		}
	} else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
	    (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address &&
		    adev->umc.max_ras_err_cnt_per_query) {
			err_data->err_addr =
				kcalloc(adev->umc.max_ras_err_cnt_per_query,
					sizeof(struct eeprom_table_record), GFP_KERNEL);

			/* still call query_ras_error_address to clear error status
			 * even when a NOMEM error is encountered
			 */
			if (!err_data->err_addr)
				dev_warn(adev->dev,
					"Failed to alloc memory for umc error address record!\n");

			/* umc query_ras_error_address is also responsible for clearing
			 * error status
			 */
			adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
		}
	}

	/* only uncorrectable and deferred errors need gpu reset */
	if (err_data->ue_count || err_data->de_count) {
		err_count = err_data->ue_count + err_data->de_count;
		if ((amdgpu_bad_page_threshold != 0) &&
		    err_data->err_addr_cnt) {
			amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
						err_data->err_addr_cnt);
			amdgpu_ras_save_bad_pages(adev, &err_count);

			amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);

			if (con->update_channel_flag == true) {
				amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
				con->update_channel_flag = false;
			}
		}
	}

	kfree(err_data->err_addr);

	mutex_unlock(&con->page_retirement_lock);
}
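
/* Common page-retirement path for a poison/ECC event: flag the SRAM ECC
 * event to KFD, retire any bad pages found, and trigger a GPU reset when an
 * uncorrectable error was consumed and the caller asked for a reset.
 */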
static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry,
		bool reset)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
	amdgpu_umc_handle_bad_pages(adev, ras_error_status);

	if (err_data->ue_count && reset) {
		/* use mode-2 reset for poison consumption */
		if (!entry)
			con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		amdgpu_ras_reset_gpu(adev);
	}

	return AMDGPU_RAS_SUCCESS;
}
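
/* Repeatedly query for deferred errors until one shows up or the timeout
 * (in milliseconds, polled at 1 ms intervals) expires, then fold the counts
 * into the UMC ras_manager object and optionally reset the GPU.
 */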
int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev,
			bool reset, uint32_t timeout_ms)
{
	struct ras_err_data err_data;
	struct ras_common_if head = {
		.block = AMDGPU_RAS_BLOCK__UMC,
	};
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
	uint32_t timeout = timeout_ms;

	memset(&err_data, 0, sizeof(err_data));
	amdgpu_ras_error_data_init(&err_data);

	do {
		amdgpu_umc_handle_bad_pages(adev, &err_data);

		if (timeout && !err_data.de_count) {
			msleep(1);
			timeout--;
		}
	} while (timeout && !err_data.de_count);

	if (!timeout)
		dev_warn(adev->dev, "Can't find bad pages\n");

	if (err_data.de_count)
		dev_info(adev->dev, "%ld new deferred hardware errors detected\n",
			 err_data.de_count);

	if (obj) {
		obj->err_data.ue_count += err_data.ue_count;
		obj->err_data.ce_count += err_data.ce_count;
		obj->err_data.de_count += err_data.de_count;
	}

	amdgpu_ras_error_data_fini(&err_data);

	kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);

	if (reset) {
		struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

		/* use mode-2 reset for poison consumption */
		con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		amdgpu_ras_reset_gpu(adev);
	}

	return 0;
}
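
/* Top-level UMC poison handler. On APUs and XGMI-to-CPU configurations only
 * a reset is issued and page retirement is left to the MCA notifier; on
 * bare-metal dGPUs retirement runs inline (pre-UMC 12.0.0) or through
 * synchronous polling / the page-retirement worker; under SR-IOV the event
 * is forwarded to the host via the virt ras_poison_handler.
 */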
int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, bool reset)
{
	int ret = AMDGPU_RAS_SUCCESS;

	if (adev->gmc.xgmi.connected_to_cpu ||
	    adev->gmc.is_app_apu) {
		if (reset) {
			/* MCA poison handler is only responsible for GPU reset,
			 * let MCA notifier do page retirement.
			 */
			kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
			amdgpu_ras_reset_gpu(adev);
		}
		return ret;
	}

	if (!amdgpu_sriov_vf(adev)) {
		if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
			struct ras_err_data err_data;
			struct ras_common_if head = {
				.block = AMDGPU_RAS_BLOCK__UMC,
			};
			struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);

			ret = amdgpu_ras_error_data_init(&err_data);
			if (ret)
				return ret;

			ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);

			if (ret == AMDGPU_RAS_SUCCESS && obj) {
				obj->err_data.ue_count += err_data.ue_count;
				obj->err_data.ce_count += err_data.ce_count;
				obj->err_data.de_count += err_data.de_count;
			}

			amdgpu_ras_error_data_fini(&err_data);
		} else if (reset) {
			amdgpu_umc_bad_page_polling_timeout(adev,
						reset, MAX_UMC_POISON_POLLING_TIME_SYNC);
		} else {
			struct amdgpu_ras *con = amdgpu_ras_get_context(adev);

			atomic_inc(&con->page_retirement_req_cnt);
			wake_up(&con->page_retirement_wq);
		}
	} else {
		if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
			adev->virt.ops->ras_poison_handler(adev, block);
		else
			dev_warn(adev->dev,
				"No ras_poison_handler interface in SRIOV!\n");
	}

	return ret;
}
int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
		void *ras_error_status,
		struct amdgpu_iv_entry *entry)
{
	return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true);
}
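
/* Register the UMC ras_block with the RAS core and fill in the common
 * fields plus the default late-init and interrupt callbacks.
 */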
int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_umc_ras *ras;

	if (!adev->umc.ras)
		return 0;

	ras = adev->umc.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register umc ras block!\n");
		return err;
	}

	strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->umc.ras_if = &ras->ras_block.ras_comm;

	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;

	return 0;
}
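
/* Late init for the UMC RAS block: run the common RAS late init, enable the
 * ECC interrupt when RAS is supported, and run the version-specific error
 * counter init if one is provided.
 */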
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	r = amdgpu_ras_block_late_init(adev, ras_block);
	if (r)
		return r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
		if (r)
			goto late_fini;
	}

	/* ras init of specific umc version */
	if (adev->umc.ras &&
	    adev->umc.ras->err_cnt_init)
		adev->umc.ras->err_cnt_init(adev);

	return 0;

late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}
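
/* ECC interrupt handler: forward the IV entry to the RAS interrupt
 * dispatcher for the UMC block, if a RAS interface is configured.
 */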
int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->umc.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}
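
/* Append one retired-page record (page frame number, channel, UMC instance
 * and timestamp) to err_data->err_addr; the caller must have allocated room
 * for it.
 */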
void amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
		uint64_t err_addr,
		uint64_t retired_page,
		uint32_t channel_index,
		uint32_t umc_inst)
{
	struct eeprom_table_record *err_rec =
		&err_data->err_addr[err_data->err_addr_cnt];

	err_rec->address = err_addr;
	/* page frame address is saved */
	err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
	err_rec->ts = (uint64_t)ktime_get_real_seconds();
	err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
	err_rec->cu = 0;
	err_rec->mem_channel = channel_index;
	err_rec->mcumc_id = umc_inst;

	err_data->err_addr_cnt++;
}
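
/* Iterate over every UMC channel (per node instance when the part has
 * multiple nodes) and invoke func; stop and return on the first failure.
 */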
int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
			umc_func func, void *data)
{
	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	int ret = 0;

	if (adev->umc.node_inst_num) {
		LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
			ret = func(adev, node_inst, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
					node_inst, umc_inst, ch_inst, ret);
				return ret;
			}
		}
	} else {
		LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
			ret = func(adev, 0, umc_inst, ch_inst, data);
			if (ret) {
				dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
					umc_inst, ch_inst, ret);
				return ret;
			}
		}
	}

	return 0;
}