/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"

#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

#include "amdgpu_reset.h"

#define smnPCS_XGMI3X16_PCS_ERROR_STATUS		0x11a0020c
#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK	0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS			0x12200210
#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK	0x12200218

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static LIST_HEAD(xgmi_hive_list);

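/*
 * Each table below lists one PCS error-status (or error-mask) register
 * instance per link; in SMN space the per-link instances sit at multiples
 * of 0x100000 from the base register, hence the "+ 0x100000"-style offsets.
 */
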
static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
	smnPCS_XGMI3X16_PCS_ERROR_STATUS,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
};

static const int wafl_pcs_err_status_reg_aldebaran[] = {
	smnPCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};

static const int wafl_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
	smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const int xgmi3x16_pcs_err_status_reg_v6_4[] = {
	smnPCS_XGMI3X16_PCS_ERROR_STATUS,
	smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000
};

static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = {
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
	smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
};

static const u64 xgmi_v6_4_0_mca_base_array[] = {
	0x11a09200,
	0x11b09200,
};

static const char *xgmi_v6_4_0_ras_error_code_ext[32] = {
	[0x00] = "XGMI PCS DataLossErr",
	[0x01] = "XGMI PCS TrainingErr",
	[0x02] = "XGMI PCS FlowCtrlAckErr",
	[0x03] = "XGMI PCS RxFifoUnderflowErr",
	[0x04] = "XGMI PCS RxFifoOverflowErr",
	[0x05] = "XGMI PCS CRCErr",
	[0x06] = "XGMI PCS BERExceededErr",
	[0x07] = "XGMI PCS TxMetaDataErr",
	[0x08] = "XGMI PCS ReplayBufParityErr",
	[0x09] = "XGMI PCS DataParityErr",
	[0x0a] = "XGMI PCS ReplayFifoOverflowErr",
	[0x0b] = "XGMI PCS ReplayFifoUnderflowErr",
	[0x0c] = "XGMI PCS ElasticFifoOverflowErr",
	[0x0d] = "XGMI PCS DeskewErr",
	[0x0e] = "XGMI PCS FlowCtrlCRCErr",
	[0x0f] = "XGMI PCS DataStartupLimitErr",
	[0x10] = "XGMI PCS FCInitTimeoutErr",
	[0x11] = "XGMI PCS RecoveryTimeoutErr",
	[0x12] = "XGMI PCS ReadySerialTimeoutErr",
	[0x13] = "XGMI PCS ReadySerialAttemptErr",
	[0x14] = "XGMI PCS RecoveryAttemptErr",
	[0x15] = "XGMI PCS RecoveryRelockAttemptErr",
	[0x16] = "XGMI PCS ReplayAttemptErr",
	[0x17] = "XGMI PCS SyncHdrErr",
	[0x18] = "XGMI PCS TxReplayTimeoutErr",
	[0x19] = "XGMI PCS RxReplayTimeoutErr",
	[0x1a] = "XGMI PCS LinkSubTxTimeoutErr",
	[0x1b] = "XGMI PCS LinkSubRxTimeoutErr",
	[0x1c] = "XGMI PCS RxCMDPktErr",
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
	{"XGMI3X16 PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI3X16 PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI3X16 PCS FlowCtrlAckErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
	{"XGMI3X16 PCS RxFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
	{"XGMI3X16 PCS RxFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
	{"XGMI3X16 PCS CRCErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI3X16 PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI3X16 PCS TxVcidDataErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
	{"XGMI3X16 PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI3X16 PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI3X16 PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI3X16 PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI3X16 PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI3X16 PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI3X16 PCS FlowCtrlCRCErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
	{"XGMI3X16 PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI3X16 PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI3X16 PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI3X16 PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI3X16 PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI3X16 PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI3X16 PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
	{"XGMI3X16 PCS ReplayAttemptErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
	{"XGMI3X16 PCS SyncHdrErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
	{"XGMI3X16 PCS TxReplayTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
	{"XGMI3X16 PCS RxReplayTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
	{"XGMI3X16 PCS LinkSubTxTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
	{"XGMI3X16 PCS LinkSubRxTimeoutErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
	{"XGMI3X16 PCS RxCMDPktErr",
	 SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
};

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirrored
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously adding the power-of-two
 * padded VRAM space from each node to each other.
 */

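/*
 * Example (hypothetical card/node numbers, for illustration only):
 *
 *   $ cat /sys/class/drm/card0/device/xgmi_device_id
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/node1/xgmi_device_id
 *
 * The first two commands print this GPU's 64-bit node and hive IDs; the
 * third prints the node ID of another member of the same hive.
 */
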
static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = 0444,
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};
ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	amdgpu_reset_put_reset_domain(hive->reset_domain);
	hive->reset_domain = NULL;

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

static const struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_groups = amdgpu_xgmi_hive_groups,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}

static ssize_t amdgpu_xgmi_show_physical_id(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%u\n", adev->gmc.xgmi.physical_node_id);
}

static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0; i < top->num_nodes; i++)
		sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_hops);

	return sysfs_emit(buf, "%s\n", buf);
}

static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0; i < top->num_nodes; i++)
		sprintf(buf + 3 * i, "%02x ", top->nodes[i].num_links);

	return sysfs_emit(buf, "%s\n", buf);
}

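/*
 * AMDGPU_XGMI_SET_FICAA() folds a DF register offset into the indirect
 * FICAA (fabric indirect config access address) encoding consumed by the
 * df get_fica()/set_fica() callbacks below; the 0x200/0x208 offsets select
 * the xGMI PIE error-counter control and status words.
 */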
#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	if ((!adev->df.funcs) ||
	    (!adev->df.funcs->get_fica) ||
	    (!adev->df.funcs->set_fica))
		return -EINVAL;

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

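	/*
	 * A low half-word value of 2 is taken to mean the counts are valid;
	 * bits 62 and 63 each appear to carry one error-count bit per link,
	 * so the two bits are summed.
	 */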
	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return sysfs_emit(buf, "%u\n", error_count);
}

static DEVICE_ATTR(xgmi_device_id, 0444, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL);
static DEVICE_ATTR(xgmi_error, 0444, amdgpu_xgmi_show_error, NULL);
static DEVICE_ATTR(xgmi_num_hops, 0444, amdgpu_xgmi_show_num_hops, NULL);
static DEVICE_ATTR(xgmi_num_links, 0444, amdgpu_xgmi_show_num_links, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	ret = device_create_file(adev->dev, &dev_attr_xgmi_physical_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_physical_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");

	/* Create xgmi num hops file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
	if (ret)
		pr_err("failed to create xgmi_num_hops\n");

	/* Create xgmi num links file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
	if (ret)
		pr_err("failed to create xgmi_num_links\n");

	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;

remove_link:
	sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_links);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					   struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
	device_remove_file(adev->dev, &dev_attr_xgmi_num_links);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);
}

struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	list_for_each_entry(hive, &xgmi_hive_list, node) {
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			goto pro_end;
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		goto pro_end;
	}

	/* initialize new hive if it does not exist */
	ret = kobject_init_and_add(&hive->kobj,
				   &amdgpu_xgmi_hive_type,
				   &adev->dev->kobj,
				   "%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kobject_put(&hive->kobj);
		hive = NULL;
		goto pro_end;
	}

	/*
	 * Only init hive->reset_domain for non-SRIOV configurations. For
	 * SRIOV, the host driver decides how to reset the GPU, either through
	 * FLR or chain reset. The guest side will get individual notifications
	 * from the host for the FLR if necessary.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Avoid recreating the reset domain when the hive is
		 * reconstructed for the case of resetting the devices in the
		 * XGMI hive during probe for passthrough GPUs.
		 * See https://www.spinics.net/lists/amd-gfx/msg58836.html
		 */
		if (adev->reset_domain->type != XGMI_HIVE) {
			hive->reset_domain =
				amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
			if (!hive->reset_domain) {
				dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
				kobject_put(&hive->kobj);
				hive = NULL;
				goto pro_end;
			}
		} else {
			amdgpu_reset_get_reset_domain(adev->reset_domain);
			hive->reset_domain = adev->reset_domain;
		}
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;

	/*
	 * The hive pstate on boot is high in vega20, so we have to go to low
	 * pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
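	/*
	 * The count initialized above works like a vote: each device that
	 * requests low pstate decrements it, and the hive only drops to low
	 * pstate once it reaches zero (see amdgpu_xgmi_set_pstate()).
	 */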
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

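/*
 * Hive lifetime follows the kobject reference count: every caller of
 * amdgpu_get_xgmi_hive() owns one reference, and dropping the last one runs
 * amdgpu_xgmi_hive_release() above, which frees the hive.
 */
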
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/* fw bug so temporarily disable pstate switching */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs a single peer to request pstate high for the hive
	 * to go high, but all peers must request pstate low for the hive to
	 * go low.
	 */
	if (hive->pstate == pstate ||
	    (!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low)
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Each psp needs to set the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

/*
 * NOTE psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
 */

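/*
 * For example, a num_hops byte of 0x41 (binary 01 000 001) decodes as an
 * xGMI3 link that is one hop away from the peer.
 */
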
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
			       struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;
	return -EINVAL;
}

int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
			      struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_links;
	return -EINVAL;
}

/*
 * Devices that support extended data require the entire hive to initialize
 * with the shared memory buffer flag set.
 *
 * Hive locks and conditions apply - see amdgpu_xgmi_add_device
 */
static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
							  bool set_extended_data)
{
	struct amdgpu_device *tmp_adev;
	int ret;

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: Failed to initialize xgmi session for data partition %i\n",
				tmp_adev->gmc.xgmi.physical_node_id);
			return ret;
		}
	}

	return 0;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi *entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp, false, true);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/* update node list for other devices in the hive */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		/* get latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info, false);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* To do: continue with some node failed or disable the whole hive */
				goto exit_unlock;
			}
		}

		/* get topology again for hives that support extended data */
		if (adev->psp.xgmi_context.supports_extended_data) {
			/* initialize the hive to get extended data. */
			ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
			if (ret)
				goto exit_unlock;

			/* get the extended data. */
			list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
				ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
						&tmp_adev->psp.xgmi_context.top_info, true);
				if (ret) {
					dev_err(tmp_adev->dev,
						"XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
						tmp_adev->gmc.xgmi.node_id,
						tmp_adev->gmc.xgmi.hive_id, ret);
					goto exit_unlock;
				}
			}

			/* initialize the hive to get non-extended data for the next round. */
			ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
			if (ret)
				goto exit_unlock;
		}
	}

	if (!ret && !adev->gmc.xgmi.pending_reset)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

921 struct amdgpu_hive_info *hive = adev->hive;
923 if (!adev->gmc.xgmi.supported)
929 mutex_lock(&hive->hive_lock);
930 task_barrier_rem_task(&hive->tb);
931 amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
932 if (hive->hi_req_gpu == adev)
933 hive->hi_req_gpu = NULL;
934 list_del(&adev->gmc.xgmi.head);
935 mutex_unlock(&hive->hive_lock);
937 amdgpu_put_xgmi_hive(hive);
940 if (atomic_dec_return(&hive->number_devices) == 0) {
941 /* Remove the hive from global hive list */
942 mutex_lock(&xgmi_mutex);
943 list_del(&hive->node);
944 mutex_unlock(&xgmi_mutex);
946 amdgpu_put_xgmi_hive(hive);
static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

	return amdgpu_ras_block_late_init(adev, ras_block);
}

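/*
 * Per the DOC comment above, each node's VRAM occupies its own
 * node_segment_size aperture in the hive-wide physical address space, so a
 * device-local address is shifted up by physical_node_id apertures.
 */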
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}

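/* Write all ones and then zero to clear every latched PCS error status bit. */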
static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	case CHIP_ALDEBARAN:
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
			pcs_clear_status(adev,
					 xgmi3x16_pcs_err_status_reg_aldebaran[i]);
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_aldebaran); i++)
			pcs_clear_status(adev,
					 wafl_pcs_err_status_reg_aldebaran[i]);
		break;
	default:
		break;
	}

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
			pcs_clear_status(adev,
					 xgmi3x16_pcs_err_status_reg_v6_4[i]);
		break;
	default:
		break;
	}
}

static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
{
	WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
}

static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
		__xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]);
}

static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev)
{
	int i;

	for_each_inst(i, adev->aid_mask)
		xgmi_v6_4_0_reset_error_count(adev, i);
}

static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		xgmi_v6_4_0_reset_ras_error_count(adev);
		break;
	default:
		amdgpu_xgmi_legacy_reset_ras_error_count(adev);
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t mask_value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs,
					      bool check_mask)
{
	int i;
	int ue_cnt = 0;
	const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
	uint32_t field_array_size = 0;

	if (is_xgmi_pcs) {
		if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
			    IP_VERSION(6, 1, 0) ||
		    amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
			    IP_VERSION(6, 4, 0)) {
			pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
			field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
		} else {
			pcs_ras_fields = &xgmi_pcs_ras_fields[0];
			field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
		}
	} else {
		pcs_ras_fields = &wafl_pcs_ras_fields[0];
		field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
	}

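	/*
	 * mask_value comes from the corresponding non-correctable mask
	 * register (when check_mask is set); status bits flagged there are
	 * dropped before the fields are counted.
	 */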
	if (check_mask)
		value = value & ~mask_value;

	/* query xgmi/wafl pcs error status,
	 * only ue is supported */
	for (i = 0; value && i < field_array_size; i++) {
		ue_cnt = (value &
				pcs_ras_fields[i].pcs_err_mask) >>
				pcs_ras_fields[i].pcs_err_shift;
		if (ue_cnt) {
			dev_info(adev->dev, "%s detected\n",
				 pcs_ras_fields[i].err_name);
			*ue_count += ue_cnt;
		}

		/* reset bit value if the bit is checked */
		value &= ~(pcs_ras_fields[i].pcs_err_mask);
	}

	return 0;
}

static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
						     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i, supported = 1;
	uint32_t data, mask_data = 0;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, false);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, false);
		}
		break;
	case CHIP_VEGA20:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, false);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, false);
		}
		break;
	case CHIP_ALDEBARAN:
		/* check xgmi3x16 pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
			data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
			mask_data =
				RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_aldebaran); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_aldebaran[i]);
			mask_data =
				RREG32_PCIE(wafl_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, false, true);
		}
		break;
	default:
		supported = 0;
		break;
	}

	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		/* check xgmi3x16 pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
			data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
			mask_data =
				RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev, data,
						mask_data, &ue_cnt, &ce_cnt, true, true);
		}
		break;
	default:
		if (!supported)
			dev_warn(adev->dev, "XGMI RAS error query not supported");
		break;
	}

	amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;
}

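/*
 * Classify an MCA status word. Per the xgmi_v6_4_0_ras_error_code_ext table
 * above, extended error code 0x00 is DataLossErr (treated as uncorrectable)
 * and 0x06 is BERExceededErr (treated as correctable).
 */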
static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
{
	const char *error_str;
	int ext_error_code;

	ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status);

	error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
		xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
	if (error_str)
		dev_info(adev->dev, "%s detected\n", error_str);

	switch (ext_error_code) {
	case 0:
		return AMDGPU_MCA_ERROR_TYPE_UE;
	case 6:
		return AMDGPU_MCA_ERROR_TYPE_CE;
	default:
		return -EINVAL;
	}
}

static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info,
					    u64 mca_base, struct ras_err_data *err_data)
{
	int xgmi_inst = mcm_info->die_id;
	u64 status = 0;

	status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS);
	if (!MCA_REG__STATUS__VAL(status))
		return;

	switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
	case AMDGPU_MCA_ERROR_TYPE_UE:
		amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
		break;
	case AMDGPU_MCA_ERROR_TYPE_CE:
		amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
		break;
	default:
		break;
	}

	WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
}

static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
{
	struct amdgpu_smuio_mcm_config_info mcm_info = {
		.socket_id = adev->smuio.funcs->get_socket_id(adev),
		.die_id = xgmi_inst,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
		__xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data);
}

static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;

	for_each_inst(i, adev->aid_mask)
		xgmi_v6_4_0_query_error_count(adev, i, err_data);
}

static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
					      void *ras_error_status)
{
	switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
	case IP_VERSION(6, 4, 0):
		xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
		break;
	default:
		amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status);
		break;
	}
}

/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
					void *inject_if, uint32_t instance_mask)
{
	int ret1, ret2;
	struct ta_ras_trigger_error_input *block_info =
		(struct ta_ras_trigger_error_input *)inject_if;

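	/*
	 * Hold the fabric in a stable power state while the error is
	 * injected: DF C-states and XGMI per-link power down (PLPD) are
	 * disallowed around the PSP trigger and restored afterwards.
	 */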
	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
		dev_warn(adev->dev, "Failed to disallow df cstate");

	ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DISALLOW);
	if (ret1 && ret1 != -EOPNOTSUPP)
		dev_warn(adev->dev, "Failed to disallow XGMI power down");

	ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);

	if (amdgpu_ras_intr_triggered())
		return ret2;

	ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DEFAULT);
	if (ret1 && ret1 != -EOPNOTSUPP)
		dev_warn(adev->dev, "Failed to allow XGMI power down");

	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");

	return ret2;
}

struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
	.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
	.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
	.ras_error_inject = amdgpu_ras_error_inject_xgmi,
};

struct amdgpu_xgmi_ras xgmi_ras = {
	.ras_block = {
		.hw_ops = &xgmi_ras_hw_ops,
		.ras_late_init = amdgpu_xgmi_ras_late_init,
	},
};

int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
{
	int err;
	struct amdgpu_xgmi_ras *ras;

	if (!adev->gmc.xgmi.ras)
		return 0;

	ras = adev->gmc.xgmi.ras;
	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;

	return 0;
}