/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"
#include "gfx_v9_0.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

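/*
 * Firmware blobs this driver may request at init time, one set per
 * supported ASIC.  MODULE_FIRMWARE() records each name in the module
 * info so userspace tooling (e.g. initramfs generators) can bundle the
 * right files alongside the module.
 */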
64 MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
65 MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
66 MODULE_FIRMWARE("amdgpu/vega10_me.bin");
67 MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
68 MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
69 MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");
70
71 MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
72 MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
73 MODULE_FIRMWARE("amdgpu/vega12_me.bin");
74 MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
75 MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
76 MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");
77
78 MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
79 MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
80 MODULE_FIRMWARE("amdgpu/vega20_me.bin");
81 MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
82 MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
83 MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");
84
85 MODULE_FIRMWARE("amdgpu/raven_ce.bin");
86 MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
87 MODULE_FIRMWARE("amdgpu/raven_me.bin");
88 MODULE_FIRMWARE("amdgpu/raven_mec.bin");
89 MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
90 MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
91
92 MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
93 MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
94 MODULE_FIRMWARE("amdgpu/picasso_me.bin");
95 MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
96 MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
97 MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
98 MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
99
100 MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
101 MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
102 MODULE_FIRMWARE("amdgpu/raven2_me.bin");
103 MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
104 MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
105 MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
106 MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
107
108 MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
109 MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
110 MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
111
112 MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
113 MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
114 MODULE_FIRMWARE("amdgpu/renoir_me.bin");
115 MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
116 MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
117 MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
118
119 MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
120 MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
121 MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
122 MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
123 MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
124 MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
125
126 #define mmTCP_CHAN_STEER_0_ARCT                                                         0x0b03
127 #define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                                                        0
128 #define mmTCP_CHAN_STEER_1_ARCT                                                         0x0b04
129 #define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                                                        0
130 #define mmTCP_CHAN_STEER_2_ARCT                                                         0x0b09
131 #define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                                                        0
132 #define mmTCP_CHAN_STEER_3_ARCT                                                         0x0b0a
133 #define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                                                        0
134 #define mmTCP_CHAN_STEER_4_ARCT                                                         0x0b0b
135 #define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                                                        0
136 #define mmTCP_CHAN_STEER_5_ARCT                                                         0x0b0c
137 #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                                                        0
138
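/*
 * GFX sub-block indices as exchanged with the RAS TA firmware.  Each
 * hardware block occupies a contiguous range delimited by *_INDEX_START /
 * *_INDEX_END markers so ranges can be validated and iterated.
 */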
enum ta_ras_gfx_subblock {
        /*CPC*/
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
        TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
        TA_RAS_BLOCK__GFX_CPC_UCODE,
        TA_RAS_BLOCK__GFX_DC_STATE_ME1,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
        TA_RAS_BLOCK__GFX_DC_STATE_ME2,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        /* CPF*/
        TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
        TA_RAS_BLOCK__GFX_CPF_TAG,
        TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
        /* CPG*/
        TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
        TA_RAS_BLOCK__GFX_CPG_TAG,
        TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
        /* GDS*/
        TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        /* SPI*/
        TA_RAS_BLOCK__GFX_SPI_SR_MEM,
        /* SQ*/
        TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_LDS_D,
        TA_RAS_BLOCK__GFX_SQ_LDS_I,
        TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
        TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
        /* SQC (3 ranges)*/
        TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        /* SQC range 0*/
        TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
                TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        /* SQC range 1*/
        TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        /* SQC range 2*/
        TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
        /* TA*/
        TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
        TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        /* TCA*/
        TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        /* TCC (5 sub-ranges)*/
        TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        /* TCC range 0*/
        TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
        TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        /* TCC range 1*/
        TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
                TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        /* TCC range 2*/
        TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
        TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
        TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
                TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        /* TCC range 3*/
        TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
                TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        /* TCC range 4*/
        TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
                TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
                TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
        /* TCI*/
        TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
        /* TCP*/
        TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
        TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
        TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
        TA_RAS_BLOCK__GFX_TCP_DB_RAM,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        /* TD*/
        TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
        TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        /* EA (3 sub-ranges)*/
        TA_RAS_BLOCK__GFX_EA_INDEX_START,
        /* EA range 0*/
        TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        /* EA range 1*/
        TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        /* EA range 2*/
        TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
        /* UTC VM L2 bank*/
        TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
        /* UTC VM walker*/
        TA_RAS_BLOCK__UTC_VML2_WALKER,
        /* UTC ATC L2 2MB cache*/
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
        /* UTC ATC L2 4KB cache*/
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
        TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
        unsigned char *name;
        int ta_subblock;
        int hw_supported_error_type;
        int sw_supported_error_type;
};

#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
        [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
                #subblock,                                                     \
                TA_RAS_BLOCK__##subblock,                                      \
                ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
                (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
        }

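/*
 * For example, AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1)
 * expands to a designated initializer at index
 * AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH:
 *
 *   { "GFX_CPC_SCRATCH", TA_RAS_BLOCK__GFX_CPC_SCRATCH, 0xe, 0x6 }
 *
 * i.e. flags a-d are packed into hw_supported_error_type and flags e-h
 * into sw_supported_error_type.
 */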
static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

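/*
 * "Golden" register settings.  Each SOC15_REG_GOLDEN_VALUE(ip, inst, reg,
 * and_mask, or_mask) entry is applied by soc15_program_register_sequence(),
 * roughly (a sketch; see soc15.c for the exact behaviour):
 *
 *   tmp = RREG32(reg);
 *   tmp &= ~and_mask;
 *   tmp |= (or_mask & and_mask);
 *   WREG32(reg, tmp);
 *
 * with an and_mask of 0xffffffff the or_mask value is written directly.
 */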
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
        {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
        {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

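/*
 * Offsets of the RLC save/restore index control registers relative to
 * entry 0 (entry 0 is 0 by construction), so the SRM init code can
 * address RLC_SRM_INDEX_CNTL_ADDR_n / _DATA_n uniformly in a loop.
 */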
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

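/*
 * RLC-gated register write path (used under SR-IOV, where the guest may
 * not write certain GC registers directly): the value and target offset
 * (with bit 31 set as a "pending" flag) are placed in SCRATCH_REG0/1,
 * RLC_SPARE_INT is rung, and the RLC firmware performs the write and
 * clears the flag.  GRBM_GFX_CNTL/INDEX are instead mirrored into
 * SCRATCH_REG2/3 and also written directly, as gfx_v9_0_rlcg_wreg()
 * below does.
 */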
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
        static void *scratch_reg0;
        static void *scratch_reg1;
        static void *scratch_reg2;
        static void *scratch_reg3;
        static void *spare_int;
        static uint32_t grbm_cntl;
        static uint32_t grbm_idx;

        scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
        scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
        scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
        scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
        spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

        grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
        grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

        if (amdgpu_sriov_runtime(adev)) {
                pr_err("shouldn't call rlcg write register during runtime\n");
                return;
        }

        if (offset == grbm_cntl || offset == grbm_idx) {
                if (offset == grbm_cntl)
                        writel(v, scratch_reg2);
                else if (offset == grbm_idx)
                        writel(v, scratch_reg3);

                writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
        } else {
                uint32_t i = 0;
                uint32_t retries = 50000;

                writel(v, scratch_reg0);
                writel(offset | 0x80000000, scratch_reg1);
                writel(1, spare_int);
                for (i = 0; i < retries; i++) {
                        u32 tmp;

                        tmp = readl(scratch_reg1);
                        if (!(tmp & 0x80000000))
                                break;

                        udelay(10);
                }
                if (i >= retries)
                        pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
        }
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

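/*
 * KIQ (Kernel Interface Queue) helpers.  The KIQ is a privileged compute
 * queue through which the driver asks the CP firmware to map, unmap and
 * query the other gfx/compute queues; each helper below emits one PM4
 * packet on the KIQ ring.
 */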
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring,
                PACKET3_SET_RESOURCES_VMID_MASK(0) |
                /* vmid_mask:0 queue_type:0 (KIQ) */
                PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
        amdgpu_ring_write(kiq_ring,
                        lower_32_bits(queue_mask));     /* queue mask lo */
        amdgpu_ring_write(kiq_ring,
                        upper_32_bits(queue_mask));     /* queue mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
        amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
        amdgpu_ring_write(kiq_ring, 0); /* oac mask */
        amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                 struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
        /* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
        amdgpu_ring_write(kiq_ring,
                         PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                         PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                         PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                         PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                         PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                         /* queue_type: normal compute queue */
                         PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                         /* alloc format: all_on_one_pipe */
                         PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                         PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                         /* num_queues: must be 1 */
                         PACKET3_MAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
        amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   enum amdgpu_unmap_queues_action action,
                                   u64 gpu_addr, u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring, /* action, queue_sel:0, engine, num_Q:1 */
                          PACKET3_UNMAP_QUEUES_ACTION(action) |
                          PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                          PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

        if (action == PREEMPT_QUEUES_NO_UNMAP) {
                amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
                amdgpu_ring_write(kiq_ring, seq);
        } else {
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   u64 addr,
                                   u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                          PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                          PACKET3_QUERY_STATUS_COMMAND(2));
        /* doorbell offset and engine select */
        amdgpu_ring_write(kiq_ring,
                        PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                        PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
        amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
        amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                                uint16_t pasid, uint32_t flush_type,
                                bool all_hub)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                        PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
                        PACKET3_INVALIDATE_TLBS_PASID(pasid) |
                        PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

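/*
 * The *_size fields below are the total number of dwords each helper
 * above emits (PM4 header included); the KIQ code uses them to reserve
 * ring space before building a batch of packets.
 */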
static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
        .kiq_set_resources = gfx_v9_0_kiq_set_resources,
        .kiq_map_queues = gfx_v9_0_kiq_map_queues,
        .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
        .kiq_query_status = gfx_v9_0_kiq_query_status,
        .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
        .set_resources_size = 8,
        .map_queues_size = 7,
        .unmap_queues_size = 6,
        .query_status_size = 7,
        .invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
        adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case CHIP_ARCTURUS:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_4_1_arct,
                                                ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv1,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        case CHIP_RENOIR:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1_rn,
                                                ARRAY_SIZE(golden_settings_gc_9_1_rn));
                return; /* renoir does not need the common golden settings */
        default:
                break;
        }

        if (adev->asic_type != CHIP_ARCTURUS)
                soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

992 static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
993 {
994         adev->gfx.scratch.num_reg = 8;
995         adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
996         adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
997 }
998
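/*
 * Emit a 5-DW WRITE_DATA packet that writes @val to the MMIO register @reg
 * (dst_sel 0) from the selected engine, optionally asking for a write
 * confirmation: header, control word, reg offset, upper address (0), value.
 */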
999 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
1000                                        bool wc, uint32_t reg, uint32_t val)
1001 {
1002         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
1003         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
1004                                 WRITE_DATA_DST_SEL(0) |
1005                                 (wc ? WR_CONFIRM : 0));
1006         amdgpu_ring_write(ring, reg);
1007         amdgpu_ring_write(ring, 0);
1008         amdgpu_ring_write(ring, val);
1009 }
1010
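/*
 * Emit a 6-DW WAIT_REG_MEM packet that polls a register (mem_space == 0) or
 * a dword-aligned memory location (mem_space == 1) until the masked value
 * equals @ref; @inv is the poll interval.  Only the "equal" compare
 * function (3) is used here.
 */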
1011 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1012                                   int mem_space, int opt, uint32_t addr0,
1013                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1014                                   uint32_t inv)
1015 {
1016         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1017         amdgpu_ring_write(ring,
1018                                  /* memory (1) or register (0) */
1019                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1020                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1021                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1022                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1023
1024         if (mem_space)
1025                 BUG_ON(addr0 & 0x3); /* Dword align */
1026         amdgpu_ring_write(ring, addr0);
1027         amdgpu_ring_write(ring, addr1);
1028         amdgpu_ring_write(ring, ref);
1029         amdgpu_ring_write(ring, mask);
1030         amdgpu_ring_write(ring, inv); /* poll interval */
1031 }
1032
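/*
 * Smoke-test the ring: seed a scratch register with 0xCAFEDEAD, submit a
 * SET_UCONFIG_REG packet that writes 0xDEADBEEF to it, and poll until the
 * write lands or adev->usec_timeout expires.
 */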
1033 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1034 {
1035         struct amdgpu_device *adev = ring->adev;
1036         uint32_t scratch;
1037         uint32_t tmp = 0;
1038         unsigned i;
1039         int r;
1040
1041         r = amdgpu_gfx_scratch_get(adev, &scratch);
1042         if (r)
1043                 return r;
1044
1045         WREG32(scratch, 0xCAFEDEAD);
1046         r = amdgpu_ring_alloc(ring, 3);
1047         if (r)
1048                 goto error_free_scratch;
1049
1050         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1051         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1052         amdgpu_ring_write(ring, 0xDEADBEEF);
1053         amdgpu_ring_commit(ring);
1054
1055         for (i = 0; i < adev->usec_timeout; i++) {
1056                 tmp = RREG32(scratch);
1057                 if (tmp == 0xDEADBEEF)
1058                         break;
1059                 udelay(1);
1060         }
1061
1062         if (i >= adev->usec_timeout)
1063                 r = -ETIMEDOUT;
1064
1065 error_free_scratch:
1066         amdgpu_gfx_scratch_free(adev, scratch);
1067         return r;
1068 }
1069
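/*
 * Test indirect-buffer execution: schedule a small IB whose WRITE_DATA
 * packet stores 0xDEADBEEF to a writeback slot, wait on the fence, then
 * check that the value actually reached memory.
 */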
1070 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1071 {
1072         struct amdgpu_device *adev = ring->adev;
1073         struct amdgpu_ib ib;
1074         struct dma_fence *f = NULL;
1076         unsigned index;
1077         uint64_t gpu_addr;
1078         uint32_t tmp;
1079         long r;
1080
1081         r = amdgpu_device_wb_get(adev, &index);
1082         if (r)
1083                 return r;
1084
1085         gpu_addr = adev->wb.gpu_addr + (index * 4);
1086         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1087         memset(&ib, 0, sizeof(ib));
1088         r = amdgpu_ib_get(adev, NULL, 16,
1089                           AMDGPU_IB_POOL_DIRECT, &ib);
1090         if (r)
1091                 goto err1;
1092
1093         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1094         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1095         ib.ptr[2] = lower_32_bits(gpu_addr);
1096         ib.ptr[3] = upper_32_bits(gpu_addr);
1097         ib.ptr[4] = 0xDEADBEEF;
1098         ib.length_dw = 5;
1099
1100         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1101         if (r)
1102                 goto err2;
1103
1104         r = dma_fence_wait_timeout(f, false, timeout);
1105         if (r == 0) {
1106                 r = -ETIMEDOUT;
1107                 goto err2;
1108         } else if (r < 0) {
1109                 goto err2;
1110         }
1111
1112         tmp = le32_to_cpu(adev->wb.wb[index]);
1113         if (tmp == 0xDEADBEEF)
1114                 r = 0;
1115         else
1116                 r = -EINVAL;
1117
1118 err2:
1119         amdgpu_ib_free(adev, &ib, NULL);
1120         dma_fence_put(f);
1121 err1:
1122         amdgpu_device_wb_free(adev, index);
1123         return r;
1124 }
1125
1127 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1128 {
1129         release_firmware(adev->gfx.pfp_fw);
1130         adev->gfx.pfp_fw = NULL;
1131         release_firmware(adev->gfx.me_fw);
1132         adev->gfx.me_fw = NULL;
1133         release_firmware(adev->gfx.ce_fw);
1134         adev->gfx.ce_fw = NULL;
1135         release_firmware(adev->gfx.rlc_fw);
1136         adev->gfx.rlc_fw = NULL;
1137         release_firmware(adev->gfx.mec_fw);
1138         adev->gfx.mec_fw = NULL;
1139         release_firmware(adev->gfx.mec2_fw);
1140         adev->gfx.mec2_fw = NULL;
1141
1142         kfree(adev->gfx.rlc.register_list_format);
1143 }
1144
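/*
 * Parse the v2.1 extensions of the RLC firmware header: version, size and
 * in-image offset of each of the three save/restore lists (CNTL, GPM, SRM),
 * plus the length of the direct register list format.
 */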
1145 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1146 {
1147         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1148
1149         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1150         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1151         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1152         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1153         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1154         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1155         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1156         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1157         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1158         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1159         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1160         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1161         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1162         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1163                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1164 }
1165
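/*
 * Record whether the installed ME/MEC firmware is new enough to handle
 * register write-and-wait packets itself, based on per-ASIC minimum
 * versions, and warn once when the CP firmware predates the known-good
 * releases.
 */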
1166 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1167 {
1168         adev->gfx.me_fw_write_wait = false;
1169         adev->gfx.mec_fw_write_wait = false;
1170
1171         if ((adev->asic_type != CHIP_ARCTURUS) &&
1172             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1173             (adev->gfx.mec_feature_version < 46) ||
1174             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1175             (adev->gfx.pfp_feature_version < 46)))
1176                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1177
1178         switch (adev->asic_type) {
1179         case CHIP_VEGA10:
1180                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1181                     (adev->gfx.me_feature_version >= 42) &&
1182                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1183                     (adev->gfx.pfp_feature_version >= 42))
1184                         adev->gfx.me_fw_write_wait = true;
1185
1186                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1187                     (adev->gfx.mec_feature_version >= 42))
1188                         adev->gfx.mec_fw_write_wait = true;
1189                 break;
1190         case CHIP_VEGA12:
1191                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1192                     (adev->gfx.me_feature_version >= 44) &&
1193                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1194                     (adev->gfx.pfp_feature_version >= 44))
1195                         adev->gfx.me_fw_write_wait = true;
1196
1197                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1198                     (adev->gfx.mec_feature_version >= 44))
1199                         adev->gfx.mec_fw_write_wait = true;
1200                 break;
1201         case CHIP_VEGA20:
1202                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1203                     (adev->gfx.me_feature_version >= 44) &&
1204                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1205                     (adev->gfx.pfp_feature_version >= 44))
1206                         adev->gfx.me_fw_write_wait = true;
1207
1208                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1209                     (adev->gfx.mec_feature_version >= 44))
1210                         adev->gfx.mec_fw_write_wait = true;
1211                 break;
1212         case CHIP_RAVEN:
1213                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1214                     (adev->gfx.me_feature_version >= 42) &&
1215                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1216                     (adev->gfx.pfp_feature_version >= 42))
1217                         adev->gfx.me_fw_write_wait = true;
1218
1219                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1220                     (adev->gfx.mec_feature_version >= 42))
1221                         adev->gfx.mec_fw_write_wait = true;
1222                 break;
1223         default:
1224                 adev->gfx.me_fw_write_wait = true;
1225                 adev->gfx.mec_fw_write_wait = true;
1226                 break;
1227         }
1228 }
1229
1230 struct amdgpu_gfxoff_quirk {
1231         u16 chip_vendor;
1232         u16 chip_device;
1233         u16 subsys_vendor;
1234         u16 subsys_device;
1235         u8 revision;
1236 };
1237
1238 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1239         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1240         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1241         /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1242         { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1243         /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
1244         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1245         { 0, 0, 0, 0, 0 },
1246 };
1247
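/*
 * Walk the quirk table above and report whether this board's PCI IDs and
 * revision match an entry known to have unstable GFXOFF.
 */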
1248 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1249 {
1250         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1251
1252         while (p && p->chip_device != 0) {
1253                 if (pdev->vendor == p->chip_vendor &&
1254                     pdev->device == p->chip_device &&
1255                     pdev->subsystem_vendor == p->subsys_vendor &&
1256                     pdev->subsystem_device == p->subsys_device &&
1257                     pdev->revision == p->revision) {
1258                         return true;
1259                 }
1260                 ++p;
1261         }
1262         return false;
1263 }
1264
1265 static bool is_raven_kicker(struct amdgpu_device *adev)
1266 {
1267         return adev->pm.fw_version >= 0x41e2b;
1268 }
1272
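/*
 * Decide whether GFXOFF stays enabled.  Original Raven keeps it only with a
 * v2.1 RLC (feature version >= 1) and either kicker SMC firmware or RLC
 * firmware >= 531; when GFXOFF survives on Raven or Renoir, the matching
 * GFX powergating flags are set as well.
 */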
1273 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1274 {
1275         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1276                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1277
1278         switch (adev->asic_type) {
1279         case CHIP_VEGA10:
1280         case CHIP_VEGA12:
1281         case CHIP_VEGA20:
1282                 break;
1283         case CHIP_RAVEN:
1284                 if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1285                       (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1286                     ((!is_raven_kicker(adev) &&
1287                       adev->gfx.rlc_fw_version < 531) ||
1288                      (adev->gfx.rlc_feature_version < 1) ||
1289                      !adev->gfx.rlc.is_rlc_v2_1))
1290                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1291
1292                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1293                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1294                                 AMD_PG_SUPPORT_CP |
1295                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1296                 break;
1297         case CHIP_RENOIR:
1298                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1299                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1300                                 AMD_PG_SUPPORT_CP |
1301                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1302                 break;
1303         default:
1304                 break;
1305         }
1306 }
1307
1308 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1309                                           const char *chip_name)
1310 {
1311         char fw_name[30];
1312         int err;
1313         struct amdgpu_firmware_info *info = NULL;
1314         const struct common_firmware_header *header = NULL;
1315         const struct gfx_firmware_header_v1_0 *cp_hdr;
1316
1317         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1318         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1319         if (err)
1320                 goto out;
1321         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1322         if (err)
1323                 goto out;
1324         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1325         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1326         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1327
1328         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1329         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1330         if (err)
1331                 goto out;
1332         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1333         if (err)
1334                 goto out;
1335         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1336         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1337         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1338
1339         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1340         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1341         if (err)
1342                 goto out;
1343         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1344         if (err)
1345                 goto out;
1346         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1347         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1348         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1349
1350         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1351                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1352                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1353                 info->fw = adev->gfx.pfp_fw;
1354                 header = (const struct common_firmware_header *)info->fw->data;
1355                 adev->firmware.fw_size +=
1356                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1357
1358                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1359                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1360                 info->fw = adev->gfx.me_fw;
1361                 header = (const struct common_firmware_header *)info->fw->data;
1362                 adev->firmware.fw_size +=
1363                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1364
1365                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1366                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1367                 info->fw = adev->gfx.ce_fw;
1368                 header = (const struct common_firmware_header *)info->fw->data;
1369                 adev->firmware.fw_size +=
1370                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1371         }
1372
1373 out:
1374         if (err) {
1375                 dev_err(adev->dev,
1376                         "gfx9: Failed to load firmware \"%s\"\n",
1377                         fw_name);
1378                 release_firmware(adev->gfx.pfp_fw);
1379                 adev->gfx.pfp_fw = NULL;
1380                 release_firmware(adev->gfx.me_fw);
1381                 adev->gfx.me_fw = NULL;
1382                 release_firmware(adev->gfx.ce_fw);
1383                 adev->gfx.ce_fw = NULL;
1384         }
1385         return err;
1386 }
1387
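/*
 * Fetch and parse the RLC firmware.  Picasso AM4 parts and Raven systems
 * running kicker SMC firmware use dedicated images; the v2.0 header fields
 * describe the save/restore lists copied out below, and v2.1 headers carry
 * the extended lists handled by gfx_v9_0_init_rlc_ext_microcode().
 */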
1388 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1389                                        const char *chip_name)
1390 {
1391         char fw_name[30];
1392         int err;
1393         struct amdgpu_firmware_info *info = NULL;
1394         const struct common_firmware_header *header = NULL;
1395         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1396         unsigned int *tmp = NULL;
1397         unsigned int i = 0;
1398         uint16_t version_major;
1399         uint16_t version_minor;
1400         uint32_t smu_version;
1401
1402         /*
1403          * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
1404          * instead of picasso_rlc.bin.
1405          * Detection:
1406          * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1407          *          or revision >= 0xD8 && revision <= 0xDF
1408          * otherwise it is PCO FP5
1409          */
1410         if (!strcmp(chip_name, "picasso") &&
1411                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1412                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1413                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1414         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1415                 (smu_version >= 0x41e2b))
1416                 /* SMC is loaded by SBIOS on APU, so the SMU version can
1417                  * be queried directly.
1418                  */
1419                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1420         else
1421                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1422         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1423         if (err)
1424                 goto out;
1425         err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
1426         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1427
1428         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1429         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1430         if (version_major == 2 && version_minor == 1)
1431                 adev->gfx.rlc.is_rlc_v2_1 = true;
1432
1433         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1434         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1435         adev->gfx.rlc.save_and_restore_offset =
1436                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1437         adev->gfx.rlc.clear_state_descriptor_offset =
1438                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1439         adev->gfx.rlc.avail_scratch_ram_locations =
1440                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1441         adev->gfx.rlc.reg_restore_list_size =
1442                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1443         adev->gfx.rlc.reg_list_format_start =
1444                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1445         adev->gfx.rlc.reg_list_format_separate_start =
1446                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1447         adev->gfx.rlc.starting_offsets_start =
1448                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1449         adev->gfx.rlc.reg_list_format_size_bytes =
1450                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1451         adev->gfx.rlc.reg_list_size_bytes =
1452                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1453         adev->gfx.rlc.register_list_format =
1454                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1455                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1456         if (!adev->gfx.rlc.register_list_format) {
1457                 err = -ENOMEM;
1458                 goto out;
1459         }
1460
1461         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1462                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1463         for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1464                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1465
1466         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1467
1468         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1469                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1470         for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1471                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1472
1473         if (adev->gfx.rlc.is_rlc_v2_1)
1474                 gfx_v9_0_init_rlc_ext_microcode(adev);
1475
1476         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1477                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1478                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1479                 info->fw = adev->gfx.rlc_fw;
1480                 header = (const struct common_firmware_header *)info->fw->data;
1481                 adev->firmware.fw_size +=
1482                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1483
1484                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1485                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1486                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1487                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1488                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1489                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1490                         info->fw = adev->gfx.rlc_fw;
1491                         adev->firmware.fw_size +=
1492                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1493
1494                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1495                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1496                         info->fw = adev->gfx.rlc_fw;
1497                         adev->firmware.fw_size +=
1498                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1499
1500                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1501                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1502                         info->fw = adev->gfx.rlc_fw;
1503                         adev->firmware.fw_size +=
1504                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1505                 }
1506         }
1507
1508 out:
1509         if (err) {
1510                 dev_err(adev->dev,
1511                         "gfx9: Failed to load firmware \"%s\"\n",
1512                         fw_name);
1513                 release_firmware(adev->gfx.rlc_fw);
1514                 adev->gfx.rlc_fw = NULL;
1515         }
1516         return err;
1517 }
1518
1519 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1520                                               const char *chip_name)
1521 {
1522         char fw_name[30];
1523         int err;
1524         struct amdgpu_firmware_info *info = NULL;
1525         const struct common_firmware_header *header = NULL;
1526         const struct gfx_firmware_header_v1_0 *cp_hdr;
1527
1528         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1529         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1530         if (err)
1531                 goto out;
1532         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1533         if (err)
1534                 goto out;
1535         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1536         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1537         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1538
1540         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1541         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1542         if (!err) {
1543                 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1544                 if (err)
1545                         goto out;
1546                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1547                         adev->gfx.mec2_fw->data;
1548                 adev->gfx.mec2_fw_version =
1549                         le32_to_cpu(cp_hdr->header.ucode_version);
1550                 adev->gfx.mec2_feature_version =
1551                         le32_to_cpu(cp_hdr->ucode_feature_version);
1552         } else {
1553                 err = 0;
1554                 adev->gfx.mec2_fw = NULL;
1555         }
1556
1557         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1558                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1559                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1560                 info->fw = adev->gfx.mec_fw;
1561                 header = (const struct common_firmware_header *)info->fw->data;
1562                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1563                 adev->firmware.fw_size +=
1564                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1565
1566                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1567                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1568                 info->fw = adev->gfx.mec_fw;
1569                 adev->firmware.fw_size +=
1570                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1571
1572                 if (adev->gfx.mec2_fw) {
1573                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1574                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1575                         info->fw = adev->gfx.mec2_fw;
1576                         header = (const struct common_firmware_header *)info->fw->data;
1577                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1578                         adev->firmware.fw_size +=
1579                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1580
1581                         /* TODO: Determine if MEC2 JT FW loading can be
1582                          * removed for all GFX V9 asics and above */
1583                         if (adev->asic_type != CHIP_ARCTURUS &&
1584                             adev->asic_type != CHIP_RENOIR) {
1585                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1586                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1587                                 info->fw = adev->gfx.mec2_fw;
1588                                 adev->firmware.fw_size +=
1589                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1590                                         PAGE_SIZE);
1591                         }
1592                 }
1593         }
1594
1595 out:
1596         gfx_v9_0_check_if_need_gfxoff(adev);
1597         gfx_v9_0_check_fw_write_wait(adev);
1598         if (err) {
1599                 dev_err(adev->dev,
1600                         "gfx9: Failed to load firmware \"%s\"\n",
1601                         fw_name);
1602                 release_firmware(adev->gfx.mec_fw);
1603                 adev->gfx.mec_fw = NULL;
1604                 release_firmware(adev->gfx.mec2_fw);
1605                 adev->gfx.mec2_fw = NULL;
1606         }
1607         return err;
1608 }
1609
1610 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1611 {
1612         const char *chip_name;
1613         int r;
1614
1615         DRM_DEBUG("\n");
1616
1617         switch (adev->asic_type) {
1618         case CHIP_VEGA10:
1619                 chip_name = "vega10";
1620                 break;
1621         case CHIP_VEGA12:
1622                 chip_name = "vega12";
1623                 break;
1624         case CHIP_VEGA20:
1625                 chip_name = "vega20";
1626                 break;
1627         case CHIP_RAVEN:
1628                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1629                         chip_name = "raven2";
1630                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1631                         chip_name = "picasso";
1632                 else
1633                         chip_name = "raven";
1634                 break;
1635         case CHIP_ARCTURUS:
1636                 chip_name = "arcturus";
1637                 break;
1638         case CHIP_RENOIR:
1639                 if (adev->apu_flags & AMD_APU_IS_RENOIR)
1640                         chip_name = "renoir";
1641                 else
1642                         chip_name = "green_sardine";
1643                 break;
1644         default:
1645                 BUG();
1646         }
1647
1648         /* No CPG in Arcturus */
1649         if (adev->gfx.num_gfx_rings) {
1650                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1651                 if (r)
1652                         return r;
1653         }
1654
1655         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1656         if (r)
1657                 return r;
1658
1659         r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1660         if (r)
1661                 return r;
1662
1663         return 0;
1664 }
1665
1666 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1667 {
1668         u32 count = 0;
1669         const struct cs_section_def *sect = NULL;
1670         const struct cs_extent_def *ext = NULL;
1671
1672         /* begin clear state */
1673         count += 2;
1674         /* context control state */
1675         count += 3;
1676
1677         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1678                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1679                         if (sect->id == SECT_CONTEXT)
1680                                 count += 2 + ext->reg_count;
1681                         else
1682                                 return 0;
1683                 }
1684         }
1685
1686         /* end clear state */
1687         count += 2;
1688         /* clear state */
1689         count += 2;
1690
1691         return count;
1692 }
1693
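/*
 * Fill the clear-state buffer: preamble begin, a CONTEXT_CONTROL packet,
 * every SECT_CONTEXT register extent from the clear-state data, preamble
 * end, and a final CLEAR_STATE packet.  The layout must stay in sync with
 * gfx_v9_0_get_csb_size() above.
 */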
1694 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1695                                     volatile u32 *buffer)
1696 {
1697         u32 count = 0, i;
1698         const struct cs_section_def *sect = NULL;
1699         const struct cs_extent_def *ext = NULL;
1700
1701         if (adev->gfx.rlc.cs_data == NULL)
1702                 return;
1703         if (buffer == NULL)
1704                 return;
1705
1706         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1707         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1708
1709         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1710         buffer[count++] = cpu_to_le32(0x80000000);
1711         buffer[count++] = cpu_to_le32(0x80000000);
1712
1713         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1714                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1715                         if (sect->id == SECT_CONTEXT) {
1716                                 buffer[count++] =
1717                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1718                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1719                                                 PACKET3_SET_CONTEXT_REG_START);
1720                                 for (i = 0; i < ext->reg_count; i++)
1721                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1722                         } else {
1723                                 return;
1724                         }
1725                 }
1726         }
1727
1728         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1729         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1730
1731         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1732         buffer[count++] = cpu_to_le32(0);
1733 }
1734
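/*
 * Build the always-on CU mask for each SE/SH: the first N CUs present in
 * the bitmap stay powered (N = 4 on APUs, 8 on Vega12, 12 otherwise), and
 * the first two of those also form the powergating always-on mask.
 */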
1735 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1736 {
1737         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1738         uint32_t pg_always_on_cu_num = 2;
1739         uint32_t always_on_cu_num;
1740         uint32_t i, j, k;
1741         uint32_t mask, cu_bitmap, counter;
1742
1743         if (adev->flags & AMD_IS_APU)
1744                 always_on_cu_num = 4;
1745         else if (adev->asic_type == CHIP_VEGA12)
1746                 always_on_cu_num = 8;
1747         else
1748                 always_on_cu_num = 12;
1749
1750         mutex_lock(&adev->grbm_idx_mutex);
1751         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1752                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1753                         mask = 1;
1754                         cu_bitmap = 0;
1755                         counter = 0;
1756                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1757
1758                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1759                                 if (cu_info->bitmap[i][j] & mask) {
1760                                         if (counter == pg_always_on_cu_num)
1761                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1762                                         if (counter < always_on_cu_num)
1763                                                 cu_bitmap |= mask;
1764                                         else
1765                                                 break;
1766                                         counter++;
1767                                 }
1768                                 mask <<= 1;
1769                         }
1770
1771                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1772                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1773                 }
1774         }
1775         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1776         mutex_unlock(&adev->grbm_idx_mutex);
1777 }
1778
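/*
 * Program load balancing per watt (LBPW) for Raven: threshold and counter
 * registers, the init CU mask broadcast to all SE/SH, RLC_LB_PARAMS and
 * RLC_LB_CNTL, then the always-on CU masks.
 */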
1779 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1780 {
1781         uint32_t data;
1782
1783         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1784         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1785         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1786         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1787         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1788
1789         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1790         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1791
1792         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1793         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1794
1795         mutex_lock(&adev->grbm_idx_mutex);
1796         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1797         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1798         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1799
1800         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1801         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1802         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1803         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1804         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1805
1806         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1807         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1808         data &= 0x0000FFFF;
1809         data |= 0x00C00000;
1810         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1811
1812         /*
1813          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1814          * programmed in gfx_v9_0_init_always_on_cu_mask()
1815          */
1816
1817         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1818          * but is used for RLC_LB_CNTL configuration */
1819         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1820         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1821         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1822         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1823         mutex_unlock(&adev->grbm_idx_mutex);
1824
1825         gfx_v9_0_init_always_on_cu_mask(adev);
1826 }
1827
1828 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1829 {
1830         uint32_t data;
1831
1832         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1833         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1834         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1835         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1836         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1837
1838         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1839         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1840
1841         /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1842         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1843
1844         mutex_lock(&adev->grbm_idx_mutex);
1845         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1846         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1847         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1848
1849         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1850         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1851         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1852         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1853         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1854
1855         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1856         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1857         data &= 0x0000FFFF;
1858         data |= 0x00C00000;
1859         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1860
1861         /*
1862          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1863          * programmed in gfx_v9_0_init_always_on_cu_mask()
1864          */
1865
1866         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1867          * but is used for RLC_LB_CNTL configuration */
1868         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1869         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1870         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1871         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1872         mutex_unlock(&adev->grbm_idx_mutex);
1873
1874         gfx_v9_0_init_always_on_cu_mask(adev);
1875 }
1876
1877 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1878 {
1879         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1880 }
1881
1882 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1883 {
1884         return 5;
1885 }
1886
1887 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1888 {
1889         const struct cs_section_def *cs_data;
1890         int r;
1891
1892         adev->gfx.rlc.cs_data = gfx9_cs_data;
1893
1894         cs_data = adev->gfx.rlc.cs_data;
1895
1896         if (cs_data) {
1897                 /* init clear state block */
1898                 r = amdgpu_gfx_rlc_init_csb(adev);
1899                 if (r)
1900                         return r;
1901         }
1902
1903         if (adev->flags & AMD_IS_APU) {
1904                 /* TODO: double check the cp_table_size for RV */
1905                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1906                 r = amdgpu_gfx_rlc_init_cpt(adev);
1907                 if (r)
1908                         return r;
1909         }
1910
1911         switch (adev->asic_type) {
1912         case CHIP_RAVEN:
1913                 gfx_v9_0_init_lbpw(adev);
1914                 break;
1915         case CHIP_VEGA20:
1916                 gfx_v9_4_init_lbpw(adev);
1917                 break;
1918         default:
1919                 break;
1920         }
1921
1922         /* init spm vmid with 0xf */
1923         if (adev->gfx.rlc.funcs->update_spm_vmid)
1924                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1925
1926         return 0;
1927 }
1928
1929 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1930 {
1931         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1932         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1933 }
1934
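/*
 * Set up the MEC: claim the compute queues this driver owns, allocate the
 * HPD EOP buffer in VRAM (one GFX9_MEC_HPD_SIZE slot per compute ring) and
 * copy the MEC firmware into a GTT object for the CP to fetch.
 */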
1935 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1936 {
1937         int r;
1938         u32 *hpd;
1939         const __le32 *fw_data;
1940         unsigned fw_size;
1941         u32 *fw;
1942         size_t mec_hpd_size;
1943
1944         const struct gfx_firmware_header_v1_0 *mec_hdr;
1945
1946         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1947
1948         /* take ownership of the relevant compute queues */
1949         amdgpu_gfx_compute_queue_acquire(adev);
1950         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1951         if (mec_hpd_size) {
1952                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1953                                               AMDGPU_GEM_DOMAIN_VRAM,
1954                                               &adev->gfx.mec.hpd_eop_obj,
1955                                               &adev->gfx.mec.hpd_eop_gpu_addr,
1956                                               (void **)&hpd);
1957                 if (r) {
1958                         dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1959                         gfx_v9_0_mec_fini(adev);
1960                         return r;
1961                 }
1962
1963                 memset(hpd, 0, mec_hpd_size);
1964
1965                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1966                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1967         }
1968
1969         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1970
1971         fw_data = (const __le32 *)
1972                 (adev->gfx.mec_fw->data +
1973                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1974         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1975
1976         r = amdgpu_bo_create_reserved(adev, fw_size,
1977                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1978                                       &adev->gfx.mec.mec_fw_obj,
1979                                       &adev->gfx.mec.mec_fw_gpu_addr,
1980                                       (void **)&fw);
1981         if (r) {
1982                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1983                 gfx_v9_0_mec_fini(adev);
1984                 return r;
1985         }
1986
1987         memcpy(fw, fw_data, fw_size);
1988
1989         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1990         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1991
1992         return 0;
1993 }
1994
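/*
 * Read one wavefront register via the SQ indirect index/data pair: the
 * SIMD, wave and register address are packed into SQ_IND_INDEX with
 * FORCE_READ, and the value comes back through SQ_IND_DATA.
 */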
1995 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1996 {
1997         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1998                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1999                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2000                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
2001                 (SQ_IND_INDEX__FORCE_READ_MASK));
2002         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2003 }
2004
2005 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
2006                            uint32_t wave, uint32_t thread,
2007                            uint32_t regno, uint32_t num, uint32_t *out)
2008 {
2009         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2010                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2011                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2012                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
2013                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2014                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2015                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2016         while (num--)
2017                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2018 }
2019
2020 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2021 {
2022         /* type 1 wave data */
2023         dst[(*no_fields)++] = 1;
2024         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2025         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2026         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2027         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2028         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2029         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2030         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2031         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2032         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2033         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2034         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2035         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2036         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2037         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2038 }
2039
2040 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2041                                      uint32_t wave, uint32_t start,
2042                                      uint32_t size, uint32_t *dst)
2043 {
2044         wave_read_regs(
2045                 adev, simd, wave, 0,
2046                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2047 }
2048
2049 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2050                                      uint32_t wave, uint32_t thread,
2051                                      uint32_t start, uint32_t size,
2052                                      uint32_t *dst)
2053 {
2054         wave_read_regs(
2055                 adev, simd, wave, thread,
2056                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2057 }
2058
2059 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2060                                   u32 me, u32 pipe, u32 q, u32 vm)
2061 {
2062         soc15_grbm_select(adev, me, pipe, q, vm);
2063 }
2064
2065 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2066         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2067         .select_se_sh = &gfx_v9_0_select_se_sh,
2068         .read_wave_data = &gfx_v9_0_read_wave_data,
2069         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2070         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2071         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2072         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2073         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2074         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2075 };
2076
2077 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2078         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2079         .select_se_sh = &gfx_v9_0_select_se_sh,
2080         .read_wave_data = &gfx_v9_0_read_wave_data,
2081         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2082         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2083         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2084         .ras_error_inject = &gfx_v9_4_ras_error_inject,
2085         .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2086         .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2087         .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
2088 };
2089
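/*
 * Early per-ASIC gfx configuration: pick the gfx function table, set FIFO
 * sizes, and derive GB_ADDR_CONFIG (a golden value, or the hardware value
 * read back and patched), then decode its bitfields into adev->gfx.config.
 */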
2090 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2091 {
2092         u32 gb_addr_config;
2093         int err;
2094
2095         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2096
2097         switch (adev->asic_type) {
2098         case CHIP_VEGA10:
2099                 adev->gfx.config.max_hw_contexts = 8;
2100                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2101                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2102                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2103                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2104                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2105                 break;
2106         case CHIP_VEGA12:
2107                 adev->gfx.config.max_hw_contexts = 8;
2108                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2109                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2110                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2111                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2112                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2113                 DRM_INFO("fix gfx.config for vega12\n");
2114                 break;
2115         case CHIP_VEGA20:
2116                 adev->gfx.config.max_hw_contexts = 8;
2117                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2118                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2119                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2120                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2121                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2122                 gb_addr_config &= ~0xf3e777ff;
2123                 gb_addr_config |= 0x22014042;
2124                 /* check vbios table if gpu info is not available */
2125                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2126                 if (err)
2127                         return err;
2128                 break;
2129         case CHIP_RAVEN:
2130                 adev->gfx.config.max_hw_contexts = 8;
2131                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2132                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2133                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2134                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2135                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2136                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2137                 else
2138                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2139                 break;
2140         case CHIP_ARCTURUS:
2141                 adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2142                 adev->gfx.config.max_hw_contexts = 8;
2143                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2144                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2145                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2146                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2147                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2148                 gb_addr_config &= ~0xf3e777ff;
2149                 gb_addr_config |= 0x22014042;
2150                 break;
2151         case CHIP_RENOIR:
2152                 adev->gfx.config.max_hw_contexts = 8;
2153                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2154                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2155                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2156                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2157                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2158                 gb_addr_config &= ~0xf3e777ff;
2159                 gb_addr_config |= 0x22010042;
2160                 break;
2161         default:
2162                 BUG();
2163                 break;
2164         }
2165
2166         adev->gfx.config.gb_addr_config = gb_addr_config;
2167
2168         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2169                         REG_GET_FIELD(
2170                                         adev->gfx.config.gb_addr_config,
2171                                         GB_ADDR_CONFIG,
2172                                         NUM_PIPES);
2173
2174         adev->gfx.config.max_tile_pipes =
2175                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2176
2177         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2178                         REG_GET_FIELD(
2179                                         adev->gfx.config.gb_addr_config,
2180                                         GB_ADDR_CONFIG,
2181                                         NUM_BANKS);
2182         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2183                         REG_GET_FIELD(
2184                                         adev->gfx.config.gb_addr_config,
2185                                         GB_ADDR_CONFIG,
2186                                         MAX_COMPRESSED_FRAGS);
2187         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2188                         REG_GET_FIELD(
2189                                         adev->gfx.config.gb_addr_config,
2190                                         GB_ADDR_CONFIG,
2191                                         NUM_RB_PER_SE);
2192         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2193                         REG_GET_FIELD(
2194                                         adev->gfx.config.gb_addr_config,
2195                                         GB_ADDR_CONFIG,
2196                                         NUM_SHADER_ENGINES);
2197         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2198                         REG_GET_FIELD(
2199                                         adev->gfx.config.gb_addr_config,
2200                                         GB_ADDR_CONFIG,
2201                                         PIPE_INTERLEAVE_SIZE));
2202
2203         return 0;
2204 }
2205
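/*
 * Initialize one compute ring: map (mec, pipe, queue) onto ME 1/2, assign
 * its doorbell and HPD EOP slot, derive the EOP interrupt source and queue
 * priority, then register the ring with amdgpu_ring_init().
 */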
2206 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2207                                       int mec, int pipe, int queue)
2208 {
2209         unsigned irq_type;
2210         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2211         unsigned int hw_prio;
2212
2215         /* mec0 is me1 */
2216         ring->me = mec + 1;
2217         ring->pipe = pipe;
2218         ring->queue = queue;
2219
2220         ring->ring_obj = NULL;
2221         ring->use_doorbell = true;
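        /* the allocated index counts 64-bit doorbells while
         * ring->doorbell_index is in dword units, hence (presumably) the
         * shift by one.
         */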
2222         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2223         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2224                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2225         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2226
2227         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2228                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2229                 + ring->pipe;
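        /* EOP interrupt sources are allocated per (me, pipe); all queues on
         * a pipe share one source, so the queue index does not enter into
         * the calculation above.
         */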
2230         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe,
2231                                                             ring->queue) ?
2232                         AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
2233         /* type-2 packets are deprecated on MEC, use type-3 instead */
2234         return amdgpu_ring_init(adev, ring, 1024,
2235                                 &adev->gfx.eop_irq, irq_type, hw_prio);
2236 }
2237
2238 static int gfx_v9_0_sw_init(void *handle)
2239 {
2240         int i, j, k, r, ring_id;
2241         struct amdgpu_ring *ring;
2242         struct amdgpu_kiq *kiq;
2243         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2244
2245         switch (adev->asic_type) {
2246         case CHIP_VEGA10:
2247         case CHIP_VEGA12:
2248         case CHIP_VEGA20:
2249         case CHIP_RAVEN:
2250         case CHIP_ARCTURUS:
2251         case CHIP_RENOIR:
2252                 adev->gfx.mec.num_mec = 2;
2253                 break;
2254         default:
2255                 adev->gfx.mec.num_mec = 1;
2256                 break;
2257         }
2258
2259         adev->gfx.mec.num_pipe_per_mec = 4;
2260         adev->gfx.mec.num_queue_per_pipe = 8;
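        /* with 4 pipes per MEC and 8 queues per pipe this exposes up to
         * num_mec * 32 hardware queues (64 on the two-MEC parts above);
         * rings are only created for queues enabled in the MEC queue
         * bitmap, see the loop further below.
         */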
2261
2262         /* EOP Event */
2263         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2264         if (r)
2265                 return r;
2266
2267         /* Privileged reg */
2268         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2269                               &adev->gfx.priv_reg_irq);
2270         if (r)
2271                 return r;
2272
2273         /* Privileged inst */
2274         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2275                               &adev->gfx.priv_inst_irq);
2276         if (r)
2277                 return r;
2278
2279         /* ECC error */
2280         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2281                               &adev->gfx.cp_ecc_error_irq);
2282         if (r)
2283                 return r;
2284
2285         /* FUE error */
2286         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2287                               &adev->gfx.cp_ecc_error_irq);
2288         if (r)
2289                 return r;
2290
2291         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2292
2293         gfx_v9_0_scratch_init(adev);
2294
2295         r = gfx_v9_0_init_microcode(adev);
2296         if (r) {
2297                 DRM_ERROR("Failed to load gfx firmware!\n");
2298                 return r;
2299         }
2300
2301         r = adev->gfx.rlc.funcs->init(adev);
2302         if (r) {
2303                 DRM_ERROR("Failed to init rlc BOs!\n");
2304                 return r;
2305         }
2306
2307         r = gfx_v9_0_mec_init(adev);
2308         if (r) {
2309                 DRM_ERROR("Failed to init MEC BOs!\n");
2310                 return r;
2311         }
2312
2313         /* set up the gfx ring */
2314         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2315                 ring = &adev->gfx.gfx_ring[i];
2316                 ring->ring_obj = NULL;
2317                 if (!i)
2318                         sprintf(ring->name, "gfx");
2319                 else
2320                         sprintf(ring->name, "gfx_%d", i);
2321                 ring->use_doorbell = true;
2322                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2323                 r = amdgpu_ring_init(adev, ring, 1024,
2324                                      &adev->gfx.eop_irq,
2325                                      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2326                                      AMDGPU_RING_PRIO_DEFAULT);
2327                 if (r)
2328                         return r;
2329         }
2330
2331         /* set up the compute queues - allocate horizontally across pipes */
2332         ring_id = 0;
2333         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2334                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2335                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2336                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2337                                         continue;
2338
2339                                 r = gfx_v9_0_compute_ring_init(adev,
2340                                                                ring_id,
2341                                                                i, k, j);
2342                                 if (r)
2343                                         return r;
2344
2345                                 ring_id++;
2346                         }
2347                 }
2348         }
2349
2350         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2351         if (r) {
2352                 DRM_ERROR("Failed to init KIQ BOs!\n");
2353                 return r;
2354         }
2355
2356         kiq = &adev->gfx.kiq;
2357         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2358         if (r)
2359                 return r;
2360
2361         /* create MQD for all compute queues as well as KIQ for SRIOV case */
2362         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2363         if (r)
2364                 return r;
2365
2366         adev->gfx.ce_ram_size = 0x8000;
2367
2368         r = gfx_v9_0_gpu_early_init(adev);
2369         if (r)
2370                 return r;
2371
2372         return 0;
2373 }
2374
2375
2376 static int gfx_v9_0_sw_fini(void *handle)
2377 {
2378         int i;
2379         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2380
2381         amdgpu_gfx_ras_fini(adev);
2382
2383         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2384                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2385         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2386                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2387
2388         amdgpu_gfx_mqd_sw_fini(adev);
2389         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2390         amdgpu_gfx_kiq_fini(adev);
2391
2392         gfx_v9_0_mec_fini(adev);
2393         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2394         if (adev->flags & AMD_IS_APU) {
2395                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2396                                 &adev->gfx.rlc.cp_table_gpu_addr,
2397                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2398         }
2399         gfx_v9_0_free_microcode(adev);
2400
2401         return 0;
2402 }
2403
2404
2405 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2406 {
2407         /* TODO */
2408 }
2409
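/*
 * Steer subsequent indexed register accesses to a particular shader
 * engine / shader array / instance by programming GRBM_GFX_INDEX.
 * Passing 0xffffffff for a parameter selects broadcast writes to all
 * units of that type instead.
 */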
2410 void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
2411                            u32 instance)
2412 {
2413         u32 data;
2414
2415         if (instance == 0xffffffff)
2416                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2417         else
2418                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2419
2420         if (se_num == 0xffffffff)
2421                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2422         else
2423                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2424
2425         if (sh_num == 0xffffffff)
2426                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2427         else
2428                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2429
2430         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2431 }
2432
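/*
 * Return a bitmap of render backends active in the SE/SH currently
 * selected via GRBM_GFX_INDEX: the fused-off (CC_*) and user-disabled
 * (GC_USER_*) bits are OR'd together, inverted, and masked down to the
 * number of RBs a single SH can host.
 */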
2433 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2434 {
2435         u32 data, mask;
2436
2437         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2438         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2439
2440         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2441         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2442
2443         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2444                                          adev->gfx.config.max_sh_per_se);
2445
2446         return (~data) & mask;
2447 }
2448
2449 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2450 {
2451         int i, j;
2452         u32 data;
2453         u32 active_rbs = 0;
2454         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2455                                         adev->gfx.config.max_sh_per_se;
2456
2457         mutex_lock(&adev->grbm_idx_mutex);
2458         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2459                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2460                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2461                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2462                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2463                                                rb_bitmap_width_per_sh);
2464                 }
2465         }
2466         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2467         mutex_unlock(&adev->grbm_idx_mutex);
2468
2469         adev->gfx.config.backend_enable_mask = active_rbs;
2470         adev->gfx.config.num_rbs = hweight32(active_rbs);
2471 }
2472
2473 #define DEFAULT_SH_MEM_BASES    (0x6000)
2474 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2475 {
2476         int i;
2477         uint32_t sh_mem_config;
2478         uint32_t sh_mem_bases;
2479
2480         /*
2481          * Configure apertures:
2482          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2483          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2484          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2485          */
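        /*
         * Each 16-bit half of SH_MEM_BASES (SHARED_BASE and PRIVATE_BASE)
         * holds the top 16 address bits of its aperture, so writing 0x6000
         * into both places the apertures at 0x6000 << 48 ==
         * 0x60000000'00000000, matching the map above.
         */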
2486         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2487
2488         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2489                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2490                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2491
2492         mutex_lock(&adev->srbm_mutex);
2493         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2494                 soc15_grbm_select(adev, 0, 0, 0, i);
2495                 /* CP and shaders */
2496                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2497                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2498         }
2499         soc15_grbm_select(adev, 0, 0, 0, 0);
2500         mutex_unlock(&adev->srbm_mutex);
2501
2502         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2503            access. These should be enabled by FW for target VMIDs. */
2504         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2505                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2506                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2507                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2508                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2509         }
2510 }
2511
2512 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2513 {
2514         int vmid;
2515
2516         /*
2517          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2518          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2519          * the driver can enable them for graphics. VMID0 should maintain
2520          * access so that HWS firmware can save/restore entries.
2521          */
2522         for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
2523                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2524                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2525                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2526                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2527         }
2528 }
2529
2530 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2531 {
2532         uint32_t tmp;
2533
2534         switch (adev->asic_type) {
2535         case CHIP_ARCTURUS:
2536                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2537                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2538                                         DISABLE_BARRIER_WAITCNT, 1);
2539                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2540                 break;
2541         default:
2542                 break;
2543         }
2544 }
2545
2546 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2547 {
2548         u32 tmp;
2549         int i;
2550
2551         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2552
2553         gfx_v9_0_tiling_mode_table_init(adev);
2554
2555         gfx_v9_0_setup_rb(adev);
2556         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2557         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2558
2559         /* XXX SH_MEM regs */
2560         /* where to put LDS, scratch, GPUVM in FSA64 space */
2561         mutex_lock(&adev->srbm_mutex);
2562         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2563                 soc15_grbm_select(adev, 0, 0, 0, i);
2564                 /* CP and shaders */
2565                 if (i == 0) {
2566                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2567                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2568                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2569                                             !!adev->gmc.noretry);
2570                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2571                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2572                 } else {
2573                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2574                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2575                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2576                                             !!adev->gmc.noretry);
2577                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2578                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2579                                 (adev->gmc.private_aperture_start >> 48));
2580                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2581                                 (adev->gmc.shared_aperture_start >> 48));
2582                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2583                 }
2584         }
2585         soc15_grbm_select(adev, 0, 0, 0, 0);
2586
2587         mutex_unlock(&adev->srbm_mutex);
2588
2589         gfx_v9_0_init_compute_vmid(adev);
2590         gfx_v9_0_init_gds_vmid(adev);
2591         gfx_v9_0_init_sq_config(adev);
2592 }
2593
2594 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2595 {
2596         u32 i, j, k;
2597         u32 mask;
2598
2599         mutex_lock(&adev->grbm_idx_mutex);
2600         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2601                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2602                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2603                         for (k = 0; k < adev->usec_timeout; k++) {
2604                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2605                                         break;
2606                                 udelay(1);
2607                         }
2608                         if (k == adev->usec_timeout) {
2609                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2610                                                       0xffffffff, 0xffffffff);
2611                                 mutex_unlock(&adev->grbm_idx_mutex);
2612                                 DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
2613                                          i, j);
2614                                 return;
2615                         }
2616                 }
2617         }
2618         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2619         mutex_unlock(&adev->grbm_idx_mutex);
2620
2621         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2622                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2623                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2624                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2625         for (k = 0; k < adev->usec_timeout; k++) {
2626                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2627                         break;
2628                 udelay(1);
2629         }
2630 }
2631
2632 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2633                                                bool enable)
2634 {
2635         u32 tmp;
2636
2637         /* don't toggle interrupts that are only applicable
2638          * to me0 pipe0 on ASICs that have me0 removed */
2639         if (!adev->gfx.num_gfx_rings)
2640                 return;
2641
2642         tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2643
2644         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2645         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2646         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2647         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2648
2649         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2650 }
2651
2652 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2653 {
2654         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2655         /* csib */
2656         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2657                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2658         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2659                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2660         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2661                         adev->gfx.rlc.clear_state_size);
2662 }
2663
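/*
 * As parsed below, register_list_format starts with a run of direct
 * register entries followed by indirect blocks: each indirect entry
 * occupies three dwords (two data words plus the indirect register
 * offset), and every block ends with a 0xFFFFFFFF marker.
 */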
2664 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2665                                 int indirect_offset,
2666                                 int list_size,
2667                                 int *unique_indirect_regs,
2668                                 int unique_indirect_reg_count,
2669                                 int *indirect_start_offsets,
2670                                 int *indirect_start_offsets_count,
2671                                 int max_start_offsets_count)
2672 {
2673         int idx;
2674
2675         for (; indirect_offset < list_size; indirect_offset++) {
2676                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2677                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2678                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2679
2680                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2681                         indirect_offset += 2;
2682
2683                         /* look for the matching index */
2684                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2685                                 if (unique_indirect_regs[idx] ==
2686                                         register_list_format[indirect_offset] ||
2687                                         !unique_indirect_regs[idx])
2688                                         break;
2689                         }
2690
2691                         BUG_ON(idx >= unique_indirect_reg_count);
2692
2693                         if (!unique_indirect_regs[idx])
2694                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2695
2696                         indirect_offset++;
2697                 }
2698         }
2699 }
2700
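/*
 * Upload the RLC save/restore machinery: the register restore table goes
 * into SRM ARAM, the register list format (direct and indirect portions)
 * plus the per-block start offsets go into GPM scratch RAM, and each
 * unique indirect register gets an SRM index-control address/data slot.
 */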
2701 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2702 {
2703         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2704         int unique_indirect_reg_count = 0;
2705
2706         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2707         int indirect_start_offsets_count = 0;
2708
2709         int list_size = 0;
2710         int i = 0, j = 0;
2711         u32 tmp = 0;
2712
2713         u32 *register_list_format =
2714                 kmemdup(adev->gfx.rlc.register_list_format,
2715                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2716         if (!register_list_format)
2717                 return -ENOMEM;
2718
2719         /* setup unique_indirect_regs array and indirect_start_offsets array */
2720         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2721         gfx_v9_1_parse_ind_reg_list(register_list_format,
2722                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2723                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2724                                     unique_indirect_regs,
2725                                     unique_indirect_reg_count,
2726                                     indirect_start_offsets,
2727                                     &indirect_start_offsets_count,
2728                                     ARRAY_SIZE(indirect_start_offsets));
2729
2730         /* enable auto inc in case it is disabled */
2731         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2732         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2733         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2734
2735         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2736         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2737                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2738         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2739                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2740                         adev->gfx.rlc.register_restore[i]);
2741
2742         /* load indirect register */
2743         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2744                 adev->gfx.rlc.reg_list_format_start);
2745
2746         /* direct register portion */
2747         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2748                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2749                         register_list_format[i]);
2750
2751         /* indirect register portion */
2752         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2753                 if (register_list_format[i] == 0xFFFFFFFF) {
2754                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2755                         continue;
2756                 }
2757
2758                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2759                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2760
2761                 for (j = 0; j < unique_indirect_reg_count; j++) {
2762                         if (register_list_format[i] == unique_indirect_regs[j]) {
2763                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2764                                 break;
2765                         }
2766                 }
2767
2768                 BUG_ON(j >= unique_indirect_reg_count);
2769
2770                 i++;
2771         }
2772
2773         /* set save/restore list size */
2774         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2775         list_size = list_size >> 1; /* entries are (reg, value) pairs */
2776         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2777                 adev->gfx.rlc.reg_restore_list_size);
2778         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2779
2780         /* write the starting offsets to RLC scratch ram */
2781         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2782                 adev->gfx.rlc.starting_offsets_start);
2783         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2784                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2785                        indirect_start_offsets[i]);
2786
2787         /* load unique indirect regs */
2788         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2789                 if (unique_indirect_regs[i] != 0) {
2790                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2791                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2792                                unique_indirect_regs[i] & 0x3FFFF);
2793
2794                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2795                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2796                                unique_indirect_regs[i] >> 20);
2797                 }
2798         }
2799
2800         kfree(register_list_format);
2801         return 0;
2802 }
2803
2804 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2805 {
2806         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2807 }
2808
2809 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2810                                              bool enable)
2811 {
2812         uint32_t data = 0;
2813         uint32_t default_data = 0;
2814
2815         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2816         if (enable) {
2817                 /* enable GFXIP control over CGPG */
2818                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2819                 if (default_data != data)
2820                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2821
2822                 /* update status */
2823                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2824                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2825                 if (default_data != data)
2826                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2827         } else {
2828                 /* restore GFXIP control over CGPG */
2829                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2830                 if (default_data != data)
2831                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2832         }
2833 }
2834
2835 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2836 {
2837         uint32_t data = 0;
2838
2839         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2840                               AMD_PG_SUPPORT_GFX_SMG |
2841                               AMD_PG_SUPPORT_GFX_DMG)) {
2842                 /* init IDLE_POLL_COUNT = 60 */
2843                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2844                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2845                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2846                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2847
2848                 /* init RLC PG Delay */
2849                 data = 0;
2850                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2851                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2852                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2853                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2854                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2855
2856                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2857                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2858                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2859                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2860
2861                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2862                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2863                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2864                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2865
2866                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2867                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2868
2869                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2870                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2871                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2872                 if (adev->asic_type != CHIP_RENOIR)
2873                         pwr_10_0_gfxip_control_over_cgpg(adev, true);
2874         }
2875 }
2876
2877 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2878                                                 bool enable)
2879 {
2880         uint32_t data = 0;
2881         uint32_t default_data = 0;
2882
2883         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2884         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2885                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2886                              enable ? 1 : 0);
2887         if (default_data != data)
2888                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2889 }
2890
2891 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2892                                                 bool enable)
2893 {
2894         uint32_t data = 0;
2895         uint32_t default_data = 0;
2896
2897         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2898         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2899                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2900                              enable ? 1 : 0);
2901         if (default_data != data)
2902                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2903 }
2904
2905 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2906                                         bool enable)
2907 {
2908         uint32_t data = 0;
2909         uint32_t default_data = 0;
2910
2911         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2912         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2913                              CP_PG_DISABLE,
2914                              enable ? 0 : 1);
2915         if (default_data != data)
2916                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2917 }
2918
2919 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2920                                                 bool enable)
2921 {
2922         uint32_t data, default_data;
2923
2924         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2925         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2926                              GFX_POWER_GATING_ENABLE,
2927                              enable ? 1 : 0);
2928         if (default_data != data)
2929                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2930 }
2931
2932 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2933                                                 bool enable)
2934 {
2935         uint32_t data, default_data;
2936
2937         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2938         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2939                              GFX_PIPELINE_PG_ENABLE,
2940                              enable ? 1 : 0);
2941         if (default_data != data)
2942                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2943
2944         if (!enable)
2945                 /* read any GFX register to wake up GFX */
2946                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2947 }
2948
2949 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2950                                                        bool enable)
2951 {
2952         uint32_t data, default_data;
2953
2954         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2955         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2956                              STATIC_PER_CU_PG_ENABLE,
2957                              enable ? 1 : 0);
2958         if (default_data != data)
2959                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2960 }
2961
2962 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2963                                                 bool enable)
2964 {
2965         uint32_t data, default_data;
2966
2967         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2968         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2969                              DYN_PER_CU_PG_ENABLE,
2970                              enable ? 1 : 0);
2971         if (default_data != data)
2972                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2973 }
2974
2975 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2976 {
2977         gfx_v9_0_init_csb(adev);
2978
2979         /*
2980          * The RLC save/restore list works only from RLC v2_1 onwards,
2981          * and the gfxoff feature depends on it.
2982          */
2983         if (adev->gfx.rlc.is_rlc_v2_1) {
2984                 if (adev->asic_type == CHIP_VEGA12 ||
2985                     (adev->apu_flags & AMD_APU_IS_RAVEN2))
2986                         gfx_v9_1_init_rlc_save_restore_list(adev);
2987                 gfx_v9_0_enable_save_restore_machine(adev);
2988         }
2989
2990         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2991                               AMD_PG_SUPPORT_GFX_SMG |
2992                               AMD_PG_SUPPORT_GFX_DMG |
2993                               AMD_PG_SUPPORT_CP |
2994                               AMD_PG_SUPPORT_GDS |
2995                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2996                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2997                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2998                 gfx_v9_0_init_gfx_power_gating(adev);
2999         }
3000 }
3001
3002 static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
3003 {
3004         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
3005         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3006         gfx_v9_0_wait_for_rlc_serdes(adev);
3007 }
3008
3009 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
3010 {
3011         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3012         udelay(50);
3013         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
3014         udelay(50);
3015 }
3016
3017 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3018 {
3019 #ifdef AMDGPU_RLC_DEBUG_RETRY
3020         u32 rlc_ucode_ver;
3021 #endif
3022
3023         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3024         udelay(50);
3025
3026         /* APUs (e.g. Carrizo) enable the CP interrupt only after the CP is initialized */
3027         if (!(adev->flags & AMD_IS_APU)) {
3028                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3029                 udelay(50);
3030         }
3031
3032 #ifdef AMDGPU_RLC_DEBUG_RETRY
3033         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3034         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3035         if (rlc_ucode_ver == 0x108) {
3036                 DRM_INFO("Using RLC debug ucode: mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %u\n",
3037                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3038                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3039                  * default is 0x9C4 to create a 100us interval */
3040                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3041                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3042                  * to disable the page fault retry interrupts, default is
3043                  * 0x100 (256) */
3044                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3045         }
3046 #endif
3047 }
3048
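/*
 * Legacy (non-PSP) RLC microcode load: stream the ucode words through the
 * auto-incrementing UCODE_ADDR/UCODE_DATA register pair, then write the
 * firmware version to UCODE_ADDR, seemingly as an end-of-load marker.
 */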
3049 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3050 {
3051         const struct rlc_firmware_header_v2_0 *hdr;
3052         const __le32 *fw_data;
3053         unsigned i, fw_size;
3054
3055         if (!adev->gfx.rlc_fw)
3056                 return -EINVAL;
3057
3058         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3059         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3060
3061         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3062                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3063         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3064
3065         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3066                         RLCG_UCODE_LOADING_START_ADDRESS);
3067         for (i = 0; i < fw_size; i++)
3068                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3069         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3070
3071         return 0;
3072 }
3073
3074 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3075 {
3076         int r;
3077
3078         if (amdgpu_sriov_vf(adev)) {
3079                 gfx_v9_0_init_csb(adev);
3080                 return 0;
3081         }
3082
3083         adev->gfx.rlc.funcs->stop(adev);
3084
3085         /* disable CG */
3086         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3087
3088         gfx_v9_0_init_pg(adev);
3089
3090         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3091                 /* legacy rlc firmware loading */
3092                 r = gfx_v9_0_rlc_load_microcode(adev);
3093                 if (r)
3094                         return r;
3095         }
3096
3097         switch (adev->asic_type) {
3098         case CHIP_RAVEN:
3099                 if (amdgpu_lbpw == 0)
3100                         gfx_v9_0_enable_lbpw(adev, false);
3101                 else
3102                         gfx_v9_0_enable_lbpw(adev, true);
3103                 break;
3104         case CHIP_VEGA20:
3105                 if (amdgpu_lbpw > 0)
3106                         gfx_v9_0_enable_lbpw(adev, true);
3107                 else
3108                         gfx_v9_0_enable_lbpw(adev, false);
3109                 break;
3110         default:
3111                 break;
3112         }
3113
3114         adev->gfx.rlc.funcs->start(adev);
3115
3116         return 0;
3117 }
3118
3119 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3120 {
3121         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3122
3123         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3124         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3125         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3126         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3127         udelay(50);
3128 }
3129
3130 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3131 {
3132         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3133         const struct gfx_firmware_header_v1_0 *ce_hdr;
3134         const struct gfx_firmware_header_v1_0 *me_hdr;
3135         const __le32 *fw_data;
3136         unsigned i, fw_size;
3137
3138         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3139                 return -EINVAL;
3140
3141         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3142                 adev->gfx.pfp_fw->data;
3143         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3144                 adev->gfx.ce_fw->data;
3145         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3146                 adev->gfx.me_fw->data;
3147
3148         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3149         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3150         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3151
3152         gfx_v9_0_cp_gfx_enable(adev, false);
3153
3154         /* PFP */
3155         fw_data = (const __le32 *)
3156                 (adev->gfx.pfp_fw->data +
3157                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3158         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3159         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3160         for (i = 0; i < fw_size; i++)
3161                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3162         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3163
3164         /* CE */
3165         fw_data = (const __le32 *)
3166                 (adev->gfx.ce_fw->data +
3167                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3168         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3169         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3170         for (i = 0; i < fw_size; i++)
3171                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3172         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3173
3174         /* ME */
3175         fw_data = (const __le32 *)
3176                 (adev->gfx.me_fw->data +
3177                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3178         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3179         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3180         for (i = 0; i < fw_size; i++)
3181                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3182         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3183
3184         return 0;
3185 }
3186
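/*
 * Prime gfx ring 0 with the clear-state preamble: context control, the
 * SET_CONTEXT_REG extents from gfx9_cs_data, a CLEAR_STATE packet and the
 * CE partition bases, so the CP starts from a known context state.
 */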
3187 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3188 {
3189         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3190         const struct cs_section_def *sect = NULL;
3191         const struct cs_extent_def *ext = NULL;
3192         int r, i, tmp;
3193
3194         /* init the CP */
3195         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3196         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3197
3198         gfx_v9_0_cp_gfx_enable(adev, true);
3199
3200         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3201         if (r) {
3202                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3203                 return r;
3204         }
3205
3206         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3207         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3208
3209         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3210         amdgpu_ring_write(ring, 0x80000000);
3211         amdgpu_ring_write(ring, 0x80000000);
3212
3213         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3214                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3215                         if (sect->id == SECT_CONTEXT) {
3216                                 amdgpu_ring_write(ring,
3217                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3218                                                ext->reg_count));
3219                                 amdgpu_ring_write(ring,
3220                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3221                                 for (i = 0; i < ext->reg_count; i++)
3222                                         amdgpu_ring_write(ring, ext->extent[i]);
3223                         }
3224                 }
3225         }
3226
3227         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3228         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3229
3230         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3231         amdgpu_ring_write(ring, 0);
3232
3233         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3234         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3235         amdgpu_ring_write(ring, 0x8000);
3236         amdgpu_ring_write(ring, 0x8000);
3237
3238         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3239         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3240                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3241         amdgpu_ring_write(ring, tmp);
3242         amdgpu_ring_write(ring, 0);
3243
3244         amdgpu_ring_commit(ring);
3245
3246         return 0;
3247 }
3248
3249 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3250 {
3251         struct amdgpu_ring *ring;
3252         u32 tmp;
3253         u32 rb_bufsz;
3254         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3255
3256         /* Set the write pointer delay */
3257         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3258
3259         /* set the RB to use vmid 0 */
3260         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3261
3262         /* Set ring buffer size */
3263         ring = &adev->gfx.gfx_ring[0];
3264         rb_bufsz = order_base_2(ring->ring_size / 8);
3265         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3266         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3267 #ifdef __BIG_ENDIAN
3268         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3269 #endif
3270         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3271
3272         /* Initialize the ring buffer's write pointers */
3273         ring->wptr = 0;
3274         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3275         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3276
3277         /* set the wb address whether it's enabled or not */
3278         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3279         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3280         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3281
3282         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3283         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3284         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3285
3286         mdelay(1);
3287         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3288
3289         rb_addr = ring->gpu_addr >> 8;
3290         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3291         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3292
3293         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3294         if (ring->use_doorbell) {
3295                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3296                                     DOORBELL_OFFSET, ring->doorbell_index);
3297                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3298                                     DOORBELL_EN, 1);
3299         } else {
3300                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3301         }
3302         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3303
3304         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3305                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3306         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3307
3308         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3309                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3310
3312         /* start the ring */
3313         gfx_v9_0_cp_gfx_start(adev);
3314         ring->sched.ready = true;
3315
3316         return 0;
3317 }
3318
3319 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3320 {
3321         if (enable) {
3322                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3323         } else {
3324                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3325                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3326                 adev->gfx.kiq.ring.sched.ready = false;
3327         }
3328         udelay(50);
3329 }
3330
3331 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3332 {
3333         const struct gfx_firmware_header_v1_0 *mec_hdr;
3334         const __le32 *fw_data;
3335         unsigned i;
3336         u32 tmp;
3337
3338         if (!adev->gfx.mec_fw)
3339                 return -EINVAL;
3340
3341         gfx_v9_0_cp_compute_enable(adev, false);
3342
3343         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3344         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3345
3346         fw_data = (const __le32 *)
3347                 (adev->gfx.mec_fw->data +
3348                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3349         tmp = 0;
3350         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3351         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3352         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3353
3354         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3355                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3356         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3357                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3358
3359         /* MEC1 */
3360         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3361                          mec_hdr->jt_offset);
3362         for (i = 0; i < mec_hdr->jt_size; i++)
3363                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3364                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3365
3366         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3367                         adev->gfx.mec_fw_version);
3368         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1 */
3369
3370         return 0;
3371 }
3372
3373 /* KIQ functions */
3374 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3375 {
3376         uint32_t tmp;
3377         struct amdgpu_device *adev = ring->adev;
3378
3379         /* tell the RLC which queue is the KIQ */
3380         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3381         tmp &= 0xffffff00;
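        /* the low byte encodes the hardware queue id as
         * (me << 5) | (pipe << 3) | queue; bit 7 is set in a second write,
         * which appears to latch the selection as active.
         */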
3382         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3383         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3384         tmp |= 0x80;
3385         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3386 }
3387
3388 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3389 {
3390         struct amdgpu_device *adev = ring->adev;
3391
3392         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3393                 if (amdgpu_gfx_is_high_priority_compute_queue(adev,
3394                                                               ring->pipe,
3395                                                               ring->queue)) {
3396                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3397                         mqd->cp_hqd_queue_priority =
3398                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3399                 }
3400         }
3401 }
3402
3403 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3404 {
3405         struct amdgpu_device *adev = ring->adev;
3406         struct v9_mqd *mqd = ring->mqd_ptr;
3407         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3408         uint32_t tmp;
3409
3410         mqd->header = 0xC0310800;
3411         mqd->compute_pipelinestat_enable = 0x00000001;
3412         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3413         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3414         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3415         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3416         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3417         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3418         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3419         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3420         mqd->compute_misc_reserved = 0x00000003;
3421
3422         mqd->dynamic_cu_mask_addr_lo =
3423                 lower_32_bits(ring->mqd_gpu_addr
3424                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3425         mqd->dynamic_cu_mask_addr_hi =
3426                 upper_32_bits(ring->mqd_gpu_addr
3427                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3428
3429         eop_base_addr = ring->eop_gpu_addr >> 8;
3430         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3431         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3432
3433         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3434         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3435         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3436                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
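        /* e.g. GFX9_MEC_HPD_SIZE of 4096 bytes is 1024 dwords:
         * order_base_2(1024) - 1 == 9 and 2^(9+1) == 1024 dwords, matching
         * the encoding described above.
         */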
3437
3438         mqd->cp_hqd_eop_control = tmp;
3439
3440         /* enable doorbell? */
3441         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3442
3443         if (ring->use_doorbell) {
3444                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3445                                     DOORBELL_OFFSET, ring->doorbell_index);
3446                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3447                                     DOORBELL_EN, 1);
3448                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3449                                     DOORBELL_SOURCE, 0);
3450                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3451                                     DOORBELL_HIT, 0);
3452         } else {
3453                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3454                                          DOORBELL_EN, 0);
3455         }
3456
3457         mqd->cp_hqd_pq_doorbell_control = tmp;
3458
3459         /* disable the queue if it's active */
3460         ring->wptr = 0;
3461         mqd->cp_hqd_dequeue_request = 0;
3462         mqd->cp_hqd_pq_rptr = 0;
3463         mqd->cp_hqd_pq_wptr_lo = 0;
3464         mqd->cp_hqd_pq_wptr_hi = 0;
3465
3466         /* set the pointer to the MQD */
3467         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3468         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3469
3470         /* set MQD vmid to 0 */
3471         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3472         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3473         mqd->cp_mqd_control = tmp;
3474
3475         /* set the pointer to the HQD; this is similar to CP_RB0_BASE/_HI */
3476         hqd_gpu_addr = ring->gpu_addr >> 8;
3477         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3478         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3479
3480         /* set up the HQD, this is similar to CP_RB0_CNTL */
3481         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3482         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3483                             (order_base_2(ring->ring_size / 4) - 1));
3484         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3485                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3486 #ifdef __BIG_ENDIAN
3487         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3488 #endif
3489         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3490         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3491         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3492         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3493         mqd->cp_hqd_pq_control = tmp;
3494
3495         /* set the wb address whether it's enabled or not */
3496         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3497         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3498         mqd->cp_hqd_pq_rptr_report_addr_hi =
3499                 upper_32_bits(wb_gpu_addr) & 0xffff;
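             /* the lo mask forces dword alignment and the hi mask keeps only
              * 16 bits, i.e. the rptr report address is limited to 48 bits */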
3500
3501         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3502         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3503         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3504         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3505
3506         tmp = 0;
3507         /* enable the doorbell if requested */
3508         if (ring->use_doorbell) {
3509                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3510                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3511                                 DOORBELL_OFFSET, ring->doorbell_index);
3512
3513                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3514                                          DOORBELL_EN, 1);
3515                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3516                                          DOORBELL_SOURCE, 0);
3517                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3518                                          DOORBELL_HIT, 0);
3519         }
3520
3521         mqd->cp_hqd_pq_doorbell_control = tmp;
3522
3523         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3524         ring->wptr = 0;
3525         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3526
3527         /* set the vmid for the queue */
3528         mqd->cp_hqd_vmid = 0;
3529
3530         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3531         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3532         mqd->cp_hqd_persistent_state = tmp;
3533
3534         /* set MIN_IB_AVAIL_SIZE */
3535         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3536         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3537         mqd->cp_hqd_ib_control = tmp;
3538
3539         /* set static priority for a queue/ring */
3540         gfx_v9_0_mqd_set_priority(ring, mqd);
3541         mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3542
3543         /* the map_queues packet doesn't need to activate the queue,
3544          * so only the KIQ needs to set this field.
3545          */
3546         if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3547                 mqd->cp_hqd_active = 1;
3548
3549         return 0;
3550 }
3551
3552 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3553 {
3554         struct amdgpu_device *adev = ring->adev;
3555         struct v9_mqd *mqd = ring->mqd_ptr;
3556         int j;
3557
3558         /* disable wptr polling */
3559         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3560
3561         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3562                mqd->cp_hqd_eop_base_addr_lo);
3563         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3564                mqd->cp_hqd_eop_base_addr_hi);
3565
3566         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3567         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3568                mqd->cp_hqd_eop_control);
3569
3570         /* enable doorbell? */
3571         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3572                mqd->cp_hqd_pq_doorbell_control);
3573
3574         /* disable the queue if it's active */
3575         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3576                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
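                     /* DEQUEUE_REQUEST = 1 asks the CP to drain this HQD; poll
                      * CP_HQD_ACTIVE below for up to usec_timeout microseconds,
                      * one microsecond per iteration */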
3577                 for (j = 0; j < adev->usec_timeout; j++) {
3578                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3579                                 break;
3580                         udelay(1);
3581                 }
3582                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3583                        mqd->cp_hqd_dequeue_request);
3584                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3585                        mqd->cp_hqd_pq_rptr);
3586                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3587                        mqd->cp_hqd_pq_wptr_lo);
3588                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3589                        mqd->cp_hqd_pq_wptr_hi);
3590         }
3591
3592         /* set the pointer to the MQD */
3593         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3594                mqd->cp_mqd_base_addr_lo);
3595         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3596                mqd->cp_mqd_base_addr_hi);
3597
3598         /* set MQD vmid to 0 */
3599         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3600                mqd->cp_mqd_control);
3601
3602         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3603         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3604                mqd->cp_hqd_pq_base_lo);
3605         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3606                mqd->cp_hqd_pq_base_hi);
3607
3608         /* set up the HQD, this is similar to CP_RB0_CNTL */
3609         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3610                mqd->cp_hqd_pq_control);
3611
3612         /* set the wb address whether it's enabled or not */
3613         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3614                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3615         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3616                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3617
3618         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3619         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3620                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3621         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3622                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3623
3624         /* enable the doorbell if requested */
3625         if (ring->use_doorbell) {
3626                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3627                                         (adev->doorbell_index.kiq * 2) << 2);
3628                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3629                                         (adev->doorbell_index.userqueue_end * 2) << 2);
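                     /* Note (annotation): the doorbell_index values are kept in
                      * qword (64-bit doorbell) units, so "* 2" yields the dword
                      * index and "<< 2" the byte offset that the DOORBELL_RANGE
                      * registers expect. */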
3630         }
3631
3632         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3633                mqd->cp_hqd_pq_doorbell_control);
3634
3635         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3636         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3637                mqd->cp_hqd_pq_wptr_lo);
3638         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3639                mqd->cp_hqd_pq_wptr_hi);
3640
3641         /* set the vmid for the queue */
3642         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3643
3644         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3645                mqd->cp_hqd_persistent_state);
3646
3647         /* activate the queue */
3648         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3649                mqd->cp_hqd_active);
3650
3651         if (ring->use_doorbell)
3652                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3653
3654         return 0;
3655 }
3656
3657 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3658 {
3659         struct amdgpu_device *adev = ring->adev;
3660         int j;
3661
3662         /* disable the queue if it's active */
3663         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3664
3665                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3666
3667                 for (j = 0; j < adev->usec_timeout; j++) {
3668                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3669                                 break;
3670                         udelay(1);
3671                 }
3672
3673                 if (j == adev->usec_timeout) {
3674                         DRM_DEBUG("KIQ dequeue request failed.\n");
3675
3676                         /* Manual disable if dequeue request times out */
3677                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3678                 }
3679
3680                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
3682         }
3683
3684         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3685         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3686         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3687         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3688         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3689         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3690         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3691         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3692
3693         return 0;
3694 }
3695
3696 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3697 {
3698         struct amdgpu_device *adev = ring->adev;
3699         struct v9_mqd *mqd = ring->mqd_ptr;
3700         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
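             /* the KIQ's MQD backup uses the slot just past the compute-ring
              * entries of mqd_backup[] */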
3701
3702         gfx_v9_0_kiq_setting(ring);
3703
3704         if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3705                 /* reset MQD to a clean status */
3706                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3707                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3708
3709                 /* reset ring buffer */
3710                 ring->wptr = 0;
3711                 amdgpu_ring_clear_ring(ring);
3712
3713                 mutex_lock(&adev->srbm_mutex);
3714                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3715                 gfx_v9_0_kiq_init_register(ring);
3716                 soc15_grbm_select(adev, 0, 0, 0, 0);
3717                 mutex_unlock(&adev->srbm_mutex);
3718         } else {
3719                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3720                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3721                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
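                     /* start with every CU and RB enabled; the masks can be
                      * narrowed later at runtime */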
3722                 mutex_lock(&adev->srbm_mutex);
3723                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3724                 gfx_v9_0_mqd_init(ring);
3725                 gfx_v9_0_kiq_init_register(ring);
3726                 soc15_grbm_select(adev, 0, 0, 0, 0);
3727                 mutex_unlock(&adev->srbm_mutex);
3728
3729                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3730                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3731         }
3732
3733         return 0;
3734 }
3735
3736 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3737 {
3738         struct amdgpu_device *adev = ring->adev;
3739         struct v9_mqd *mqd = ring->mqd_ptr;
3740         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3741
3742         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3743                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3744                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3745                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3746                 mutex_lock(&adev->srbm_mutex);
3747                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3748                 gfx_v9_0_mqd_init(ring);
3749                 soc15_grbm_select(adev, 0, 0, 0, 0);
3750                 mutex_unlock(&adev->srbm_mutex);
3751
3752                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3753                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3754         } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3755                 /* reset MQD to a clean status */
3756                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3757                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3758
3759                 /* reset ring buffer */
3760                 ring->wptr = 0;
3761                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3762                 amdgpu_ring_clear_ring(ring);
3763         } else {
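                     /* resuming from suspend: the MQD contents were preserved,
                      * so only the ring buffer needs scrubbing */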
3764                 amdgpu_ring_clear_ring(ring);
3765         }
3766
3767         return 0;
3768 }
3769
3770 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3771 {
3772         struct amdgpu_ring *ring;
3773         int r;
3774
3775         ring = &adev->gfx.kiq.ring;
3776
3777         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3778         if (unlikely(r != 0))
3779                 return r;
3780
3781         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3782         if (unlikely(r != 0))
3783                 return r;
3784
3785         gfx_v9_0_kiq_init_queue(ring);
3786         amdgpu_bo_kunmap(ring->mqd_obj);
3787         ring->mqd_ptr = NULL;
3788         amdgpu_bo_unreserve(ring->mqd_obj);
3789         ring->sched.ready = true;
3790         return 0;
3791 }
3792
3793 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3794 {
3795         struct amdgpu_ring *ring = NULL;
3796         int r = 0, i;
3797
3798         gfx_v9_0_cp_compute_enable(adev, true);
3799
3800         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3801                 ring = &adev->gfx.compute_ring[i];
3802
3803                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3804                 if (unlikely(r != 0))
3805                         goto done;
3806                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3807                 if (!r) {
3808                         r = gfx_v9_0_kcq_init_queue(ring);
3809                         amdgpu_bo_kunmap(ring->mqd_obj);
3810                         ring->mqd_ptr = NULL;
3811                 }
3812                 amdgpu_bo_unreserve(ring->mqd_obj);
3813                 if (r)
3814                         goto done;
3815         }
3816
3817         r = amdgpu_gfx_enable_kcq(adev);
3818 done:
3819         return r;
3820 }
3821
3822 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3823 {
3824         int r, i;
3825         struct amdgpu_ring *ring;
3826
3827         if (!(adev->flags & AMD_IS_APU))
3828                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3829
3830         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3831                 if (adev->gfx.num_gfx_rings) {
3832                         /* legacy firmware loading */
3833                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3834                         if (r)
3835                                 return r;
3836                 }
3837
3838                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3839                 if (r)
3840                         return r;
3841         }
3842
3843         r = gfx_v9_0_kiq_resume(adev);
3844         if (r)
3845                 return r;
3846
3847         if (adev->gfx.num_gfx_rings) {
3848                 r = gfx_v9_0_cp_gfx_resume(adev);
3849                 if (r)
3850                         return r;
3851         }
3852
3853         r = gfx_v9_0_kcq_resume(adev);
3854         if (r)
3855                 return r;
3856
3857         if (adev->gfx.num_gfx_rings) {
3858                 ring = &adev->gfx.gfx_ring[0];
3859                 r = amdgpu_ring_test_helper(ring);
3860                 if (r)
3861                         return r;
3862         }
3863
3864         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3865                 ring = &adev->gfx.compute_ring[i];
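                     /* the helper updates ring->sched.ready itself; a failure
                      * here only disables that ring instead of aborting resume */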
3866                 amdgpu_ring_test_helper(ring);
3867         }
3868
3869         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3870
3871         return 0;
3872 }
3873
3874 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3875 {
3876         u32 tmp;
3877
3878         if (adev->asic_type != CHIP_ARCTURUS)
3879                 return;
3880
3881         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3882         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3883                                 adev->df.hash_status.hash_64k);
3884         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3885                                 adev->df.hash_status.hash_2m);
3886         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3887                                 adev->df.hash_status.hash_1g);
3888         WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3889 }
3890
3891 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3892 {
3893         if (adev->gfx.num_gfx_rings)
3894                 gfx_v9_0_cp_gfx_enable(adev, enable);
3895         gfx_v9_0_cp_compute_enable(adev, enable);
3896 }
3897
3898 static int gfx_v9_0_hw_init(void *handle)
3899 {
3900         int r;
3901         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3902
3903         if (!amdgpu_sriov_vf(adev))
3904                 gfx_v9_0_init_golden_registers(adev);
3905
3906         gfx_v9_0_constants_init(adev);
3907
3908         gfx_v9_0_init_tcp_config(adev);
3909
3910         r = adev->gfx.rlc.funcs->resume(adev);
3911         if (r)
3912                 return r;
3913
3914         return gfx_v9_0_cp_resume(adev);
3919 }
3920
3921 static int gfx_v9_0_hw_fini(void *handle)
3922 {
3923         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3924
3925         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3926         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3927         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3928
3929         /* DF freeze and KCQ disable will fail if a RAS interrupt was triggered */
3930         if (!amdgpu_ras_intr_triggered())
3931                 /* disable the KCQs so the CPC stops touching memory that is no longer valid */
3932                 amdgpu_gfx_disable_kcq(adev);
3933
3934         if (amdgpu_sriov_vf(adev)) {
3935                 gfx_v9_0_cp_gfx_enable(adev, false);
3936                 /* Polling must be disabled for SRIOV once the hw has finished;
3937                  * otherwise the CPC engine may keep fetching a WB address that
3938                  * is no longer valid after sw teardown and trigger a DMAR read
3939                  * error on the hypervisor side.
3940                  */
3941                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3942                 return 0;
3943         }
3944
3945         /* Use the deinitialize sequence from CAIL when unbinding the device
3946          * from the driver; otherwise the KIQ hangs when it is bound back.
3947          */
3948         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3949                 mutex_lock(&adev->srbm_mutex);
3950                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3951                                 adev->gfx.kiq.ring.pipe,
3952                                 adev->gfx.kiq.ring.queue, 0);
3953                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3954                 soc15_grbm_select(adev, 0, 0, 0, 0);
3955                 mutex_unlock(&adev->srbm_mutex);
3956         }
3957
3958         gfx_v9_0_cp_enable(adev, false);
3959         adev->gfx.rlc.funcs->stop(adev);
3960
3961         return 0;
3962 }
3963
3964 static int gfx_v9_0_suspend(void *handle)
3965 {
3966         return gfx_v9_0_hw_fini(handle);
3967 }
3968
3969 static int gfx_v9_0_resume(void *handle)
3970 {
3971         return gfx_v9_0_hw_init(handle);
3972 }
3973
3974 static bool gfx_v9_0_is_idle(void *handle)
3975 {
3976         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3977
3978         if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3979                                 GRBM_STATUS, GUI_ACTIVE))
3980                 return false;
3981         else
3982                 return true;
3983 }
3984
3985 static int gfx_v9_0_wait_for_idle(void *handle)
3986 {
3987         unsigned i;
3988         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3989
3990         for (i = 0; i < adev->usec_timeout; i++) {
3991                 if (gfx_v9_0_is_idle(handle))
3992                         return 0;
3993                 udelay(1);
3994         }
3995         return -ETIMEDOUT;
3996 }
3997
3998 static int gfx_v9_0_soft_reset(void *handle)
3999 {
4000         u32 grbm_soft_reset = 0;
4001         u32 tmp;
4002         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4003
4004         /* GRBM_STATUS */
4005         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
4006         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
4007                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
4008                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
4009                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
4010                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
4011                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
4012                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4013                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4014                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4015                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
4016         }
4017
4018         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
4019                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4020                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4021         }
4022
4023         /* GRBM_STATUS2 */
4024         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4025         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4026                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4027                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4028
4029
4030         if (grbm_soft_reset) {
4031                 /* stop the rlc */
4032                 adev->gfx.rlc.funcs->stop(adev);
4033
4034                 if (adev->gfx.num_gfx_rings)
4035                         /* Disable GFX parsing/prefetching */
4036                         gfx_v9_0_cp_gfx_enable(adev, false);
4037
4038                 /* Disable MEC parsing/prefetching */
4039                 gfx_v9_0_cp_compute_enable(adev, false);
4040
4042                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4043                 tmp |= grbm_soft_reset;
4044                 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4045                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4046                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4047
4048                 udelay(50);
4049
4050                 tmp &= ~grbm_soft_reset;
4051                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4052                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4054
4055                 /* Wait a little for things to settle down */
4056                 udelay(50);
4057         }
4058         return 0;
4059 }
4060
4061 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4062 {
4063         signed long r, cnt = 0;
4064         unsigned long flags;
4065         uint32_t seq, reg_val_offs = 0;
4066         uint64_t value = 0;
4067         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4068         struct amdgpu_ring *ring = &kiq->ring;
4069
4070         BUG_ON(!ring->funcs->emit_rreg);
4071
4072         spin_lock_irqsave(&kiq->ring_lock, flags);
4073         if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4074                 pr_err("critical bug! too many kiq readers\n");
4075                 goto failed_unlock;
4076         }
4077         amdgpu_ring_alloc(ring, 32);
4078         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4079         amdgpu_ring_write(ring, 9 |     /* src: register*/
4080                                 (5 << 8) |      /* dst: memory */
4081                                 (1 << 16) |     /* count sel */
4082                                 (1 << 20));     /* write confirm */
4083         amdgpu_ring_write(ring, 0);
4084         amdgpu_ring_write(ring, 0);
4085         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4086                                 reg_val_offs * 4));
4087         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4088                                 reg_val_offs * 4));
4089         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4090         if (r)
4091                 goto failed_undo;
4092
4093         amdgpu_ring_commit(ring);
4094         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4095
4096         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4097
4098         /* Don't keep waiting in the GPU-reset case, since doing so can block
4099          * the gpu_recover() routine forever: e.g. when this KIQ read is
4100          * triggered from TTM, ttm_bo_lock_delayed_workqueue() never returns
4101          * while we keep waiting here, and gpu_recover() hangs as a result.
4102          *
4103          * Also don't keep waiting when called from IRQ context.
4104          */
4106         if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
4107                 goto failed_kiq_read;
4108
4109         might_sleep();
4110         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4111                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4112                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4113         }
4114
4115         if (cnt > MAX_KIQ_REG_TRY)
4116                 goto failed_kiq_read;
4117
4118         mb();
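             /* mb() orders the WB reads below after the fence poll above; the
              * COPY_DATA packet stored the 64-bit counter in two consecutive
              * WB dwords */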
4119         value = (uint64_t)adev->wb.wb[reg_val_offs] |
4120                 ((uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL);
4121         amdgpu_device_wb_free(adev, reg_val_offs);
4122         return value;
4123
4124 failed_undo:
4125         amdgpu_ring_undo(ring);
4126 failed_unlock:
4127         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4128 failed_kiq_read:
4129         if (reg_val_offs)
4130                 amdgpu_device_wb_free(adev, reg_val_offs);
4131         pr_err("failed to read gpu clock\n");
4132         return ~0;
4133 }
4134
4135 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4136 {
4137         uint64_t clock;
4138
4139         amdgpu_gfx_off_ctrl(adev, false);
4140         mutex_lock(&adev->gfx.gpu_clock_mutex);
4141         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4142                 clock = gfx_v9_0_kiq_read_clock(adev);
4143         } else {
4144                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4145                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4146                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
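                     /* the write to RLC_CAPTURE_GPU_CLOCK_COUNT above latches
                      * the free-running counter so the LSB/MSB pair reads back
                      * as one coherent 64-bit sample */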
4147         }
4148         mutex_unlock(&adev->gfx.gpu_clock_mutex);
4149         amdgpu_gfx_off_ctrl(adev, true);
4150         return clock;
4151 }
4152
4153 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4154                                           uint32_t vmid,
4155                                           uint32_t gds_base, uint32_t gds_size,
4156                                           uint32_t gws_base, uint32_t gws_size,
4157                                           uint32_t oa_base, uint32_t oa_size)
4158 {
4159         struct amdgpu_device *adev = ring->adev;
4160
4161         /* GDS Base */
4162         gfx_v9_0_write_data_to_reg(ring, 0, false,
4163                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4164                                    gds_base);
4165
4166         /* GDS Size */
4167         gfx_v9_0_write_data_to_reg(ring, 0, false,
4168                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4169                                    gds_size);
4170
4171         /* GWS */
4172         gfx_v9_0_write_data_to_reg(ring, 0, false,
4173                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4174                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4175
4176         /* OA */
4177         gfx_v9_0_write_data_to_reg(ring, 0, false,
4178                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4179                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
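             /* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a mask of
              * oa_size contiguous bits starting at bit oa_base, e.g. base 4,
              * size 4 -> 0xf0 */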
4180 }
4181
4182 static const u32 vgpr_init_compute_shader[] =
4183 {
4184         0xb07c0000, 0xbe8000ff,
4185         0x000000f8, 0xbf110800,
4186         0x7e000280, 0x7e020280,
4187         0x7e040280, 0x7e060280,
4188         0x7e080280, 0x7e0a0280,
4189         0x7e0c0280, 0x7e0e0280,
4190         0x80808800, 0xbe803200,
4191         0xbf84fff5, 0xbf9c0000,
4192         0xd28c0001, 0x0001007f,
4193         0xd28d0001, 0x0002027e,
4194         0x10020288, 0xb8810904,
4195         0xb7814000, 0xd1196a01,
4196         0x00000301, 0xbe800087,
4197         0xbefc00c1, 0xd89c4000,
4198         0x00020201, 0xd89cc080,
4199         0x00040401, 0x320202ff,
4200         0x00000800, 0x80808100,
4201         0xbf84fff8, 0x7e020280,
4202         0xbf810000, 0x00000000,
4203 };
4204
4205 static const u32 sgpr_init_compute_shader[] =
4206 {
4207         0xb07c0000, 0xbe8000ff,
4208         0x0000005f, 0xbee50080,
4209         0xbe812c65, 0xbe822c65,
4210         0xbe832c65, 0xbe842c65,
4211         0xbe852c65, 0xb77c0005,
4212         0x80808500, 0xbf84fff8,
4213         0xbe800080, 0xbf810000,
4214 };
4215
4216 static const u32 vgpr_init_compute_shader_arcturus[] = {
4217         0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4218         0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4219         0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4220         0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4221         0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4222         0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4223         0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4224         0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4225         0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4226         0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4227         0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4228         0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4229         0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4230         0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4231         0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4232         0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4233         0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4234         0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4235         0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4236         0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4237         0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4238         0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4239         0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4240         0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4241         0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4242         0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4243         0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4244         0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4245         0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4246         0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4247         0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4248         0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4249         0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4250         0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4251         0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4252         0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4253         0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4254         0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4255         0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4256         0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4257         0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4258         0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4259         0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4260         0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4261         0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4262         0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4263         0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4264         0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4265         0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4266         0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4267         0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4268         0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4269         0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4270         0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4271         0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4272         0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4273         0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4274         0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4275         0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4276         0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4277         0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4278         0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4279         0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4280         0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4281         0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4282         0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4283         0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4284         0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4285         0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4286         0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4287         0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4288         0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4289         0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4290         0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4291         0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4292         0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4293         0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4294         0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4295         0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4296         0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4297         0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4298         0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4299         0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4300         0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4301         0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4302         0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4303         0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4304         0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4305         0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4306         0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4307         0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4308         0xbf84fff8, 0xbf810000,
4309 };
4310
4311 /* When the register arrays below are changed, please update gpr_reg_size
4312  * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds,
4313  * to cover all gfx9 ASICs. */
4314 static const struct soc15_reg_entry vgpr_init_regs[] = {
4315    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4316    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4317    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4318    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4319    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4320    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4321    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4322    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4323    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4324    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4325    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4326    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4327    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4328    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4329 };
4330
4331 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4332    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4333    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4334    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4335    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4336    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4337    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4338    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4339    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4340    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4341    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4342    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4343    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4344    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4345    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4346 };
4347
4348 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4349    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4350    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4351    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4352    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4353    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4354    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4355    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4356    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4357    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4358    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4359    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4360    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4361    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4362    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4363 };
4364
4365 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4366    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4367    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4368    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4369    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4370    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4371    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4372    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4373    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4374    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4375    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4376    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4377    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4378    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4379    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4380 };
4381
4382 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4383    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4384    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4385    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4386    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4387    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4388    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4389    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4390    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4391    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4392    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4393    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4394    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4395    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4396    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4397    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4398    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4399    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4400    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4401    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4402    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4403    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4404    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4405    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4406    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4407    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4408    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4409    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4410    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4411    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4412    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4413    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4414    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4415    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4416 };
4417
4418 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4419 {
4420         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4421         int i, r;
4422
4423         /* only supported when RAS is enabled */
4424         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4425                 return 0;
4426
4427         r = amdgpu_ring_alloc(ring, 7);
4428         if (r) {
4429                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4430                         ring->name, r);
4431                 return r;
4432         }
4433
4434         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4435         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4436
4437         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4438         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4439                                 PACKET3_DMA_DATA_DST_SEL(1) |
4440                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4441                                 PACKET3_DMA_DATA_ENGINE(0)));
4442         amdgpu_ring_write(ring, 0);
4443         amdgpu_ring_write(ring, 0);
4444         amdgpu_ring_write(ring, 0);
4445         amdgpu_ring_write(ring, 0);
4446         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4447                                 adev->gds.gds_size);
4448
4449         amdgpu_ring_commit(ring);
4450
4451         for (i = 0; i < adev->usec_timeout; i++) {
4452                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4453                         break;
4454                 udelay(1);
4455         }
4456
4457         if (i >= adev->usec_timeout)
4458                 r = -ETIMEDOUT;
4459
4460         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4461
4462         return r;
4463 }
4464
4465 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4466 {
4467         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4468         struct amdgpu_ib ib;
4469         struct dma_fence *f = NULL;
4470         int r, i;
4471         unsigned total_size, vgpr_offset, sgpr_offset;
4472         u64 gpu_addr;
4473
4474         int compute_dim_x = adev->gfx.config.max_shader_engines *
4475                                                 adev->gfx.config.max_cu_per_sh *
4476                                                 adev->gfx.config.max_sh_per_se;
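             /* compute_dim_x is the total CU count (SEs * SHs/SE * CUs/SH);
              * the dispatches below scale it so waves land on every CU */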
4477         int sgpr_work_group_size = 5;
4478         int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4479         int vgpr_init_shader_size;
4480         const u32 *vgpr_init_shader_ptr;
4481         const struct soc15_reg_entry *vgpr_init_regs_ptr;
4482
4483         /* only supported when RAS is enabled */
4484         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4485                 return 0;
4486
4487         /* bail if the compute ring is not ready */
4488         if (!ring->sched.ready)
4489                 return 0;
4490
4491         if (adev->asic_type == CHIP_ARCTURUS) {
4492                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4493                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4494                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4495         } else {
4496                 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4497                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4498                 vgpr_init_regs_ptr = vgpr_init_regs;
4499         }
4500
4501         total_size =
4502                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4503         total_size +=
4504                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4505         total_size +=
4506                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4507         total_size = ALIGN(total_size, 256);
4508         vgpr_offset = total_size;
4509         total_size += ALIGN(vgpr_init_shader_size, 256);
4510         sgpr_offset = total_size;
4511         total_size += sizeof(sgpr_init_compute_shader);
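             /* IB layout: three dispatch sequences of (gpr_reg_size SET_SH_REG
              * packets at 3 dwords each + 4 dwords for COMPUTE_PGM_LO/HI + 5
              * for DISPATCH_DIRECT + 2 for EVENT_WRITE), then the VGPR shader
              * at a 256-byte boundary, then the SGPR shader */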
4512
4513         /* allocate an indirect buffer to put the commands in */
4514         memset(&ib, 0, sizeof(ib));
4515         r = amdgpu_ib_get(adev, NULL, total_size,
4516                                         AMDGPU_IB_POOL_DIRECT, &ib);
4517         if (r) {
4518                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4519                 return r;
4520         }
4521
4522         /* load the compute shaders */
4523         for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4524                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4525
4526         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4527                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4528
4529         /* init the ib length to 0 */
4530         ib.length_dw = 0;
4531
4532         /* VGPR */
4533         /* write the register state for the compute dispatch */
4534         for (i = 0; i < gpr_reg_size; i++) {
4535                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4536                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4537                                                                 - PACKET3_SET_SH_REG_START;
4538                 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4539         }
4540         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4541         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
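             /* COMPUTE_PGM_LO/HI take the shader address in 256-byte units,
              * hence the >> 8 */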
4542         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4543         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4544                                                         - PACKET3_SET_SH_REG_START;
4545         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4546         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4547
4548         /* write dispatch packet */
4549         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4550         ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4551         ib.ptr[ib.length_dw++] = 1; /* y */
4552         ib.ptr[ib.length_dw++] = 1; /* z */
4553         ib.ptr[ib.length_dw++] =
4554                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4555
4556         /* write CS partial flush packet */
4557         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4558         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
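             /* event type 7 is CS_PARTIAL_FLUSH: wait for the compute waves
              * just dispatched to retire before starting the next sequence */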
4559
4560         /* SGPR1 */
4561         /* write the register state for the compute dispatch */
4562         for (i = 0; i < gpr_reg_size; i++) {
4563                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4564                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4565                                                                 - PACKET3_SET_SH_REG_START;
4566                 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4567         }
4568         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4569         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4570         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4571         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4572                                                         - PACKET3_SET_SH_REG_START;
4573         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4574         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4575
4576         /* write dispatch packet */
4577         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4578         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4579         ib.ptr[ib.length_dw++] = 1; /* y */
4580         ib.ptr[ib.length_dw++] = 1; /* z */
4581         ib.ptr[ib.length_dw++] =
4582                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4583
4584         /* write CS partial flush packet */
4585         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4586         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4587
4588         /* SGPR2 */
4589         /* write the register state for the compute dispatch */
4590         for (i = 0; i < gpr_reg_size; i++) {
4591                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4592                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4593                                                                 - PACKET3_SET_SH_REG_START;
4594                 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4595         }
4596         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4597         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4598         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4599         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4600                                                         - PACKET3_SET_SH_REG_START;
4601         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4602         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4603
4604         /* write dispatch packet */
4605         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4606         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4607         ib.ptr[ib.length_dw++] = 1; /* y */
4608         ib.ptr[ib.length_dw++] = 1; /* z */
4609         ib.ptr[ib.length_dw++] =
4610                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4611
4612         /* write CS partial flush packet */
4613         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4614         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4615
4616         /* schedule the ib on the ring */
4617         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4618         if (r) {
4619                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4620                 goto fail;
4621         }
4622
4623         /* wait for the GPU to finish processing the IB */
4624         r = dma_fence_wait(f, false);
4625         if (r) {
4626                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4627                 goto fail;
4628         }
4629
4630 fail:
4631         amdgpu_ib_free(adev, &ib, NULL);
4632         dma_fence_put(f);
4633
4634         return r;
4635 }
4636
4637 static int gfx_v9_0_early_init(void *handle)
4638 {
4639         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4640
4641         if (adev->asic_type == CHIP_ARCTURUS)
4642                 adev->gfx.num_gfx_rings = 0;
4643         else
4644                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4645         adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4646                                           AMDGPU_MAX_COMPUTE_RINGS);
4647         gfx_v9_0_set_kiq_pm4_funcs(adev);
4648         gfx_v9_0_set_ring_funcs(adev);
4649         gfx_v9_0_set_irq_funcs(adev);
4650         gfx_v9_0_set_gds_init(adev);
4651         gfx_v9_0_set_rlc_funcs(adev);
4652
4653         return 0;
4654 }
4655
4656 static int gfx_v9_0_ecc_late_init(void *handle)
4657 {
4658         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4659         int r;
4660
4661         /*
4662          * Temporary workaround for an issue where CP firmware fails to update
4663          * the read pointer while CPDMA writes the GDS-clearing operation during
4664          * the suspend/resume sequence on several cards, so limit this operation
4665          * to the cold boot sequence.
4666          */
4667         if (!adev->in_suspend) {
4668                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4669                 if (r)
4670                         return r;
4671         }
4672
4673         /* requires IBs so do in late init after IB pool is initialized */
4674         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4675         if (r)
4676                 return r;
4677
4678         if (adev->gfx.funcs &&
4679             adev->gfx.funcs->reset_ras_error_count)
4680                 adev->gfx.funcs->reset_ras_error_count(adev);
4681
4682         r = amdgpu_gfx_ras_late_init(adev);
4683         if (r)
4684                 return r;
4685
4686         return 0;
4687 }
4688
4689 static int gfx_v9_0_late_init(void *handle)
4690 {
4691         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4692         int r;
4693
4694         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4695         if (r)
4696                 return r;
4697
4698         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4699         if (r)
4700                 return r;
4701
4702         r = gfx_v9_0_ecc_late_init(handle);
4703         if (r)
4704                 return r;
4705
4706         return 0;
4707 }
4708
4709 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4710 {
4711         uint32_t rlc_setting;
4712
4713         /* if RLC is not enabled, do nothing */
4714         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4715         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4716                 return false;
4717
4718         return true;
4719 }
4720
4721 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4722 {
4723         uint32_t data;
4724         unsigned i;
4725
4726         data = RLC_SAFE_MODE__CMD_MASK;
4727         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4728         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4729
4730         /* wait for RLC_SAFE_MODE */
4731         for (i = 0; i < adev->usec_timeout; i++) {
4732                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4733                         break;
4734                 udelay(1);
4735         }
4736 }
4737
4738 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4739 {
4740         uint32_t data;
4741
4742         data = RLC_SAFE_MODE__CMD_MASK;
4743         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4744 }
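
/*
 * The safe-mode pair above follows a "write request, poll for hardware
 * acknowledge" handshake: CMD plus a MESSAGE value are written, and the RLC
 * clears the CMD bit once it has honored the request. A generic sketch of
 * that polling shape, assuming only the driver's usec_timeout convention
 * (poll_ack_clear() is an illustrative name, not driver API):
 */
static int poll_ack_clear(struct amdgpu_device *adev, u32 reg, u32 ack_mask)
{
        unsigned int i;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(reg) & ack_mask))  /* HW cleared the ack bit */
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}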
4745
4746 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4747                                                 bool enable)
4748 {
4749         amdgpu_gfx_rlc_enter_safe_mode(adev);
4750
4751         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4752                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4753                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4754                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4755         } else {
4756                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4757                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4758                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4759         }
4760
4761         amdgpu_gfx_rlc_exit_safe_mode(adev);
4762 }
4763
4764 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4765                                                 bool enable)
4766 {
4767         /* TODO: double-check whether this needs to run under RLC safe mode */
4768         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4769
4770         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4771                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4772         else
4773                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4774
4775         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4776                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4777         else
4778                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4779
4780         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4781 }
4782
4783 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4784                                                       bool enable)
4785 {
4786         uint32_t data, def;
4787
4788         amdgpu_gfx_rlc_enter_safe_mode(adev);
4789
4790         /* It is disabled by HW by default */
4791         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4792                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4793                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4794
4795                 if (adev->asic_type != CHIP_VEGA12)
4796                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4797
4798                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4799                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4800                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4801
4802                 /* only for Vega10 & Raven1 */
4803                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4804
4805                 if (def != data)
4806                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4807
4808                 /* MGLS is a global flag to control all MGLS in GFX */
4809                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4810                         /* 2 - RLC memory Light sleep */
4811                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4812                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4813                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4814                                 if (def != data)
4815                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4816                         }
4817                         /* 3 - CP memory Light sleep */
4818                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4819                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4820                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4821                                 if (def != data)
4822                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4823                         }
4824                 }
4825         } else {
4826                 /* 1 - MGCG_OVERRIDE */
4827                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4828
4829                 if (adev->asic_type != CHIP_VEGA12)
4830                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4831
4832                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4833                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4834                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4835                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4836
4837                 if (def != data)
4838                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4839
4840                 /* 2 - disable MGLS in RLC */
4841                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4842                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4843                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4844                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4845                 }
4846
4847                 /* 3 - disable MGLS in CP */
4848                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4849                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4850                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4851                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4852                 }
4853         }
4854
4855         amdgpu_gfx_rlc_exit_safe_mode(adev);
4856 }
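
/*
 * The def/data idiom used throughout the clock-gating paths above is a
 * read-modify-write that skips redundant MMIO: read once, compute the new
 * value, and write back only if something changed. A condensed sketch
 * (update_reg_bits() is an illustrative name, not driver API):
 */
static void update_reg_bits(struct amdgpu_device *adev, u32 reg,
                            u32 clear_mask, u32 set_mask)
{
        u32 def, data;

        def = data = RREG32(reg);       /* snapshot the current value */
        data &= ~clear_mask;
        data |= set_mask;
        if (def != data)                /* avoid the write when nothing changed */
                WREG32(reg, data);
}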
4857
4858 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4859                                            bool enable)
4860 {
4861         uint32_t data, def;
4862
4863         if (adev->asic_type == CHIP_ARCTURUS)
4864                 return;
4865
4866         amdgpu_gfx_rlc_enter_safe_mode(adev);
4867
4868         /* Enable 3D CGCG/CGLS */
4869         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4870                 /* write cmd to clear the cgcg/cgls override */
4871                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4872                 /* unset CGCG override */
4873                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4874                 /* update CGCG and CGLS override bits */
4875                 if (def != data)
4876                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4877
4878                 /* enable the 3D CGCG FSM (0x0000363f) */
4879                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4880
4881                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4882                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4883                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4884                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4885                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4886                 if (def != data)
4887                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4888
4889                 /* set IDLE_POLL_COUNT(0x00900100) */
4890                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4891                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4892                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4893                 if (def != data)
4894                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4895         } else {
4896                 /* Disable CGCG/CGLS */
4897                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4898                 /* disable cgcg, cgls should be disabled */
4899                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4900                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4901                 /* disable cgcg and cgls in FSM */
4902                 if (def != data)
4903                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4904         }
4905
4906         amdgpu_gfx_rlc_exit_safe_mode(adev);
4907 }
4908
4909 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4910                                                       bool enable)
4911 {
4912         uint32_t def, data;
4913
4914         amdgpu_gfx_rlc_enter_safe_mode(adev);
4915
4916         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4917                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4918                 /* unset CGCG override */
4919                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4920                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4921                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4922                 else
4923                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4924                 /* update CGCG and CGLS override bits */
4925                 if (def != data)
4926                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4927
4928                 /* enable the CGCG FSM (0x0000363F) */
4929                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4930
4931                 if (adev->asic_type == CHIP_ARCTURUS)
4932                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4933                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4934                 else
4935                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4936                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4937                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4938                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4939                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4940                 if (def != data)
4941                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4942
4943                 /* set IDLE_POLL_COUNT(0x00900100) */
4944                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4945                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4946                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4947                 if (def != data)
4948                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4949         } else {
4950                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4951                 /* reset CGCG/CGLS bits */
4952                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4953                 /* disable cgcg and cgls in FSM */
4954                 if (def != data)
4955                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4956         }
4957
4958         amdgpu_gfx_rlc_exit_safe_mode(adev);
4959 }
4960
4961 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4962                                             bool enable)
4963 {
4964         if (enable) {
4965                 /* CGCG/CGLS should be enabled after MGCG/MGLS
4966                  * ===  MGCG + MGLS ===
4967                  */
4968                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4969                 /* ===  CGCG /CGLS for GFX 3D Only === */
4970                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4971                 /* ===  CGCG + CGLS === */
4972                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4973         } else {
4974                 /* CGCG/CGLS should be disabled before MGCG/MGLS
4975                  * ===  CGCG + CGLS ===
4976                  */
4977                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4978                 /* ===  CGCG /CGLS for GFX 3D Only === */
4979                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4980                 /* ===  MGCG + MGLS === */
4981                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4982         }
4983         return 0;
4984 }
4985
4986 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4987 {
4988         u32 reg, data;
4989
4990         reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
4991         if (amdgpu_sriov_is_pp_one_vf(adev))
4992                 data = RREG32_NO_KIQ(reg);
4993         else
4994                 data = RREG32(reg);
4995
4996         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4997         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4998
4999         if (amdgpu_sriov_is_pp_one_vf(adev))
5000                 WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
5001         else
5002                 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
5003 }
5004
5005 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
5006                                         uint32_t offset,
5007                                         struct soc15_reg_rlcg *entries, int arr_size)
5008 {
5009         int i;
5010         uint32_t reg;
5011
5012         if (!entries)
5013                 return false;
5014
5015         for (i = 0; i < arr_size; i++) {
5016                 const struct soc15_reg_rlcg *entry;
5017
5018                 entry = &entries[i];
5019                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
5020                 if (offset == reg)
5021                         return true;
5022         }
5023
5024         return false;
5025 }
5026
5027 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5028 {
5029         return gfx_v9_0_check_rlcg_range(adev, offset,
5030                                         (void *)rlcg_access_gc_9_0,
5031                                         ARRAY_SIZE(rlcg_access_gc_9_0));
5032 }
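
/*
 * Usage sketch for the allow-list check above: a caller deciding whether a
 * register write may take the RLC-assisted path. rlcg_write_checked() is an
 * illustrative wrapper, and reaching the rlc_funcs table below through
 * adev->gfx.rlc.funcs is shown as an assumption.
 */
static void rlcg_write_checked(struct amdgpu_device *adev, u32 offset, u32 v)
{
        if (gfx_v9_0_is_rlcg_access_range(adev, offset))
                adev->gfx.rlc.funcs->rlcg_wreg(adev, offset, v); /* RLC-assisted */
        else
                WREG32(offset, v);                               /* direct MMIO */
}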
5033
5034 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5035         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5036         .set_safe_mode = gfx_v9_0_set_safe_mode,
5037         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5038         .init = gfx_v9_0_rlc_init,
5039         .get_csb_size = gfx_v9_0_get_csb_size,
5040         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5041         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5042         .resume = gfx_v9_0_rlc_resume,
5043         .stop = gfx_v9_0_rlc_stop,
5044         .reset = gfx_v9_0_rlc_reset,
5045         .start = gfx_v9_0_rlc_start,
5046         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5047         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5048         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5049 };
5050
5051 static int gfx_v9_0_set_powergating_state(void *handle,
5052                                           enum amd_powergating_state state)
5053 {
5054         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5055         bool enable = (state == AMD_PG_STATE_GATE);
5056
5057         switch (adev->asic_type) {
5058         case CHIP_RAVEN:
5059         case CHIP_RENOIR:
5060                 if (!enable)
5061                         amdgpu_gfx_off_ctrl(adev, false);
5062
5063                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5064                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5065                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5066                 } else {
5067                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5068                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5069                 }
5070
5071                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5072                         gfx_v9_0_enable_cp_power_gating(adev, true);
5073                 else
5074                         gfx_v9_0_enable_cp_power_gating(adev, false);
5075
5076                 /* update gfx cgpg state */
5077                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5078
5079                 /* update mgcg state */
5080                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5081
5082                 if (enable)
5083                         amdgpu_gfx_off_ctrl(adev, true);
5084                 break;
5085         case CHIP_VEGA12:
5086                 amdgpu_gfx_off_ctrl(adev, enable);
5087                 break;
5088         default:
5089                 break;
5090         }
5091
5092         return 0;
5093 }
5094
5095 static int gfx_v9_0_set_clockgating_state(void *handle,
5096                                           enum amd_clockgating_state state)
5097 {
5098         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5099
5100         if (amdgpu_sriov_vf(adev))
5101                 return 0;
5102
5103         switch (adev->asic_type) {
5104         case CHIP_VEGA10:
5105         case CHIP_VEGA12:
5106         case CHIP_VEGA20:
5107         case CHIP_RAVEN:
5108         case CHIP_ARCTURUS:
5109         case CHIP_RENOIR:
5110                 gfx_v9_0_update_gfx_clock_gating(adev,
5111                                                  state == AMD_CG_STATE_GATE);
5112                 break;
5113         default:
5114                 break;
5115         }
5116         return 0;
5117 }
5118
5119 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5120 {
5121         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5122         int data;
5123
5124         if (amdgpu_sriov_vf(adev))
5125                 *flags = 0;
5126
5127         /* AMD_CG_SUPPORT_GFX_MGCG */
5128         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5129         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5130                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5131
5132         /* AMD_CG_SUPPORT_GFX_CGCG */
5133         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5134         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5135                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5136
5137         /* AMD_CG_SUPPORT_GFX_CGLS */
5138         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5139                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5140
5141         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5142         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5143         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5144                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5145
5146         /* AMD_CG_SUPPORT_GFX_CP_LS */
5147         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5148         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5149                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5150
5151         if (adev->asic_type != CHIP_ARCTURUS) {
5152                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5153                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5154                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5155                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5156
5157                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5158                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5159                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5160         }
5161 }
5162
5163 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5164 {
5165         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 uses a 32-bit rptr */
5166 }
5167
5168 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5169 {
5170         struct amdgpu_device *adev = ring->adev;
5171         u64 wptr;
5172
5173         /* XXX check if swapping is necessary on BE */
5174         if (ring->use_doorbell) {
5175                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5176         } else {
5177                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5178                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5179         }
5180
5181         return wptr;
5182 }
5183
5184 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5185 {
5186         struct amdgpu_device *adev = ring->adev;
5187
5188         if (ring->use_doorbell) {
5189                 /* XXX check if swapping is necessary on BE */
5190                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5191                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5192         } else {
5193                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5194                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5195         }
5196 }
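
/*
 * Sketch of the doorbell hand-off used above: the new write pointer is first
 * published to the writeback slot (atomically, since the CP may read it
 * concurrently), then the doorbell poke notifies the CP. publish_wptr() is
 * an illustrative name for the pattern, not a driver helper.
 */
static void publish_wptr(struct amdgpu_ring *ring, u64 wptr)
{
        struct amdgpu_device *adev = ring->adev;

        atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], wptr);
        WDOORBELL64(ring->doorbell_index, wptr);
}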
5197
5198 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5199 {
5200         struct amdgpu_device *adev = ring->adev;
5201         u32 ref_and_mask, reg_mem_engine;
5202         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5203
5204         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5205                 switch (ring->me) {
5206                 case 1:
5207                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5208                         break;
5209                 case 2:
5210                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5211                         break;
5212                 default:
5213                         return;
5214                 }
5215                 reg_mem_engine = 0;
5216         } else {
5217                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5218                 reg_mem_engine = 1; /* pfp */
5219         }
5220
5221         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5222                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5223                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5224                               ref_and_mask, ref_and_mask, 0x20);
5225 }
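
/*
 * What the wait_reg_mem call above asks the CP to do, expressed as host-side
 * pseudocode (the real sequence runs in CP microcode; the 0x20 argument is
 * the CP's poll interval). hdp_flush_semantics() is illustrative only.
 */
static void hdp_flush_semantics(u32 req_reg, u32 done_reg, u32 ref_and_mask)
{
        WREG32(req_reg, ref_and_mask);          /* request a flush for this engine */
        while ((RREG32(done_reg) & ref_and_mask) != ref_and_mask)
                ;                               /* spin until NBIO reports done */
}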
5226
5227 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5228                                         struct amdgpu_job *job,
5229                                         struct amdgpu_ib *ib,
5230                                         uint32_t flags)
5231 {
5232         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5233         u32 header, control = 0;
5234
5235         if (ib->flags & AMDGPU_IB_FLAG_CE)
5236                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5237         else
5238                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5239
5240         control |= ib->length_dw | (vmid << 24);
5241
5242         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5243                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5244
5245                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5246                         gfx_v9_0_ring_emit_de_meta(ring);
5247         }
5248
5249         amdgpu_ring_write(ring, header);
5250         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5251         amdgpu_ring_write(ring,
5252 #ifdef __BIG_ENDIAN
5253                 (2 << 0) |
5254 #endif
5255                 lower_32_bits(ib->gpu_addr));
5256         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5257         amdgpu_ring_write(ring, control);
5258 }
5259
5260 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5261                                           struct amdgpu_job *job,
5262                                           struct amdgpu_ib *ib,
5263                                           uint32_t flags)
5264 {
5265         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5266         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5267
5268         /* Currently there is a high likelihood of a wave ID mismatch
5269          * between ME and GDS, leading to a HW deadlock, because ME
5270          * generates different wave IDs than the GDS expects. This happens
5271          * randomly when at least 5 compute pipes use GDS ordered append.
5272          * The wave IDs generated by ME are also wrong after suspend/resume.
5273          * Those are probably bugs somewhere else in the kernel driver.
5274          *
5275          * Writing GDS_COMPUTE_MAX_WAVE_ID resets the wave ID counters in
5276          * both ME and GDS to 0 for this ring (me/pipe).
5277          */
5278         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5279                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5280                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5281                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5282         }
5283
5284         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5285         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5286         amdgpu_ring_write(ring,
5287 #ifdef __BIG_ENDIAN
5288                                 (2 << 0) |
5289 #endif
5290                                 lower_32_bits(ib->gpu_addr));
5291         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5292         amdgpu_ring_write(ring, control);
5293 }
5294
5295 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5296                                      u64 seq, unsigned flags)
5297 {
5298         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5299         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5300         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5301
5302         /* RELEASE_MEM - flush caches, send int */
5303         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5304         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5305                                                EOP_TC_NC_ACTION_EN) :
5306                                               (EOP_TCL1_ACTION_EN |
5307                                                EOP_TC_ACTION_EN |
5308                                                EOP_TC_WB_ACTION_EN |
5309                                                EOP_TC_MD_ACTION_EN)) |
5310                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5311                                  EVENT_INDEX(5)));
5312         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5313
5314         /*
5315          * The address must be qword-aligned for a 64-bit write and
5316          * dword-aligned when only the low 32 bits are sent (high discarded).
5317          */
5318         if (write64bit)
5319                 BUG_ON(addr & 0x7);
5320         else
5321                 BUG_ON(addr & 0x3);
5322         amdgpu_ring_write(ring, lower_32_bits(addr));
5323         amdgpu_ring_write(ring, upper_32_bits(addr));
5324         amdgpu_ring_write(ring, lower_32_bits(seq));
5325         amdgpu_ring_write(ring, upper_32_bits(seq));
5326         amdgpu_ring_write(ring, 0);
5327 }
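
/*
 * A worked example of the alignment rule enforced above: a 64-bit fence
 * write stores one qword, so bits [2:0] of the address must be zero, while
 * a 32-bit write only needs bits [1:0] clear. fence_addr_aligned() is an
 * illustrative predicate, not driver API.
 */
static bool fence_addr_aligned(u64 addr, bool write64bit)
{
        return write64bit ? !(addr & 0x7) : !(addr & 0x3);
}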
5328
5329 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5330 {
5331         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5332         uint32_t seq = ring->fence_drv.sync_seq;
5333         uint64_t addr = ring->fence_drv.gpu_addr;
5334
5335         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5336                               lower_32_bits(addr), upper_32_bits(addr),
5337                               seq, 0xffffffff, 4);
5338 }
5339
5340 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5341                                         unsigned vmid, uint64_t pd_addr)
5342 {
5343         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5344
5345         /* compute doesn't have PFP */
5346         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5347                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5348                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5349                 amdgpu_ring_write(ring, 0x0);
5350         }
5351 }
5352
5353 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5354 {
5355         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware uses a 32-bit rptr */
5356 }
5357
5358 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5359 {
5360         u64 wptr;
5361
5362         /* XXX check if swapping is necessary on BE */
5363         if (ring->use_doorbell)
5364                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5365         else
5366                 BUG();
5367         return wptr;
5368 }
5369
5370 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5371 {
5372         struct amdgpu_device *adev = ring->adev;
5373
5374         /* XXX check if swapping is necessary on BE */
5375         if (ring->use_doorbell) {
5376                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5377                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5378         } else {
5379                 BUG(); /* only DOORBELL method supported on gfx9 now */
5380         }
5381 }
5382
5383 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5384                                          u64 seq, unsigned int flags)
5385 {
5386         struct amdgpu_device *adev = ring->adev;
5387
5388         /* we only allocate 32 bits of writeback space for each fence seq */
5389         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5390
5391         /* write fence seq to the "addr" */
5392         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5393         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5394                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5395         amdgpu_ring_write(ring, lower_32_bits(addr));
5396         amdgpu_ring_write(ring, upper_32_bits(addr));
5397         amdgpu_ring_write(ring, lower_32_bits(seq));
5398
5399         if (flags & AMDGPU_FENCE_FLAG_INT) {
5400                 /* set register to trigger INT */
5401                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5402                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5403                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5404                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5405                 amdgpu_ring_write(ring, 0);
5406                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5407         }
5408 }
5409
5410 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5411 {
5412         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5413         amdgpu_ring_write(ring, 0);
5414 }
5415
5416 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5417 {
5418         struct v9_ce_ib_state ce_payload = {0};
5419         uint64_t csa_addr;
5420         int cnt;
5421
5422         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5423         csa_addr = amdgpu_csa_vaddr(ring->adev);
5424
5425         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5426         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5427                                  WRITE_DATA_DST_SEL(8) |
5428                                  WR_CONFIRM) |
5429                                  WRITE_DATA_CACHE_POLICY(0));
5430         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5431         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5432         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5433 }
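
/*
 * The count arithmetic above, unpacked (assuming the usual PM4 rule that the
 * header's count field encodes "body dwords - 1"): the WRITE_DATA body is
 * control + addr_lo + addr_hi + payload, i.e. 3 + (sizeof(ce_payload) >> 2)
 * dwords, so cnt = (sizeof(ce_payload) >> 2) + 3 - 1, which the code writes
 * as (sizeof(ce_payload) >> 2) + 4 - 2. The same derivation applies to the
 * de_payload packet below.
 */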
5434
5435 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5436 {
5437         struct v9_de_ib_state de_payload = {0};
5438         uint64_t csa_addr, gds_addr;
5439         int cnt;
5440
5441         csa_addr = amdgpu_csa_vaddr(ring->adev);
5442         gds_addr = csa_addr + 4096;
5443         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5444         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5445
5446         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5447         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5448         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5449                                  WRITE_DATA_DST_SEL(8) |
5450                                  WR_CONFIRM) |
5451                                  WRITE_DATA_CACHE_POLICY(0));
5452         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5453         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5454         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5455 }
5456
5457 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5458                                    bool secure)
5459 {
5460         uint32_t v = secure ? FRAME_TMZ : 0;
5461
5462         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5463         amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5464 }
5465
5466 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5467 {
5468         uint32_t dw2 = 0;
5469
5470         if (amdgpu_sriov_vf(ring->adev))
5471                 gfx_v9_0_ring_emit_ce_meta(ring);
5472
5473         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5474         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5475                 /* set load_global_config & load_global_uconfig */
5476                 dw2 |= 0x8001;
5477                 /* set load_cs_sh_regs */
5478                 dw2 |= 0x01000000;
5479                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5480                 dw2 |= 0x10002;
5481
5482                 /* set load_ce_ram if a preamble is presented */
5483                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5484                         dw2 |= 0x10000000;
5485         } else {
5486                 /* still load_ce_ram if this is the first time a preamble is
5487                  * presented, even though no context switch happens.
5488                  */
5489                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5490                         dw2 |= 0x10000000;
5491         }
5492
5493         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5494         amdgpu_ring_write(ring, dw2);
5495         amdgpu_ring_write(ring, 0);
5496 }
5497
5498 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5499 {
5500         unsigned ret;
5501         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5502         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5503         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5504         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
5505         ret = ring->wptr & ring->buf_mask;
5506         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5507         return ret;
5508 }
5509
5510 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5511 {
5512         unsigned cur;
5513         BUG_ON(offset > ring->buf_mask);
5514         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5515
5516         cur = (ring->wptr & ring->buf_mask) - 1;
5517         if (likely(cur > offset))
5518                 ring->ring[offset] = cur - offset;
5519         else
5520                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5521 }
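
/*
 * The init/patch pair above is a deferred-patching pattern: COND_EXEC is
 * emitted with a dummy dword count, and once the guarded commands are on the
 * ring the count is back-filled at the saved offset. A sketch of the
 * distance math, including the wrap-around case (ring_dw is the ring size
 * in dwords); cond_exec_distance() is an illustrative name.
 */
static u32 cond_exec_distance(u32 cur, u32 offset, u32 ring_dw)
{
        /* dwords from the patched slot up to the current write position */
        return (cur > offset) ? cur - offset : ring_dw - offset + cur;
}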
5522
5523 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5524                                     uint32_t reg_val_offs)
5525 {
5526         struct amdgpu_device *adev = ring->adev;
5527
5528         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5529         amdgpu_ring_write(ring, 0 |     /* src: register */
5530                                 (5 << 8) |      /* dst: memory */
5531                                 (1 << 20));     /* write confirm */
5532         amdgpu_ring_write(ring, reg);
5533         amdgpu_ring_write(ring, 0);
5534         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5535                                 reg_val_offs * 4));
5536         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5537                                 reg_val_offs * 4));
5538 }
5539
5540 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5541                                     uint32_t val)
5542 {
5543         uint32_t cmd = 0;
5544
5545         switch (ring->funcs->type) {
5546         case AMDGPU_RING_TYPE_GFX:
5547                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5548                 break;
5549         case AMDGPU_RING_TYPE_KIQ:
5550                 cmd = (1 << 16); /* no inc addr */
5551                 break;
5552         default:
5553                 cmd = WR_CONFIRM;
5554                 break;
5555         }
5556         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5557         amdgpu_ring_write(ring, cmd);
5558         amdgpu_ring_write(ring, reg);
5559         amdgpu_ring_write(ring, 0);
5560         amdgpu_ring_write(ring, val);
5561 }
5562
5563 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5564                                         uint32_t val, uint32_t mask)
5565 {
5566         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5567 }
5568
5569 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5570                                                   uint32_t reg0, uint32_t reg1,
5571                                                   uint32_t ref, uint32_t mask)
5572 {
5573         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5574         struct amdgpu_device *adev = ring->adev;
5575         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5576                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5577
5578         if (fw_version_ok)
5579                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5580                                       ref, mask, 0x20);
5581         else
5582                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5583                                                            ref, mask);
5584 }
5585
5586 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5587 {
5588         struct amdgpu_device *adev = ring->adev;
5589         uint32_t value = 0;
5590
5591         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5592         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5593         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5594         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5595         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5596 }
5597
5598 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5599                                                  enum amdgpu_interrupt_state state)
5600 {
5601         switch (state) {
5602         case AMDGPU_IRQ_STATE_DISABLE:
5603         case AMDGPU_IRQ_STATE_ENABLE:
5604                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5605                                TIME_STAMP_INT_ENABLE,
5606                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5607                 break;
5608         default:
5609                 break;
5610         }
5611 }
5612
5613 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5614                                                      int me, int pipe,
5615                                                      enum amdgpu_interrupt_state state)
5616 {
5617         u32 mec_int_cntl, mec_int_cntl_reg;
5618
5619         /*
5620          * amdgpu controls only the first MEC. That's why this function only
5621          * handles the setting of interrupts for this specific MEC. All other
5622          * pipes' interrupts are set by amdkfd.
5623          */
5624
5625         if (me == 1) {
5626                 switch (pipe) {
5627                 case 0:
5628                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5629                         break;
5630                 case 1:
5631                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5632                         break;
5633                 case 2:
5634                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5635                         break;
5636                 case 3:
5637                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5638                         break;
5639                 default:
5640                         DRM_DEBUG("invalid pipe %d\n", pipe);
5641                         return;
5642                 }
5643         } else {
5644                 DRM_DEBUG("invalid me %d\n", me);
5645                 return;
5646         }
5647
5648         switch (state) {
5649         case AMDGPU_IRQ_STATE_DISABLE:
5650                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5651                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5652                                              TIME_STAMP_INT_ENABLE, 0);
5653                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5654                 break;
5655         case AMDGPU_IRQ_STATE_ENABLE:
5656                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5657                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5658                                              TIME_STAMP_INT_ENABLE, 1);
5659                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5660                 break;
5661         default:
5662                 break;
5663         }
5664 }
5665
5666 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5667                                              struct amdgpu_irq_src *source,
5668                                              unsigned type,
5669                                              enum amdgpu_interrupt_state state)
5670 {
5671         switch (state) {
5672         case AMDGPU_IRQ_STATE_DISABLE:
5673         case AMDGPU_IRQ_STATE_ENABLE:
5674                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5675                                PRIV_REG_INT_ENABLE,
5676                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5677                 break;
5678         default:
5679                 break;
5680         }
5681
5682         return 0;
5683 }
5684
5685 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5686                                               struct amdgpu_irq_src *source,
5687                                               unsigned type,
5688                                               enum amdgpu_interrupt_state state)
5689 {
5690         switch (state) {
5691         case AMDGPU_IRQ_STATE_DISABLE:
5692         case AMDGPU_IRQ_STATE_ENABLE:
5693                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5694                                PRIV_INSTR_INT_ENABLE,
5695                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5696                 break;
5697         default:
5698                 break;
5699         }
5700
5701         return 0;
5702 }
5703
5704 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5705         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5706                         CP_ECC_ERROR_INT_ENABLE, 1)
5707
5708 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5709         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5710                         CP_ECC_ERROR_INT_ENABLE, 0)
5711
5712 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5713                                               struct amdgpu_irq_src *source,
5714                                               unsigned type,
5715                                               enum amdgpu_interrupt_state state)
5716 {
5717         switch (state) {
5718         case AMDGPU_IRQ_STATE_DISABLE:
5719                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5720                                 CP_ECC_ERROR_INT_ENABLE, 0);
5721                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5722                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5723                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5724                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5725                 break;
5726
5727         case AMDGPU_IRQ_STATE_ENABLE:
5728                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5729                                 CP_ECC_ERROR_INT_ENABLE, 1);
5730                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5731                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5732                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5733                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5734                 break;
5735         default:
5736                 break;
5737         }
5738
5739         return 0;
5740 }
5741
5742
5743 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5744                                             struct amdgpu_irq_src *src,
5745                                             unsigned type,
5746                                             enum amdgpu_interrupt_state state)
5747 {
5748         switch (type) {
5749         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5750                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5751                 break;
5752         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5753                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5754                 break;
5755         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5756                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5757                 break;
5758         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5759                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5760                 break;
5761         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5762                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5763                 break;
5764         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5765                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5766                 break;
5767         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5768                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5769                 break;
5770         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5771                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5772                 break;
5773         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5774                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5775                 break;
5776         default:
5777                 break;
5778         }
5779         return 0;
5780 }
5781
5782 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5783                             struct amdgpu_irq_src *source,
5784                             struct amdgpu_iv_entry *entry)
5785 {
5786         int i;
5787         u8 me_id, pipe_id, queue_id;
5788         struct amdgpu_ring *ring;
5789
5790         DRM_DEBUG("IH: CP EOP\n");
5791         me_id = (entry->ring_id & 0x0c) >> 2;
5792         pipe_id = (entry->ring_id & 0x03) >> 0;
5793         queue_id = (entry->ring_id & 0x70) >> 4;
5794
5795         switch (me_id) {
5796         case 0:
5797                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5798                 break;
5799         case 1:
5800         case 2:
5801                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5802                         ring = &adev->gfx.compute_ring[i];
5803                         /* Per-queue interrupt is supported for MEC starting from VI;
5804                          * it can only be enabled/disabled per pipe, not per queue.
5805                          */
5806                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5807                                 amdgpu_fence_process(ring);
5808                 }
5809                 break;
5810         }
5811         return 0;
5812 }
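
/*
 * The ring_id decode used above and again in gfx_v9_0_fault(), as a
 * self-contained sketch; the field layout is taken from the masks in the
 * handlers, and the struct/function names are illustrative.
 */
struct gfx_v9_ring_id {
        u8 me;          /* bits [3:2] */
        u8 pipe;        /* bits [1:0] */
        u8 queue;       /* bits [6:4] */
};

static struct gfx_v9_ring_id decode_ring_id(u8 ring_id)
{
        return (struct gfx_v9_ring_id) {
                .me    = (ring_id & 0x0c) >> 2,
                .pipe  = (ring_id & 0x03) >> 0,
                .queue = (ring_id & 0x70) >> 4,
        };
}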
5813
5814 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5815                            struct amdgpu_iv_entry *entry)
5816 {
5817         u8 me_id, pipe_id, queue_id;
5818         struct amdgpu_ring *ring;
5819         int i;
5820
5821         me_id = (entry->ring_id & 0x0c) >> 2;
5822         pipe_id = (entry->ring_id & 0x03) >> 0;
5823         queue_id = (entry->ring_id & 0x70) >> 4;
5824
5825         switch (me_id) {
5826         case 0:
5827                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5828                 break;
5829         case 1:
5830         case 2:
5831                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5832                         ring = &adev->gfx.compute_ring[i];
5833                         if (ring->me == me_id && ring->pipe == pipe_id &&
5834                             ring->queue == queue_id)
5835                                 drm_sched_fault(&ring->sched);
5836                 }
5837                 break;
5838         }
5839 }
5840
5841 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5842                                  struct amdgpu_irq_src *source,
5843                                  struct amdgpu_iv_entry *entry)
5844 {
5845         DRM_ERROR("Illegal register access in command stream\n");
5846         gfx_v9_0_fault(adev, entry);
5847         return 0;
5848 }
5849
5850 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5851                                   struct amdgpu_irq_src *source,
5852                                   struct amdgpu_iv_entry *entry)
5853 {
5854         DRM_ERROR("Illegal instruction in command stream\n");
5855         gfx_v9_0_fault(adev, entry);
5856         return 0;
5857 }
5858
5859
5860 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5861         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5862           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5863           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5864         },
5865         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5866           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5867           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5868         },
5869         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5870           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5871           0, 0
5872         },
5873         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5874           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5875           0, 0
5876         },
5877         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5878           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5879           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5880         },
5881         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5882           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5883           0, 0
5884         },
5885         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5886           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5887           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5888         },
5889         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5890           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5891           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5892         },
5893         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5894           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5895           0, 0
5896         },
5897         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5898           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5899           0, 0
5900         },
5901         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5902           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5903           0, 0
5904         },
5905         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5906           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5907           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5908         },
5909         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5910           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5911           0, 0
5912         },
5913         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5914           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5915           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5916         },
5917         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5918           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5919           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5920           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5921         },
5922         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5923           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5924           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5925           0, 0
5926         },
5927         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5928           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5929           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5930           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5931         },
5932         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5933           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5934           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5935           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5936         },
5937         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5938           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5939           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5940           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5941         },
5942         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5943           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5944           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5945           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5946         },
5947         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5948           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5949           0, 0
5950         },
5951         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5952           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5953           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5954         },
5955         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5956           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
5957           0, 0
5958         },
5959         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5960           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
5961           0, 0
5962         },
5963         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5964           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
5965           0, 0
5966         },
5967         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5968           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
5969           0, 0
5970         },
5971         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5972           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
5973           0, 0
5974         },
5975         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5976           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
5977           0, 0
5978         },
5979         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5980           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
5981           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
5982         },
5983         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5984           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
5985           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
5986         },
5987         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5988           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
5989           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
5990         },
5991         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5992           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
5993           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
5994         },
5995         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5996           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
5997           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
5998         },
5999         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6000           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
6001           0, 0
6002         },
6003         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6004           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
6005           0, 0
6006         },
6007         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6008           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
6009           0, 0
6010         },
6011         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6012           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
6013           0, 0
6014         },
6015         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6016           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
6017           0, 0
6018         },
6019         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
6020           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
6021           0, 0
6022         },
6023         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6024           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6025           0, 0
6026         },
6027         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6028           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6029           0, 0
6030         },
6031         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6032           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6033           0, 0
6034         },
6035         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6036           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6037           0, 0
6038         },
6039         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6040           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6041           0, 0
6042         },
6043         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6044           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6045           0, 0
6046         },
6047         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6048           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6049           0, 0
6050         },
6051         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6052           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6053           0, 0
6054         },
6055         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6056           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6057           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6058         },
6059         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6060           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6061           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6062         },
6063         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6064           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6065           0, 0
6066         },
6067         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6068           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6069           0, 0
6070         },
6071         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6072           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6073           0, 0
6074         },
6075         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6076           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6077           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6078         },
6079         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6080           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6081           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6082         },
6083         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6084           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6085           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6086         },
6087         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6088           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6089           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6090         },
6091         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6092           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6093           0, 0
6094         },
6095         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6096           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6097           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6098         },
6099         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6100           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6101           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6102         },
6103         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6104           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6105           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6106         },
6107         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6108           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6109           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6110         },
6111         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6112           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6113           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6114         },
6115         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6116           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6117           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6118         },
6119         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6120           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6121           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6122         },
6123         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6124           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6125           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6126         },
6127         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6128           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6129           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6130         },
6131         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6132           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6133           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6134         },
6135         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6136           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6137           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6138         },
6139         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6140           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6141           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6142         },
6143         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6144           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6145           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6146         },
6147         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6148           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6149           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6150         },
6151         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6152           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6153           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6154         },
6155         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6156           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6157           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6158         },
6159         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6160           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6161           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6162         },
6163         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6164           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6165           0, 0
6166         },
6167         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6168           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6169           0, 0
6170         },
6171         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6172           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6173           0, 0
6174         },
6175         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6176           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6177           0, 0
6178         },
6179         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6180           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6181           0, 0
6182         },
6183         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6184           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6185           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6186         },
6187         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6188           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6189           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6190         },
6191         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6192           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6193           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6194         },
6195         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6196           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6197           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6198         },
6199         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6200           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6201           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6202         },
6203         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6204           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6205           0, 0
6206         },
6207         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6208           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6209           0, 0
6210         },
6211         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6212           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6213           0, 0
6214         },
6215         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6216           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6217           0, 0
6218         },
6219         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6220           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6221           0, 0
6222         },
6223         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6224           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6225           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6226         },
6227         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6228           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6229           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6230         },
6231         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6232           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6233           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6234         },
6235         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6236           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6237           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6238         },
6239         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6240           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6241           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6242         },
6243         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6244           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6245           0, 0
6246         },
6247         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6248           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6249           0, 0
6250         },
6251         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6252           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6253           0, 0
6254         },
6255         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6256           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6257           0, 0
6258         },
6259         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6260           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6261           0, 0
6262         },
6263         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6264           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6265           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6266         },
6267         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6268           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6269           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6270         },
6271         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6272           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6273           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6274         },
6275         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6276           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6277           0, 0
6278         },
6279         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6280           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6281           0, 0
6282         },
6283         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6284           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6285           0, 0
6286         },
6287         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6288           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6289           0, 0
6290         },
6291         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6292           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6293           0, 0
6294         },
6295         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6296           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6297           0, 0
6298         }
6299 };
6300
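/*
 * Validate the injection request against the RAS sub-block table above,
 * then ask the PSP TA to trigger the error under the GRBM index lock.
 */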
6301 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6302                                      void *inject_if)
6303 {
6304         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6305         int ret;
6306         struct ta_ras_trigger_error_input block_info = { 0 };
6307
6308         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6309                 return -EINVAL;
6310
6311         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6312                 return -EINVAL;
6313
6314         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6315                 return -EPERM;
6316
6317         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6318               info->head.type)) {
6319                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6320                         ras_gfx_subblocks[info->head.sub_block_index].name,
6321                         info->head.type);
6322                 return -EPERM;
6323         }
6324
6325         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6326               info->head.type)) {
6327                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6328                         ras_gfx_subblocks[info->head.sub_block_index].name,
6329                         info->head.type);
6330                 return -EPERM;
6331         }
6332
6333         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6334         block_info.sub_block_index =
6335                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6336         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6337         block_info.address = info->address;
6338         block_info.value = info->value;
6339
6340         mutex_lock(&adev->grbm_idx_mutex);
6341         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6342         mutex_unlock(&adev->grbm_idx_mutex);
6343
6344         return ret;
6345 }
6346
6347 static const char *vml2_mems[] = {
6348         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6349         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6350         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6351         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6352         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6353         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6354         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6355         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6356         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6357         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6358         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6359         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6360         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6361         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6362         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6363         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6364 };
6365
6366 static const char *vml2_walker_mems[] = {
6367         "UTC_VML2_CACHE_PDE0_MEM0",
6368         "UTC_VML2_CACHE_PDE0_MEM1",
6369         "UTC_VML2_CACHE_PDE1_MEM0",
6370         "UTC_VML2_CACHE_PDE1_MEM1",
6371         "UTC_VML2_CACHE_PDE2_MEM0",
6372         "UTC_VML2_CACHE_PDE2_MEM1",
6373         "UTC_VML2_RDIF_LOG_FIFO",
6374 };
6375
6376 static const char *atc_l2_cache_2m_mems[] = {
6377         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6378         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6379         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6380         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6381 };
6382
6383 static const char *atc_l2_cache_4k_mems[] = {
6384         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6385         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6386         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6387         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6388         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6389         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6390         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6391         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6392         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6393         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6394         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6395         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6396         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6397         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6398         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6399         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6400         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6401         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6402         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6403         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6404         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6405         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6406         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6407         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6408         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6409         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6410         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6411         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6412         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6413         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6414         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6415         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6416 };
6417
6418 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6419                                          struct ras_err_data *err_data)
6420 {
6421         uint32_t i, data;
6422         uint32_t sec_count, ded_count;
6423
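        /* park the EDC index registers (255) and zero the counters */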
6424         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6425         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6426         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6427         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6428         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6429         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6430         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6431         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6432
6433         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6434                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6435                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6436
6437                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6438                 if (sec_count) {
6439                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6440                                 "SEC %d\n", i, vml2_mems[i], sec_count);
6441                         err_data->ce_count += sec_count;
6442                 }
6443
6444                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6445                 if (ded_count) {
6446                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6447                                 "DED %d\n", i, vml2_mems[i], ded_count);
6448                         err_data->ue_count += ded_count;
6449                 }
6450         }
6451
6452         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6453                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6454                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6455
6456                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6457                                                 SEC_COUNT);
6458                 if (sec_count) {
6459                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6460                                 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6461                         err_data->ce_count += sec_count;
6462                 }
6463
6464                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6465                                                 DED_COUNT);
6466                 if (ded_count) {
6467                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6468                                 "DED %d\n", i, vml2_walker_mems[i], ded_count);
6469                         err_data->ue_count += ded_count;
6470                 }
6471         }
6472
6473         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6474                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6475                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6476
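                /* extract the SEC count from bits [14:13] by hand */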
6477                 sec_count = (data & 0x00006000L) >> 0xd;
6478                 if (sec_count) {
6479                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6480                                 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6481                                 sec_count);
6482                         err_data->ce_count += sec_count;
6483                 }
6484         }
6485
6486         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6487                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6488                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6489
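                /* SEC is in bits [14:13], DED in bits [16:15], extracted by hand */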
6490                 sec_count = (data & 0x00006000L) >> 0xd;
6491                 if (sec_count) {
6492                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6493                                 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6494                                 sec_count);
6495                         err_data->ce_count += sec_count;
6496                 }
6497
6498                 ded_count = (data & 0x00018000L) >> 0xf;
6499                 if (ded_count) {
6500                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6501                                 "DED %d\n", i, atc_l2_cache_4k_mems[i],
6502                                 ded_count);
6503                         err_data->ue_count += ded_count;
6504                 }
6505         }
6506
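        /* leave the index registers parked at 255 on the way out */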
6507         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6508         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6509         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6510         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6511
6512         return 0;
6513 }
6514
6515 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6516         const struct soc15_reg_entry *reg,
6517         uint32_t se_id, uint32_t inst_id, uint32_t value,
6518         uint32_t *sec_count, uint32_t *ded_count)
6519 {
6520         uint32_t i;
6521         uint32_t sec_cnt, ded_cnt;
6522
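        /* scan the RAS field table; only entries matching this register contribute */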
6523         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6524                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6525                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6526                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6527                         continue;
6528
6529                 sec_cnt = (value &
6530                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6531                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6532                 if (sec_cnt) {
6533                         dev_info(adev->dev, "GFX SubBlock %s, "
6534                                 "Instance[%d][%d], SEC %d\n",
6535                                 gfx_v9_0_ras_fields[i].name,
6536                                 se_id, inst_id,
6537                                 sec_cnt);
6538                         *sec_count += sec_cnt;
6539                 }
6540
6541                 ded_cnt = (value &
6542                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6543                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6544                 if (ded_cnt) {
6545                         dev_info(adev->dev, "GFX SubBlock %s, "
6546                                 "Instance[%d][%d], DED %d\n",
6547                                 gfx_v9_0_ras_fields[i].name,
6548                                 se_id, inst_id,
6549                                 ded_cnt);
6550                         *ded_count += ded_cnt;
6551                 }
6552         }
6553
6554         return 0;
6555 }
6556
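/*
 * Clear every EDC counter: the per-SE/instance GC counters clear on read,
 * while the UTC/ATC banks are zeroed and then read back index by index.
 */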
6557 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6558 {
6559         int i, j, k;
6560
6561         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6562                 return;
6563
6564         /* read back registers to clear the counters */
6565         mutex_lock(&adev->grbm_idx_mutex);
6566         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6567                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6568                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6569                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6570                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6571                         }
6572                 }
6573         }
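        /* 0xe0000000 sets the SE/SH/instance broadcast bits in GRBM_GFX_INDEX */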
6574         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6575         mutex_unlock(&adev->grbm_idx_mutex);
6576
6577         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6578         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6579         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6580         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6581         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6582         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6583         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6584         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6585
6586         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6587                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6588                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6589         }
6590
6591         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6592                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6593                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6594         }
6595
6596         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6597                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6598                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6599         }
6600
6601         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6602                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6603                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6604         }
6605
6606         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6607         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6608         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6609         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6610 }
6611
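/*
 * Sum SEC (correctable) and DED (uncorrectable) counts across all
 * SE/SH/instance combinations, then fold in the UTC/ATC L2 status.
 */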
6612 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6613                                           void *ras_error_status)
6614 {
6615         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6616         uint32_t sec_count = 0, ded_count = 0;
6617         uint32_t i, j, k;
6618         uint32_t reg_value;
6619
6620         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6621                 return -EINVAL;
6622
6623         err_data->ue_count = 0;
6624         err_data->ce_count = 0;
6625
6626         mutex_lock(&adev->grbm_idx_mutex);
6627
6628         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6629                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6630                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6631                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6632                                 reg_value =
6633                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6634                                 if (reg_value)
6635                                         gfx_v9_0_ras_error_count(adev,
6636                                                 &gfx_v9_0_edc_counter_regs[i],
6637                                                 j, k, reg_value,
6638                                                 &sec_count, &ded_count);
6639                         }
6640                 }
6641         }
6642
6643         err_data->ce_count += sec_count;
6644         err_data->ue_count += ded_count;
6645
6646         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6647         mutex_unlock(&adev->grbm_idx_mutex);
6648
6649         gfx_v9_0_query_utc_edc_status(adev, err_data);
6650
6651         return 0;
6652 }
6653
6654 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6655 {
6656         const unsigned int cp_coher_cntl =
6657                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6658                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6659                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6660                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6661                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6662
6663         /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
6664         amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6665         amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6666         amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6667         amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6668         amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6669         amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6670         amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6671 }
6672
6673 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6674         .name = "gfx_v9_0",
6675         .early_init = gfx_v9_0_early_init,
6676         .late_init = gfx_v9_0_late_init,
6677         .sw_init = gfx_v9_0_sw_init,
6678         .sw_fini = gfx_v9_0_sw_fini,
6679         .hw_init = gfx_v9_0_hw_init,
6680         .hw_fini = gfx_v9_0_hw_fini,
6681         .suspend = gfx_v9_0_suspend,
6682         .resume = gfx_v9_0_resume,
6683         .is_idle = gfx_v9_0_is_idle,
6684         .wait_for_idle = gfx_v9_0_wait_for_idle,
6685         .soft_reset = gfx_v9_0_soft_reset,
6686         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6687         .set_powergating_state = gfx_v9_0_set_powergating_state,
6688         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6689 };
6690
6691 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6692         .type = AMDGPU_RING_TYPE_GFX,
6693         .align_mask = 0xff,
6694         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6695         .support_64bit_ptrs = true,
6696         .vmhub = AMDGPU_GFXHUB_0,
6697         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6698         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6699         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6700         .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
6701                 5 +  /* COND_EXEC */
6702                 7 +  /* PIPELINE_SYNC */
6703                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6704                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6705                 2 + /* VM_FLUSH */
6706                 8 +  /* FENCE for VM_FLUSH */
6707                 20 + /* GDS switch */
6708                 4 + /* double SWITCH_BUFFER,
6709                        the first COND_EXEC jumps to the place just
6710                        prior to this double SWITCH_BUFFER */
6711                 5 + /* COND_EXEC */
6712                 7 + /* HDP_flush */
6713                 4 + /* VGT_flush */
6714                 14 + /* CE_META */
6715                 31 + /* DE_META */
6716                 3 + /* CNTX_CTRL */
6717                 5 + /* HDP_INVL */
6718                 8 + 8 + /* FENCE x2 */
6719                 2 + /* SWITCH_BUFFER */
6720                 7, /* gfx_v9_0_emit_mem_sync */
6721         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6722         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6723         .emit_fence = gfx_v9_0_ring_emit_fence,
6724         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6725         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6726         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6727         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6728         .test_ring = gfx_v9_0_ring_test_ring,
6729         .test_ib = gfx_v9_0_ring_test_ib,
6730         .insert_nop = amdgpu_ring_insert_nop,
6731         .pad_ib = amdgpu_ring_generic_pad_ib,
6732         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6733         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6734         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6735         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6736         .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
6737         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6738         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6739         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6740         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6741         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6742 };
6743
6744 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6745         .type = AMDGPU_RING_TYPE_COMPUTE,
6746         .align_mask = 0xff,
6747         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6748         .support_64bit_ptrs = true,
6749         .vmhub = AMDGPU_GFXHUB_0,
6750         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6751         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6752         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6753         .emit_frame_size =
6754                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6755                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6756                 5 + /* hdp invalidate */
6757                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6758                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6759                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6760                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6761                 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6762                 7, /* gfx_v9_0_emit_mem_sync */
6763         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6764         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6765         .emit_fence = gfx_v9_0_ring_emit_fence,
6766         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6767         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6768         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6769         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6770         .test_ring = gfx_v9_0_ring_test_ring,
6771         .test_ib = gfx_v9_0_ring_test_ib,
6772         .insert_nop = amdgpu_ring_insert_nop,
6773         .pad_ib = amdgpu_ring_generic_pad_ib,
6774         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6775         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6776         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6777         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6778 };
6779
6780 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6781         .type = AMDGPU_RING_TYPE_KIQ,
6782         .align_mask = 0xff,
6783         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6784         .support_64bit_ptrs = true,
6785         .vmhub = AMDGPU_GFXHUB_0,
6786         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6787         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6788         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6789         .emit_frame_size =
6790                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6791                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6792                 5 + /* hdp invalidate */
6793                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6794                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6795                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6796                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6797                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6798         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6799         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6800         .test_ring = gfx_v9_0_ring_test_ring,
6801         .insert_nop = amdgpu_ring_insert_nop,
6802         .pad_ib = amdgpu_ring_generic_pad_ib,
6803         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6804         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6805         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6806         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6807 };
6808
6809 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6810 {
6811         int i;
6812
6813         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6814
6815         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6816                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6817
6818         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6819                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6820 }
6821
6822 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6823         .set = gfx_v9_0_set_eop_interrupt_state,
6824         .process = gfx_v9_0_eop_irq,
6825 };
6826
6827 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6828         .set = gfx_v9_0_set_priv_reg_fault_state,
6829         .process = gfx_v9_0_priv_reg_irq,
6830 };
6831
6832 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6833         .set = gfx_v9_0_set_priv_inst_fault_state,
6834         .process = gfx_v9_0_priv_inst_irq,
6835 };
6836
6837 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6838         .set = gfx_v9_0_set_cp_ecc_error_state,
6839         .process = amdgpu_gfx_cp_ecc_error_irq,
6840 };
6841
6843 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6844 {
6845         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6846         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6847
6848         adev->gfx.priv_reg_irq.num_types = 1;
6849         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6850
6851         adev->gfx.priv_inst_irq.num_types = 1;
6852         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6853
6854         adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
6855         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6856 }
6857
6858 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6859 {
6860         switch (adev->asic_type) {
6861         case CHIP_VEGA10:
6862         case CHIP_VEGA12:
6863         case CHIP_VEGA20:
6864         case CHIP_RAVEN:
6865         case CHIP_ARCTURUS:
6866         case CHIP_RENOIR:
6867                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
6868                 break;
6869         default:
6870                 break;
6871         }
6872 }
6873
6874 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
6875 {
6876         /* init ASIC GDS info */
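        /* GDS sizes are in bytes: 0x10000 is 64KB, 0x1000 is 4KB */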
6877         switch (adev->asic_type) {
6878         case CHIP_VEGA10:
6879         case CHIP_VEGA12:
6880         case CHIP_VEGA20:
6881                 adev->gds.gds_size = 0x10000;
6882                 break;
6883         case CHIP_RAVEN:
6884         case CHIP_ARCTURUS:
6885                 adev->gds.gds_size = 0x1000;
6886                 break;
6887         default:
6888                 adev->gds.gds_size = 0x10000;
6889                 break;
6890         }
6891
6892         switch (adev->asic_type) {
6893         case CHIP_VEGA10:
6894         case CHIP_VEGA20:
6895                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6896                 break;
6897         case CHIP_VEGA12:
6898                 adev->gds.gds_compute_max_wave_id = 0x27f;
6899                 break;
6900         case CHIP_RAVEN:
6901                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
6902                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6903                 else
6904                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6905                 break;
6906         case CHIP_ARCTURUS:
6907                 adev->gds.gds_compute_max_wave_id = 0xfff;
6908                 break;
6909         default:
6910                 /* this really depends on the chip */
6911                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6912                 break;
6913         }
6914
6915         adev->gds.gws_size = 64;
6916         adev->gds.oa_size = 16;
6917 }
6918
6919 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6920                                                  u32 bitmap)
6921 {
6922         u32 data;
6923
6924         if (!bitmap)
6925                 return;
6926
6927         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6928         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6929
6930         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
6931 }
6932
6933 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6934 {
6935         u32 data, mask;
6936
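        /* a CU is inactive if fused off (CC_...) or user-disabled (GC_USER_...) */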
6937         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
6938         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
6939
6940         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6941         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6942
6943         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
6944
6945         return (~data) & mask;
6946 }
6947
6948 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
6949                                  struct amdgpu_cu_info *cu_info)
6950 {
6951         int i, j, k, counter, active_cu_number = 0;
6952         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6953         unsigned disable_masks[4 * 4];
6954
6955         if (!adev || !cu_info)
6956                 return -EINVAL;
6957
6958         /*
6959          * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
6960          */
6961         if (adev->gfx.config.max_shader_engines *
6962                 adev->gfx.config.max_sh_per_se > 16)
6963                 return -EINVAL;
6964
6965         amdgpu_gfx_parse_disable_cu(disable_masks,
6966                                     adev->gfx.config.max_shader_engines,
6967                                     adev->gfx.config.max_sh_per_se);
6968
6969         mutex_lock(&adev->grbm_idx_mutex);
6970         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6971                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6972                         mask = 1;
6973                         ao_bitmap = 0;
6974                         counter = 0;
6975                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
6976                         gfx_v9_0_set_user_cu_inactive_bitmap(
6977                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
6978                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
6979
6980                         /*
6981                          * The bitmap (and ao_cu_bitmap) in the cu_info
6982                          * structure is a 4x4 array, which suits Vega
6983                          * ASICs with their 4*2 SE/SH layout.
6984                          * But for Arcturus, the SE/SH layout changes to 8*1.
6985                          * To minimize the impact, we remap it onto the
6986                          * current bitmap array as below:
6987                          *    SE4,SH0 --> bitmap[0][1]
6988                          *    SE5,SH0 --> bitmap[1][1]
6989                          *    SE6,SH0 --> bitmap[2][1]
6990                          *    SE7,SH0 --> bitmap[3][1]
6991                          */
6992                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
6993
6994                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6995                                 if (bitmap & mask) {
6996                                         if (counter < adev->gfx.config.max_cu_per_sh)
6997                                                 ao_bitmap |= mask;
6998                                         counter++;
6999                                 }
7000                                 mask <<= 1;
7001                         }
7002                         active_cu_number += counter;
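                        /* ao_cu_mask is 32 bits (16 per SE, 8 per SH), so only SE0-1/SH0-1 fit */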
7003                         if (i < 2 && j < 2)
7004                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
7005                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
7006                 }
7007         }
7008         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
7009         mutex_unlock(&adev->grbm_idx_mutex);
7010
7011         cu_info->number = active_cu_number;
7012         cu_info->ao_cu_mask = ao_cu_mask;
7013         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
7014
7015         return 0;
7016 }
7017
7018 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
7019 {
7020         .type = AMD_IP_BLOCK_TYPE_GFX,
7021         .major = 9,
7022         .minor = 0,
7023         .rev = 0,
7024         .funcs = &gfx_v9_0_ip_funcs,
7025 };