/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "gfx_v9_4.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0
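
/*
 * mmGCEA_PROBE_MAP is not exposed by the generated gc_9_0 register headers,
 * so its offset is defined locally above; the Arcturus TCP_CHAN_STEER
 * offsets further down follow the same pattern.  The MODULE_FIRMWARE()
 * declarations below list every firmware image this IP block may request,
 * so that initramfs tooling knows to bundle the files with the module.
 */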

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT                                                         0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_1_ARCT                                                         0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_2_ARCT                                                         0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_3_ARCT                                                         0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_4_ARCT                                                         0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_5_ARCT                                                         0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                                                        0
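
/*
 * These TCP channel-steering registers exist only on Arcturus (GC 9.4.1),
 * which is why they are defined locally rather than coming from the gc_9_0
 * headers; golden values for them are programmed from
 * golden_settings_gc_9_4_1_arct[] below.
 */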

enum ta_ras_gfx_subblock {
	/*CPC*/
	TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
	TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
	TA_RAS_BLOCK__GFX_CPC_UCODE,
	TA_RAS_BLOCK__GFX_DC_STATE_ME1,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
	TA_RAS_BLOCK__GFX_DC_STATE_ME2,
	TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
	TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
	/* CPF*/
	TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
	TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
	TA_RAS_BLOCK__GFX_CPF_TAG,
	TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
	/* CPG*/
	TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
	TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
	TA_RAS_BLOCK__GFX_CPG_TAG,
	TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
	/* GDS*/
	TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
	TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
	TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
	/* SPI*/
	TA_RAS_BLOCK__GFX_SPI_SR_MEM,
	/* SQ*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
	TA_RAS_BLOCK__GFX_SQ_LDS_D,
	TA_RAS_BLOCK__GFX_SQ_LDS_I,
	TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
	TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
	/* SQC (3 ranges)*/
	TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	/* SQC range 0*/
	TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
	TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
		TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
	TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
	/* SQC range 1*/
	TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
	/* SQC range 2*/
	TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
		TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
	TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
		TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
	TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
	/* TA*/
	TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
	TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
	TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
	TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
	/* TCA*/
	TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
	TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
	/* TCC (5 sub-ranges)*/
	TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	/* TCC range 0*/
	TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
	TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
	TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
	/* TCC range 1*/
	TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
	TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
		TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
	/* TCC range 2*/
	TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
	TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
	TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
	TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
	TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
		TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
	/* TCC range 3*/
	TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
	TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
		TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
	/* TCC range 4*/
	TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
		TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
	TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
		TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
	TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
	/* TCI*/
	TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
	/* TCP*/
	TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
	TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
	TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
	TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
	TA_RAS_BLOCK__GFX_TCP_DB_RAM,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
	TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
	/* TD*/
	TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
	TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
	TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
	/* EA (3 sub-ranges)*/
	TA_RAS_BLOCK__GFX_EA_INDEX_START,
	/* EA range 0*/
	TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
	/* EA range 1*/
	TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
	TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
	TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
	TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
	/* EA range 2*/
	TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
	TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
	TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
	TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
	/* UTC VM L2 bank*/
	TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
	/* UTC VM walker*/
	TA_RAS_BLOCK__UTC_VML2_WALKER,
	/* UTC ATC L2 2MB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
	/* UTC ATC L2 4KB cache*/
	TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
	TA_RAS_BLOCK__GFX_MAX
};

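/*
 * Note the aliasing pattern in the enum above: each *_INDEX_START/_END
 * symbol is an alias for the first/last real sub-block of an IP block, so
 * a sub-block can be classified with a simple range check without adding
 * extra enumerator values.
 */
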
struct ras_gfx_subblock {
	unsigned char *name;
	int ta_subblock;
	int hw_supported_error_type;
	int sw_supported_error_type;
};

#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
	[AMDGPU_RAS_BLOCK__##subblock] = {                                     \
		#subblock,                                                     \
		TA_RAS_BLOCK__##subblock,                                      \
		((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
		(((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
	}

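/*
 * A worked expansion of the macro, to make the bit packing concrete (the
 * meaning assigned to each bit comes from the RAS TA interface and is not
 * spelled out here):
 *
 *   AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1)
 *
 * expands to
 *
 *   [AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH] = {
 *           "GFX_CPC_SCRATCH",
 *           TA_RAS_BLOCK__GFX_CPC_SCRATCH,
 *           0x0e,   // (0) | (1 << 1) | (1 << 2) | (1 << 3)
 *           0x06,   // (1 << 1) | (0 << 3) | (0) | (1 << 2)
 *   },
 */
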
static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
			     0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
			     1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
			     0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
			     0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
	AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

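/*
 * Each SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) entry is
 * applied by soc15_program_register_sequence() as a masked
 * read-modify-write: the bits covered by and_mask are cleared and replaced
 * with (or_mask & and_mask), with a plain write when and_mask is
 * 0xffffffff.  E.g. the mmDB_DEBUG2 entry above only touches the bits
 * under 0xf00fffff and forces them to 0x00000400.
 */
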
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
	{SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

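/*
 * Offsets of RLC_SRM_INDEX_CNTL_ADDR_0..7 and _DATA_0..7 relative to the
 * _0 instance, so the register pairs can be indexed in a loop even though
 * the headers do not guarantee they are contiguous; the first entry is
 * deliberately zero.
 */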
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

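/*
 * Write a GC register through the RLC (the SR-IOV path for registers the
 * guest cannot program via direct MMIO): the value is parked in
 * SCRATCH_REG0 and the register offset, with bit 31 set as a "pending"
 * flag, in SCRATCH_REG1; ringing RLC_SPARE_INT then asks the RLC firmware
 * to perform the write, and completion is detected by polling until the
 * firmware clears bit 31 (up to 50000 * 10us).  GRBM_GFX_CNTL/INDEX are
 * special-cased: they are mirrored into SCRATCH_REG2/3 and also written
 * via direct MMIO.
 */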
static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
{
	static void *scratch_reg0;
	static void *scratch_reg1;
	static void *scratch_reg2;
	static void *scratch_reg3;
	static void *spare_int;
	static uint32_t grbm_cntl;
	static uint32_t grbm_idx;

	scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
	scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
	scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
	scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
	spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;

	grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
	grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;

	if (amdgpu_sriov_runtime(adev)) {
		pr_err("shouldn't call rlcg write register during runtime\n");
		return;
	}

	if (offset == grbm_cntl || offset == grbm_idx) {
		if (offset == grbm_cntl)
			writel(v, scratch_reg2);
		else if (offset == grbm_idx)
			writel(v, scratch_reg3);

		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		uint32_t i = 0;
		uint32_t retries = 50000;

		writel(v, scratch_reg0);
		writel(offset | 0x80000000, scratch_reg1);
		writel(1, spare_int);
		for (i = 0; i < retries; i++) {
			u32 tmp;

			tmp = readl(scratch_reg1);
			if (!(tmp & 0x80000000))
				break;

			udelay(10);
		}
		if (i >= retries)
			pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
	}
}

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
					  void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
				     void *inject_if);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);

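/*
 * KIQ (kernel interface queue) PM4 packet builders.  The KIQ is a
 * driver-owned compute queue through which the other queues are mapped,
 * unmapped and queried; each helper below emits a single PM4 packet whose
 * dword count must match the corresponding *_size field in
 * gfx_v9_0_kiq_pm4_funcs.
 */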
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
				uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
		PACKET3_SET_RESOURCES_VMID_MASK(0) |
		/* vmid_mask:0 queue_type:0 (KIQ) */
		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem:1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			 /* queue_type: normal compute queue */
			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			 /* alloc format: all_on_one_pipe */
			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			 /* num_queues: must be 1 */
			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_0_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};
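
/*
 * The *_size fields above count dwords including the PACKET3 header:
 * PACKET3(op, n) is followed by n + 1 payload dwords, so e.g.
 * map_queues_size = 7 matches the header plus six payload dwords emitted
 * by gfx_v9_0_kiq_map_queues(), and invalidate_tlbs_size = 2 matches
 * PACKET3(PACKET3_INVALIDATE_TLBS, 0) plus one payload dword.
 */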

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
}

static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_ARCTURUS:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_4_1_arct,
						ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev, golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv2,
							ARRAY_SIZE(golden_settings_gc_9_1_rv2));
		else
			soc15_program_register_sequence(adev,
							golden_settings_gc_9_1_rv1,
							ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	case CHIP_RENOIR:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rn,
						ARRAY_SIZE(golden_settings_gc_9_1_rn));
		return; /* Renoir does not need the common golden settings */
	default:
		break;
	}

	if (adev->asic_type != CHIP_ARCTURUS)
		soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
						(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

993 static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
994                                        bool wc, uint32_t reg, uint32_t val)
995 {
996         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
997         amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
998                                 WRITE_DATA_DST_SEL(0) |
999                                 (wc ? WR_CONFIRM : 0));
1000         amdgpu_ring_write(ring, reg);
1001         amdgpu_ring_write(ring, 0);
1002         amdgpu_ring_write(ring, val);
1003 }
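
/*
 * Usage sketch (illustrative only): the helper above emits one 5-dword
 * PACKET3_WRITE_DATA (header, control, reg offset, addr-hi = 0, value).
 * The caller must already have ring space reserved via amdgpu_ring_alloc();
 * the register and value here are arbitrary.
 */
static void __maybe_unused
example_emit_scratch_write(struct amdgpu_ring *ring)
{
	/* eng_sel 0 = ME; wc = true requests write confirmation */
	gfx_v9_0_write_data_to_reg(ring, 0, true,
				   SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0),
				   0xDEADBEEF);
}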
1004
1005 static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
1006                                   int mem_space, int opt, uint32_t addr0,
1007                                   uint32_t addr1, uint32_t ref, uint32_t mask,
1008                                   uint32_t inv)
1009 {
1010         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1011         amdgpu_ring_write(ring,
1012                                  /* memory (1) or register (0) */
1013                                  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
1014                                  WAIT_REG_MEM_OPERATION(opt) | /* wait */
1015                                  WAIT_REG_MEM_FUNCTION(3) |  /* equal */
1016                                  WAIT_REG_MEM_ENGINE(eng_sel)));
1017
1018         if (mem_space)
1019                 BUG_ON(addr0 & 0x3); /* Dword align */
1020         amdgpu_ring_write(ring, addr0);
1021         amdgpu_ring_write(ring, addr1);
1022         amdgpu_ring_write(ring, ref);
1023         amdgpu_ring_write(ring, mask);
1024         amdgpu_ring_write(ring, inv); /* poll interval */
1025 }
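
/*
 * Usage sketch (illustrative only): make the engine poll SCRATCH_REG0 until
 * it reads back 0xDEADBEEF (the function is hardcoded to "equal" above).
 * mem_space 0 selects register space, opt 0 a plain wait; 0x20 is the poll
 * interval commonly passed to this helper.
 */
static void __maybe_unused
example_wait_on_scratch(struct amdgpu_ring *ring)
{
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0,
			      SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0), 0,
			      0xDEADBEEF, 0xffffffff, 0x20);
}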
1026
1027 static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
1028 {
1029         struct amdgpu_device *adev = ring->adev;
1030         uint32_t scratch;
1031         uint32_t tmp = 0;
1032         unsigned i;
1033         int r;
1034
1035         r = amdgpu_gfx_scratch_get(adev, &scratch);
1036         if (r)
1037                 return r;
1038
1039         WREG32(scratch, 0xCAFEDEAD);
1040         r = amdgpu_ring_alloc(ring, 3);
1041         if (r)
1042                 goto error_free_scratch;
1043
1044         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
1045         amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
1046         amdgpu_ring_write(ring, 0xDEADBEEF);
1047         amdgpu_ring_commit(ring);
1048
1049         for (i = 0; i < adev->usec_timeout; i++) {
1050                 tmp = RREG32(scratch);
1051                 if (tmp == 0xDEADBEEF)
1052                         break;
1053                 udelay(1);
1054         }
1055
1056         if (i >= adev->usec_timeout)
1057                 r = -ETIMEDOUT;
1058
1059 error_free_scratch:
1060         amdgpu_gfx_scratch_free(adev, scratch);
1061         return r;
1062 }
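
/*
 * Note on the ring test above: it is a plain CPU/GPU handshake.  The CPU
 * seeds a scratch register with 0xCAFEDEAD, asks the CP to overwrite it
 * with 0xDEADBEEF via SET_UCONFIG_REG, then polls once per microsecond
 * (up to adev->usec_timeout iterations) until the new value shows up.
 */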
1063
1064 static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1065 {
1066         struct amdgpu_device *adev = ring->adev;
1067         struct amdgpu_ib ib;
1068         struct dma_fence *f = NULL;
1070         unsigned index;
1071         uint64_t gpu_addr;
1072         uint32_t tmp;
1073         long r;
1074
1075         r = amdgpu_device_wb_get(adev, &index);
1076         if (r)
1077                 return r;
1078
1079         gpu_addr = adev->wb.gpu_addr + (index * 4);
1080         adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
1081         memset(&ib, 0, sizeof(ib));
1082         r = amdgpu_ib_get(adev, NULL, 16,
1083                           AMDGPU_IB_POOL_DIRECT, &ib);
1084         if (r)
1085                 goto err1;
1086
1087         ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
1088         ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
1089         ib.ptr[2] = lower_32_bits(gpu_addr);
1090         ib.ptr[3] = upper_32_bits(gpu_addr);
1091         ib.ptr[4] = 0xDEADBEEF;
1092         ib.length_dw = 5;
1093
1094         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1095         if (r)
1096                 goto err2;
1097
1098         r = dma_fence_wait_timeout(f, false, timeout);
1099         if (r == 0) {
1100                 r = -ETIMEDOUT;
1101                 goto err2;
1102         } else if (r < 0) {
1103                 goto err2;
1104         }
1105
1106         tmp = adev->wb.wb[index];
1107         if (tmp == 0xDEADBEEF)
1108                 r = 0;
1109         else
1110                 r = -EINVAL;
1111
1112 err2:
1113         amdgpu_ib_free(adev, &ib, NULL);
1114         dma_fence_put(f);
1115 err1:
1116         amdgpu_device_wb_free(adev, index);
1117         return r;
1118 }
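
/*
 * Note on the IB test above: dma_fence_wait_timeout() returns 0 on timeout,
 * a negative errno on error and the remaining timeout (> 0) on success,
 * which is why both the == 0 and the < 0 cases bail out before the
 * writeback slot is checked for 0xDEADBEEF.
 */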
1119
1120
1121 static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
1122 {
1123         release_firmware(adev->gfx.pfp_fw);
1124         adev->gfx.pfp_fw = NULL;
1125         release_firmware(adev->gfx.me_fw);
1126         adev->gfx.me_fw = NULL;
1127         release_firmware(adev->gfx.ce_fw);
1128         adev->gfx.ce_fw = NULL;
1129         release_firmware(adev->gfx.rlc_fw);
1130         adev->gfx.rlc_fw = NULL;
1131         release_firmware(adev->gfx.mec_fw);
1132         adev->gfx.mec_fw = NULL;
1133         release_firmware(adev->gfx.mec2_fw);
1134         adev->gfx.mec2_fw = NULL;
1135
1136         kfree(adev->gfx.rlc.register_list_format);
1137 }
1138
1139 static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
1140 {
1141         const struct rlc_firmware_header_v2_1 *rlc_hdr;
1142
1143         rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
1144         adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
1145         adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
1146         adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
1147         adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
1148         adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
1149         adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
1150         adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
1151         adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
1152         adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
1153         adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
1154         adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
1155         adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
1156         adev->gfx.rlc.reg_list_format_direct_reg_list_length =
1157                         le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
1158 }
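
/*
 * Illustrative sketch (not driver code): every save/restore list parsed
 * above follows the same convention -- a byte offset relative to the start
 * of the firmware header plus a byte size, both stored little-endian.  A
 * generic accessor would look like this (offset already converted with
 * le32_to_cpu()):
 */
static const u8 *__maybe_unused
example_rlc_section(const struct rlc_firmware_header_v2_1 *rlc_hdr,
		    u32 offset_bytes)
{
	return (const u8 *)rlc_hdr + offset_bytes;
}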
1159
1160 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
1161 {
1162         adev->gfx.me_fw_write_wait = false;
1163         adev->gfx.mec_fw_write_wait = false;
1164
1165         if ((adev->asic_type != CHIP_ARCTURUS) &&
1166             ((adev->gfx.mec_fw_version < 0x000001a5) ||
1167             (adev->gfx.mec_feature_version < 46) ||
1168             (adev->gfx.pfp_fw_version < 0x000000b7) ||
1169             (adev->gfx.pfp_feature_version < 46)))
1170                 DRM_WARN_ONCE("CP firmware version too old, please update!");
1171
1172         switch (adev->asic_type) {
1173         case CHIP_VEGA10:
1174                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1175                     (adev->gfx.me_feature_version >= 42) &&
1176                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1177                     (adev->gfx.pfp_feature_version >= 42))
1178                         adev->gfx.me_fw_write_wait = true;
1179
1180                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1181                     (adev->gfx.mec_feature_version >= 42))
1182                         adev->gfx.mec_fw_write_wait = true;
1183                 break;
1184         case CHIP_VEGA12:
1185                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1186                     (adev->gfx.me_feature_version >= 44) &&
1187                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1188                     (adev->gfx.pfp_feature_version >= 44))
1189                         adev->gfx.me_fw_write_wait = true;
1190
1191                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1192                     (adev->gfx.mec_feature_version >= 44))
1193                         adev->gfx.mec_fw_write_wait = true;
1194                 break;
1195         case CHIP_VEGA20:
1196                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1197                     (adev->gfx.me_feature_version >= 44) &&
1198                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1199                     (adev->gfx.pfp_feature_version >= 44))
1200                         adev->gfx.me_fw_write_wait = true;
1201
1202                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1203                     (adev->gfx.mec_feature_version >= 44))
1204                         adev->gfx.mec_fw_write_wait = true;
1205                 break;
1206         case CHIP_RAVEN:
1207                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1208                     (adev->gfx.me_feature_version >= 42) &&
1209                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1210                     (adev->gfx.pfp_feature_version >= 42))
1211                         adev->gfx.me_fw_write_wait = true;
1212
1213                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1214                     (adev->gfx.mec_feature_version >= 42))
1215                         adev->gfx.mec_fw_write_wait = true;
1216                 break;
1217         default:
1218                 adev->gfx.me_fw_write_wait = true;
1219                 adev->gfx.mec_fw_write_wait = true;
1220                 break;
1221         }
1222 }
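
/*
 * The me/mec_fw_write_wait flags computed above gate the optimized
 * write-then-wait path used later in this file: new enough CP firmware can
 * do a register write plus wait in a single WAIT_REG_MEM packet, while
 * older firmware needs separate write and wait packets.
 */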
1223
1224 struct amdgpu_gfxoff_quirk {
1225         u16 chip_vendor;
1226         u16 chip_device;
1227         u16 subsys_vendor;
1228         u16 subsys_device;
1229         u8 revision;
1230 };
1231
1232 static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
1233         /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
1234         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1235         /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
1236         { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
1237         /* GFXOFF is unstable on C6 parts with VBIOS 113-RAVEN-114 */
1238         { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
1239         { 0, 0, 0, 0, 0 },
1240 };
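
/*
 * Extending the quirk list above is a one-line change: add the PCI vendor,
 * device, subsystem vendor, subsystem device and revision in front of the
 * all-zero terminator.  The entry below is made up for illustration only;
 * real values come from a bug report (all five show up in "lspci -nnv"):
 *
 *	{ 0x1002, 0x15dd, 0xffff, 0xffff, 0xc1 },
 */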
1241
1242 static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
1243 {
1244         const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;
1245
1246         while (p && p->chip_device != 0) {
1247                 if (pdev->vendor == p->chip_vendor &&
1248                     pdev->device == p->chip_device &&
1249                     pdev->subsystem_vendor == p->subsys_vendor &&
1250                     pdev->subsystem_device == p->subsys_device &&
1251                     pdev->revision == p->revision) {
1252                         return true;
1253                 }
1254                 ++p;
1255         }
1256         return false;
1257 }
1258
1259 static bool is_raven_kicker(struct amdgpu_device *adev)
1260 {
1261         return adev->pm.fw_version >= 0x41e2b;
1265 }
1266
1267 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1268 {
1269         if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
1270                 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1271
1272         switch (adev->asic_type) {
1273         case CHIP_VEGA10:
1274         case CHIP_VEGA12:
1275         case CHIP_VEGA20:
1276                 break;
1277         case CHIP_RAVEN:
1278                 if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1279                       (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
1280                     ((!is_raven_kicker(adev) &&
1281                       adev->gfx.rlc_fw_version < 531) ||
1282                      (adev->gfx.rlc_feature_version < 1) ||
1283                      !adev->gfx.rlc.is_rlc_v2_1))
1284                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1285
1286                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1287                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1288                                 AMD_PG_SUPPORT_CP |
1289                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1290                 break;
1291         case CHIP_RENOIR:
1292                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1293                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1294                                 AMD_PG_SUPPORT_CP |
1295                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1296                 break;
1297         default:
1298                 break;
1299         }
1300 }
1301
1302 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1303                                           const char *chip_name)
1304 {
1305         char fw_name[30];
1306         int err;
1307         struct amdgpu_firmware_info *info = NULL;
1308         const struct common_firmware_header *header = NULL;
1309         const struct gfx_firmware_header_v1_0 *cp_hdr;
1310
1311         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1312         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1313         if (err)
1314                 goto out;
1315         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1316         if (err)
1317                 goto out;
1318         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1319         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1320         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1321
1322         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1323         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1324         if (err)
1325                 goto out;
1326         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1327         if (err)
1328                 goto out;
1329         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1330         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1331         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1332
1333         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1334         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1335         if (err)
1336                 goto out;
1337         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1338         if (err)
1339                 goto out;
1340         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1341         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1342         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1343
1344         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1345                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1346                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1347                 info->fw = adev->gfx.pfp_fw;
1348                 header = (const struct common_firmware_header *)info->fw->data;
1349                 adev->firmware.fw_size +=
1350                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1351
1352                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1353                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1354                 info->fw = adev->gfx.me_fw;
1355                 header = (const struct common_firmware_header *)info->fw->data;
1356                 adev->firmware.fw_size +=
1357                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1358
1359                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1360                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1361                 info->fw = adev->gfx.ce_fw;
1362                 header = (const struct common_firmware_header *)info->fw->data;
1363                 adev->firmware.fw_size +=
1364                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1365         }
1366
1367 out:
1368         if (err) {
1369                 dev_err(adev->dev,
1370                         "gfx9: Failed to load firmware \"%s\"\n",
1371                         fw_name);
1372                 release_firmware(adev->gfx.pfp_fw);
1373                 adev->gfx.pfp_fw = NULL;
1374                 release_firmware(adev->gfx.me_fw);
1375                 adev->gfx.me_fw = NULL;
1376                 release_firmware(adev->gfx.ce_fw);
1377                 adev->gfx.ce_fw = NULL;
1378         }
1379         return err;
1380 }
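
/*
 * Illustrative sketch (not driver code): the three CP blocks above repeat
 * the same fetch/validate/parse steps.  Condensed, with error unwinding
 * left to the caller:
 */
static int __maybe_unused
example_load_one_ucode(struct amdgpu_device *adev, const char *fw_name,
		       const struct firmware **fw, u32 *fw_ver, u32 *feat_ver)
{
	const struct gfx_firmware_header_v1_0 *hdr;
	int err;

	err = request_firmware(fw, fw_name, adev->dev); /* fetch the blob */
	if (err)
		return err;
	err = amdgpu_ucode_validate(*fw); /* sanity-check the header */
	if (err)
		return err;

	hdr = (const struct gfx_firmware_header_v1_0 *)(*fw)->data;
	*fw_ver = le32_to_cpu(hdr->header.ucode_version);
	*feat_ver = le32_to_cpu(hdr->ucode_feature_version);
	return 0;
}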
1381
1382 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1383                                           const char *chip_name)
1384 {
1385         char fw_name[30];
1386         int err;
1387         struct amdgpu_firmware_info *info = NULL;
1388         const struct common_firmware_header *header = NULL;
1389         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1390         unsigned int *tmp = NULL;
1391         unsigned int i = 0;
1392         uint16_t version_major;
1393         uint16_t version_minor;
1394         uint32_t smu_version;
1395
1396         /*
1397          * For Picasso on AM4-socket boards, use picasso_rlc_am4.bin
1398          * instead of picasso_rlc.bin.
1399          * Detection: PCO AM4 parts have revision 0xC8..0xCF or
1400          * 0xD8..0xDF; anything else is PCO FP5.
1401          */
1404         if (!strcmp(chip_name, "picasso") &&
1405                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1406                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1407                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1408         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1409                 (smu_version >= 0x41e2b))
1410                 /* The SMC is loaded by the SBIOS on APUs, so the
1411                  * SMU version can be read directly.
1412                  */
1413                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1414         else
1415                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1416         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1417         if (err)
1418                 goto out;
1419         err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
1420         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1421
1422         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1423         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1424         if (version_major == 2 && version_minor == 1)
1425                 adev->gfx.rlc.is_rlc_v2_1 = true;
1426
1427         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1428         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1429         adev->gfx.rlc.save_and_restore_offset =
1430                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1431         adev->gfx.rlc.clear_state_descriptor_offset =
1432                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1433         adev->gfx.rlc.avail_scratch_ram_locations =
1434                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1435         adev->gfx.rlc.reg_restore_list_size =
1436                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1437         adev->gfx.rlc.reg_list_format_start =
1438                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1439         adev->gfx.rlc.reg_list_format_separate_start =
1440                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1441         adev->gfx.rlc.starting_offsets_start =
1442                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1443         adev->gfx.rlc.reg_list_format_size_bytes =
1444                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1445         adev->gfx.rlc.reg_list_size_bytes =
1446                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1447         adev->gfx.rlc.register_list_format =
1448                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1449                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1450         if (!adev->gfx.rlc.register_list_format) {
1451                 err = -ENOMEM;
1452                 goto out;
1453         }
1454
1455         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1456                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1457         for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1458                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1459
1460         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1461
1462         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1463                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1464         for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1465                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1466
1467         if (adev->gfx.rlc.is_rlc_v2_1)
1468                 gfx_v9_0_init_rlc_ext_microcode(adev);
1469
1470         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1471                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1472                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1473                 info->fw = adev->gfx.rlc_fw;
1474                 header = (const struct common_firmware_header *)info->fw->data;
1475                 adev->firmware.fw_size +=
1476                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1477
1478                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1479                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1480                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1481                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1482                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1483                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1484                         info->fw = adev->gfx.rlc_fw;
1485                         adev->firmware.fw_size +=
1486                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1487
1488                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1489                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1490                         info->fw = adev->gfx.rlc_fw;
1491                         adev->firmware.fw_size +=
1492                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1493
1494                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1495                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1496                         info->fw = adev->gfx.rlc_fw;
1497                         adev->firmware.fw_size +=
1498                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1499                 }
1500         }
1501
1502 out:
1503         if (err) {
1504                 dev_err(adev->dev,
1505                         "gfx9: Failed to load firmware \"%s\"\n",
1506                         fw_name);
1507                 release_firmware(adev->gfx.rlc_fw);
1508                 adev->gfx.rlc_fw = NULL;
1509         }
1510         return err;
1511 }
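
/*
 * Note: the PSP-load bookkeeping above rounds every ucode section up to a
 * whole page, e.g. with 4 KiB pages ALIGN(0x1840, PAGE_SIZE) == 0x2000, so
 * adev->firmware.fw_size ends up as the total page-aligned footprint that
 * PSP loading will need.
 */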
1512
1513 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1514                                           const char *chip_name)
1515 {
1516         char fw_name[30];
1517         int err;
1518         struct amdgpu_firmware_info *info = NULL;
1519         const struct common_firmware_header *header = NULL;
1520         const struct gfx_firmware_header_v1_0 *cp_hdr;
1521
1522         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1523         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1524         if (err)
1525                 goto out;
1526         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1527         if (err)
1528                 goto out;
1529         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1530         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1531         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1532
1534         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1535         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1536         if (!err) {
1537                 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1538                 if (err)
1539                         goto out;
1540                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1541                                 adev->gfx.mec2_fw->data;
1542                 adev->gfx.mec2_fw_version =
1543                                 le32_to_cpu(cp_hdr->header.ucode_version);
1544                 adev->gfx.mec2_feature_version =
1545                                 le32_to_cpu(cp_hdr->ucode_feature_version);
1546         } else {
1547                 err = 0;
1548                 adev->gfx.mec2_fw = NULL;
1549         }
1550
1551         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1552                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1553                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1554                 info->fw = adev->gfx.mec_fw;
1555                 header = (const struct common_firmware_header *)info->fw->data;
1556                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1557                 adev->firmware.fw_size +=
1558                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1559
1560                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1561                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1562                 info->fw = adev->gfx.mec_fw;
1563                 adev->firmware.fw_size +=
1564                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1565
1566                 if (adev->gfx.mec2_fw) {
1567                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1568                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1569                         info->fw = adev->gfx.mec2_fw;
1570                         header = (const struct common_firmware_header *)info->fw->data;
1571                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1572                         adev->firmware.fw_size +=
1573                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1574
1575                         /* TODO: Determine if MEC2 JT FW loading can be
1576                          * removed for all GFX v9 ASICs and newer. */
1577                         if (adev->asic_type != CHIP_ARCTURUS &&
1578                             adev->asic_type != CHIP_RENOIR) {
1579                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1580                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1581                                 info->fw = adev->gfx.mec2_fw;
1582                                 adev->firmware.fw_size +=
1583                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1584                                         PAGE_SIZE);
1585                         }
1586                 }
1587         }
1588
1589 out:
1590         gfx_v9_0_check_if_need_gfxoff(adev);
1591         gfx_v9_0_check_fw_write_wait(adev);
1592         if (err) {
1593                 dev_err(adev->dev,
1594                         "gfx9: Failed to load firmware \"%s\"\n",
1595                         fw_name);
1596                 release_firmware(adev->gfx.mec_fw);
1597                 adev->gfx.mec_fw = NULL;
1598                 release_firmware(adev->gfx.mec2_fw);
1599                 adev->gfx.mec2_fw = NULL;
1600         }
1601         return err;
1602 }
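
/*
 * Note: for PSP loading the monolithic MEC image above is accounted twice,
 * split into the main ucode (total size minus jt_size * 4 bytes) and the
 * jump table (jt_size dwords), hence the separate _MEC1 and _MEC1_JT
 * entries.
 */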
1603
1604 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1605 {
1606         const char *chip_name;
1607         int r;
1608
1609         DRM_DEBUG("\n");
1610
1611         switch (adev->asic_type) {
1612         case CHIP_VEGA10:
1613                 chip_name = "vega10";
1614                 break;
1615         case CHIP_VEGA12:
1616                 chip_name = "vega12";
1617                 break;
1618         case CHIP_VEGA20:
1619                 chip_name = "vega20";
1620                 break;
1621         case CHIP_RAVEN:
1622                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1623                         chip_name = "raven2";
1624                 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1625                         chip_name = "picasso";
1626                 else
1627                         chip_name = "raven";
1628                 break;
1629         case CHIP_ARCTURUS:
1630                 chip_name = "arcturus";
1631                 break;
1632         case CHIP_RENOIR:
1633                 chip_name = "renoir";
1634                 break;
1635         default:
1636                 BUG();
1637         }
1638
1639         /* No CPG in Arcturus */
1640         if (adev->asic_type != CHIP_ARCTURUS) {
1641                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1642                 if (r)
1643                         return r;
1644         }
1645
1646         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1647         if (r)
1648                 return r;
1649
1650         return gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1655 }
1656
1657 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1658 {
1659         u32 count = 0;
1660         const struct cs_section_def *sect = NULL;
1661         const struct cs_extent_def *ext = NULL;
1662
1663         /* begin clear state */
1664         count += 2;
1665         /* context control state */
1666         count += 3;
1667
1668         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1669                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1670                         if (sect->id == SECT_CONTEXT)
1671                                 count += 2 + ext->reg_count;
1672                         else
1673                                 return 0;
1674                 }
1675         }
1676
1677         /* end clear state */
1678         count += 2;
1679         /* clear state */
1680         count += 2;
1681
1682         return count;
1683 }
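
/*
 * Worked example for the sizing above: a clear-state image with a single
 * SECT_CONTEXT section holding one 3-register extent needs
 * 2 (begin) + 3 (context control) + (2 + 3) (SET_CONTEXT_REG) + 2 (end) +
 * 2 (clear state) = 14 dwords.
 */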
1684
1685 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1686                                     volatile u32 *buffer)
1687 {
1688         u32 count = 0, i;
1689         const struct cs_section_def *sect = NULL;
1690         const struct cs_extent_def *ext = NULL;
1691
1692         if (adev->gfx.rlc.cs_data == NULL)
1693                 return;
1694         if (buffer == NULL)
1695                 return;
1696
1697         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1698         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1699
1700         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1701         buffer[count++] = cpu_to_le32(0x80000000);
1702         buffer[count++] = cpu_to_le32(0x80000000);
1703
1704         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1705                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1706                         if (sect->id == SECT_CONTEXT) {
1707                                 buffer[count++] =
1708                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1709                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1710                                                 PACKET3_SET_CONTEXT_REG_START);
1711                                 for (i = 0; i < ext->reg_count; i++)
1712                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1713                         } else {
1714                                 return;
1715                         }
1716                 }
1717         }
1718
1719         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1720         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1721
1722         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1723         buffer[count++] = cpu_to_le32(0);
1724 }
1725
1726 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1727 {
1728         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1729         uint32_t pg_always_on_cu_num = 2;
1730         uint32_t always_on_cu_num;
1731         uint32_t i, j, k;
1732         uint32_t mask, cu_bitmap, counter;
1733
1734         if (adev->flags & AMD_IS_APU)
1735                 always_on_cu_num = 4;
1736         else if (adev->asic_type == CHIP_VEGA12)
1737                 always_on_cu_num = 8;
1738         else
1739                 always_on_cu_num = 12;
1740
1741         mutex_lock(&adev->grbm_idx_mutex);
1742         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1743                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1744                         mask = 1;
1745                         cu_bitmap = 0;
1746                         counter = 0;
1747                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1748
1749                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1750                                 if (cu_info->bitmap[i][j] & mask) {
1751                                         if (counter == pg_always_on_cu_num)
1752                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1753                                         if (counter < always_on_cu_num)
1754                                                 cu_bitmap |= mask;
1755                                         else
1756                                                 break;
1757                                         counter++;
1758                                 }
1759                                 mask <<= 1;
1760                         }
1761
1762                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1763                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1764                 }
1765         }
1766         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1767         mutex_unlock(&adev->grbm_idx_mutex);
1768 }
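
/*
 * Worked example for the loop above, with always_on_cu_num = 4 and a CU
 * bitmap of 0xb7 (binary 1011 0111): the first four set bits (CUs 0, 1, 2
 * and 4) are collected, giving an always-on mask of 0x17, and the PG
 * always-on mask written when counter == 2 covers the first two CUs (0x3).
 */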
1769
1770 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1771 {
1772         uint32_t data;
1773
1774         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1775         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1776         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1777         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1778         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1779
1780         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1781         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1782
1783         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1784         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1785
1786         mutex_lock(&adev->grbm_idx_mutex);
1787         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1788         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1789         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1790
1791         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1792         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1793         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1794         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1795         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1796
1797         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1798         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1799         data &= 0x0000FFFF;
1800         data |= 0x00C00000;
1801         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1802
1803         /*
1804          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1805          * programmed in gfx_v9_0_init_always_on_cu_mask()
1806          */
1807
1808         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1809          * but used here for RLC_LB_CNTL configuration */
1810         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1811         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1812         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1813         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1814         mutex_unlock(&adev->grbm_idx_mutex);
1815
1816         gfx_v9_0_init_always_on_cu_mask(adev);
1817 }
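
/*
 * Sketch (illustrative only): REG_SET_FIELD() clears the named field and
 * ORs in the new value at the field's shift.  With a hypothetical field
 * occupying bits [7:4]:
 *
 *	data = REG_SET_FIELD(0, SOME_REG, SOME_FIELD, 0x3);
 *	     == (0 & ~0xf0) | (0x3 << 4) == 0x30
 *
 * so the chained calls above assemble RLC_LB_PARAMS one field at a time.
 */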
1818
1819 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1820 {
1821         uint32_t data;
1822
1823         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1824         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1825         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1826         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1827         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1828
1829         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1830         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1831
1832         /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1833         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1834
1835         mutex_lock(&adev->grbm_idx_mutex);
1836         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1837         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1838         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1839
1840         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1841         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1842         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1843         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1844         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1845
1846         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1847         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1848         data &= 0x0000FFFF;
1849         data |= 0x00C00000;
1850         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1851
1852         /*
1853          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1854          * programmed in gfx_v9_0_init_always_on_cu_mask()
1855          */
1856
1857         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1858          * but used here for RLC_LB_CNTL configuration */
1859         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1860         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1861         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1862         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1863         mutex_unlock(&adev->grbm_idx_mutex);
1864
1865         gfx_v9_0_init_always_on_cu_mask(adev);
1866 }
1867
1868 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1869 {
1870         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1871 }
1872
1873 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1874 {
1875         return 5;
1876 }
1877
1878 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1879 {
1880         const struct cs_section_def *cs_data;
1881         int r;
1882
1883         adev->gfx.rlc.cs_data = gfx9_cs_data;
1884
1885         cs_data = adev->gfx.rlc.cs_data;
1886
1887         if (cs_data) {
1888                 /* init clear state block */
1889                 r = amdgpu_gfx_rlc_init_csb(adev);
1890                 if (r)
1891                         return r;
1892         }
1893
1894         if (adev->flags & AMD_IS_APU) {
1895                 /* TODO: double check the cp_table_size for RV */
1896                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1897                 r = amdgpu_gfx_rlc_init_cpt(adev);
1898                 if (r)
1899                         return r;
1900         }
1901
1902         switch (adev->asic_type) {
1903         case CHIP_RAVEN:
1904                 gfx_v9_0_init_lbpw(adev);
1905                 break;
1906         case CHIP_VEGA20:
1907                 gfx_v9_4_init_lbpw(adev);
1908                 break;
1909         default:
1910                 break;
1911         }
1912
1913         /* init spm vmid with 0xf */
1914         if (adev->gfx.rlc.funcs->update_spm_vmid)
1915                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1916
1917         return 0;
1918 }
1919
1920 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1921 {
1922         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1923         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1924 }
1925
1926 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1927 {
1928         int r;
1929         u32 *hpd;
1930         const __le32 *fw_data;
1931         unsigned fw_size;
1932         u32 *fw;
1933         size_t mec_hpd_size;
1934
1935         const struct gfx_firmware_header_v1_0 *mec_hdr;
1936
1937         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1938
1939         /* take ownership of the relevant compute queues */
1940         amdgpu_gfx_compute_queue_acquire(adev);
1941         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1942         if (mec_hpd_size) {
1943                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1944                                               AMDGPU_GEM_DOMAIN_VRAM,
1945                                               &adev->gfx.mec.hpd_eop_obj,
1946                                               &adev->gfx.mec.hpd_eop_gpu_addr,
1947                                               (void **)&hpd);
1948                 if (r) {
1949                         dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1950                         gfx_v9_0_mec_fini(adev);
1951                         return r;
1952                 }
1953
1954                 memset(hpd, 0, mec_hpd_size);
1955
1956                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1957                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1958         }
1959
1960         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1961
1962         fw_data = (const __le32 *)
1963                 (adev->gfx.mec_fw->data +
1964                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1965         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1966
1967         r = amdgpu_bo_create_reserved(adev, fw_size,
1968                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1969                                       &adev->gfx.mec.mec_fw_obj,
1970                                       &adev->gfx.mec.mec_fw_gpu_addr,
1971                                       (void **)&fw);
1972         if (r) {
1973                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1974                 gfx_v9_0_mec_fini(adev);
1975                 return r;
1976         }
1977
1978         memcpy(fw, fw_data, fw_size);
1979
1980         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1981         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1982
1983         return 0;
1984 }
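
/*
 * Illustrative sketch (not driver code): amdgpu_bo_create_reserved()
 * bundles create + reserve + pin + kmap, so the pattern used twice above
 * boils down to:
 */
static int __maybe_unused
example_create_and_fill_bo(struct amdgpu_device *adev, size_t size,
			   struct amdgpu_bo **bo, u64 *gpu_addr)
{
	void *cpu_ptr;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT, bo, gpu_addr,
				      &cpu_ptr);
	if (r)
		return r;

	memset(cpu_ptr, 0, size);	/* fill while CPU-mapped */
	amdgpu_bo_kunmap(*bo);		/* drop the CPU mapping */
	amdgpu_bo_unreserve(*bo);	/* BO stays pinned for GPU use */
	return 0;
}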
1985
1986 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1987 {
1988         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
1989                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1990                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1991                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1992                 (SQ_IND_INDEX__FORCE_READ_MASK));
1993         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1994 }
1995
1996 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1997                            uint32_t wave, uint32_t thread,
1998                            uint32_t regno, uint32_t num, uint32_t *out)
1999 {
2000         WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
2001                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
2002                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
2003                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
2004                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
2005                 (SQ_IND_INDEX__FORCE_READ_MASK) |
2006                 (SQ_IND_INDEX__AUTO_INCR_MASK));
2007         while (num--)
2008                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
2009 }
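
/*
 * Usage sketch (illustrative only): thanks to AUTO_INCR a single index
 * write streams out N consecutive dwords, e.g. four SGPRs of one wave:
 *
 *	uint32_t sgprs[4];
 *
 *	wave_read_regs(adev, simd, wave, 0,
 *		       SQIND_WAVE_SGPRS_OFFSET, 4, sgprs);
 */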
2010
2011 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
2012 {
2013         /* type 1 wave data */
2014         dst[(*no_fields)++] = 1;
2015         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
2016         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
2017         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
2018         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
2019         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
2020         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
2021         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
2022         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
2023         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
2024         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
2025         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
2026         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
2027         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
2028         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
2029 }
2030
2031 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
2032                                      uint32_t wave, uint32_t start,
2033                                      uint32_t size, uint32_t *dst)
2034 {
2035         wave_read_regs(
2036                 adev, simd, wave, 0,
2037                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
2038 }
2039
2040 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
2041                                      uint32_t wave, uint32_t thread,
2042                                      uint32_t start, uint32_t size,
2043                                      uint32_t *dst)
2044 {
2045         wave_read_regs(
2046                 adev, simd, wave, thread,
2047                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
2048 }
2049
2050 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
2051                                   u32 me, u32 pipe, u32 q, u32 vm)
2052 {
2053         soc15_grbm_select(adev, me, pipe, q, vm);
2054 }
2055
2056 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
2057         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2058         .select_se_sh = &gfx_v9_0_select_se_sh,
2059         .read_wave_data = &gfx_v9_0_read_wave_data,
2060         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2061         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2062         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2063         .ras_error_inject = &gfx_v9_0_ras_error_inject,
2064         .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
2065         .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
2066 };
2067
2068 static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
2069         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
2070         .select_se_sh = &gfx_v9_0_select_se_sh,
2071         .read_wave_data = &gfx_v9_0_read_wave_data,
2072         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
2073         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
2074         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
2075         .ras_error_inject = &gfx_v9_4_ras_error_inject,
2076         .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
2077         .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
2078         .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
2079 };
2080
2081 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
2082 {
2083         u32 gb_addr_config;
2084         int err;
2085
2086         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
2087
2088         switch (adev->asic_type) {
2089         case CHIP_VEGA10:
2090                 adev->gfx.config.max_hw_contexts = 8;
2091                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2092                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2093                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2094                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2095                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
2096                 break;
2097         case CHIP_VEGA12:
2098                 adev->gfx.config.max_hw_contexts = 8;
2099                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2100                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2101                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2102                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2103                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
2104                 DRM_INFO("fix gfx.config for vega12\n");
2105                 break;
2106         case CHIP_VEGA20:
2107                 adev->gfx.config.max_hw_contexts = 8;
2108                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2109                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2110                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2111                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2112                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2113                 gb_addr_config &= ~0xf3e777ff;
2114                 gb_addr_config |= 0x22014042;
2115                 /* check vbios table if gpu info is not available */
2116                 err = amdgpu_atomfirmware_get_gfx_info(adev);
2117                 if (err)
2118                         return err;
2119                 break;
2120         case CHIP_RAVEN:
2121                 adev->gfx.config.max_hw_contexts = 8;
2122                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2123                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2124                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2125                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2126                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
2127                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
2128                 else
2129                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
2130                 break;
2131         case CHIP_ARCTURUS:
2132                 adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
2133                 adev->gfx.config.max_hw_contexts = 8;
2134                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2135                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2136                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
2137                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2138                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2139                 gb_addr_config &= ~0xf3e777ff;
2140                 gb_addr_config |= 0x22014042;
2141                 break;
2142         case CHIP_RENOIR:
2143                 adev->gfx.config.max_hw_contexts = 8;
2144                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
2145                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
2146                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
2147                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
2148                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
2149                 gb_addr_config &= ~0xf3e777ff;
2150                 gb_addr_config |= 0x22010042;
2151                 break;
2152         default:
2153                 BUG();
2154                 break;
2155         }
2156
2157         adev->gfx.config.gb_addr_config = gb_addr_config;
2158
2159         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
2160                         REG_GET_FIELD(
2161                                         adev->gfx.config.gb_addr_config,
2162                                         GB_ADDR_CONFIG,
2163                                         NUM_PIPES);
2164
2165         adev->gfx.config.max_tile_pipes =
2166                 adev->gfx.config.gb_addr_config_fields.num_pipes;
2167
2168         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
2169                         REG_GET_FIELD(
2170                                         adev->gfx.config.gb_addr_config,
2171                                         GB_ADDR_CONFIG,
2172                                         NUM_BANKS);
2173         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
2174                         REG_GET_FIELD(
2175                                         adev->gfx.config.gb_addr_config,
2176                                         GB_ADDR_CONFIG,
2177                                         MAX_COMPRESSED_FRAGS);
2178         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
2179                         REG_GET_FIELD(
2180                                         adev->gfx.config.gb_addr_config,
2181                                         GB_ADDR_CONFIG,
2182                                         NUM_RB_PER_SE);
2183         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
2184                         REG_GET_FIELD(
2185                                         adev->gfx.config.gb_addr_config,
2186                                         GB_ADDR_CONFIG,
2187                                         NUM_SHADER_ENGINES);
2188         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
2189                         REG_GET_FIELD(
2190                                         adev->gfx.config.gb_addr_config,
2191                                         GB_ADDR_CONFIG,
2192                                         PIPE_INTERLEAVE_SIZE));
2193
2194         return 0;
2195 }
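
/*
 * Worked example for the decode above: each GB_ADDR_CONFIG field stores a
 * log2, so a NUM_PIPES field value of 2 means 1 << 2 = 4 pipes, and a
 * PIPE_INTERLEAVE_SIZE field value of 0 means 1 << (8 + 0) = 256 bytes.
 */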
2196
2197 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
2198                                       int mec, int pipe, int queue)
2199 {
2200         unsigned irq_type;
2201         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
2202         unsigned int hw_prio;
2203
2206         /* mec0 is me1 */
2207         ring->me = mec + 1;
2208         ring->pipe = pipe;
2209         ring->queue = queue;
2210
2211         ring->ring_obj = NULL;
2212         ring->use_doorbell = true;
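        /* indices in adev->doorbell_index are 64-bit doorbell slots on
         * SOC15; the << 1 converts them to the dword offsets the CP
         * expects (the gfx ring in sw_init uses the same shift) */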
2213         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
2214         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
2215                                 + (ring_id * GFX9_MEC_HPD_SIZE);
2216         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
2217
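        /* each (me, pipe) pair has its own EOP interrupt source; queues on
         * a pipe share it, which is why the queue id is not part of
         * irq_type */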
2218         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
2219                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
2220                 + ring->pipe;
2221         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
2222                         AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
2223         /* type-2 packets are deprecated on MEC, use type-3 instead */
2224         return amdgpu_ring_init(adev, ring, 1024,
2225                                 &adev->gfx.eop_irq, irq_type, hw_prio);
2226 }
2227
2228 static int gfx_v9_0_sw_init(void *handle)
2229 {
2230         int i, j, k, r, ring_id;
2231         struct amdgpu_ring *ring;
2232         struct amdgpu_kiq *kiq;
2233         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2234
2235         switch (adev->asic_type) {
2236         case CHIP_VEGA10:
2237         case CHIP_VEGA12:
2238         case CHIP_VEGA20:
2239         case CHIP_RAVEN:
2240         case CHIP_ARCTURUS:
2241         case CHIP_RENOIR:
2242                 adev->gfx.mec.num_mec = 2;
2243                 break;
2244         default:
2245                 adev->gfx.mec.num_mec = 1;
2246                 break;
2247         }
2248
2249         adev->gfx.mec.num_pipe_per_mec = 4;
2250         adev->gfx.mec.num_queue_per_pipe = 8;
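        /* each gfx9 MEC exposes 4 pipes with 8 queues per pipe, so parts
         * with two MECs have up to 64 hardware compute queues */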
2251
2252         /* EOP Event */
2253         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2254         if (r)
2255                 return r;
2256
2257         /* Privileged reg */
2258         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2259                               &adev->gfx.priv_reg_irq);
2260         if (r)
2261                 return r;
2262
2263         /* Privileged inst */
2264         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2265                               &adev->gfx.priv_inst_irq);
2266         if (r)
2267                 return r;
2268
2269         /* ECC error */
2270         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2271                               &adev->gfx.cp_ecc_error_irq);
2272         if (r)
2273                 return r;
2274
2275         /* FUE error */
2276         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2277                               &adev->gfx.cp_ecc_error_irq);
2278         if (r)
2279                 return r;
2280
2281         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2282
2283         gfx_v9_0_scratch_init(adev);
2284
2285         r = gfx_v9_0_init_microcode(adev);
2286         if (r) {
2287                 DRM_ERROR("Failed to load gfx firmware!\n");
2288                 return r;
2289         }
2290
2291         r = adev->gfx.rlc.funcs->init(adev);
2292         if (r) {
2293                 DRM_ERROR("Failed to init rlc BOs!\n");
2294                 return r;
2295         }
2296
2297         r = gfx_v9_0_mec_init(adev);
2298         if (r) {
2299                 DRM_ERROR("Failed to init MEC BOs!\n");
2300                 return r;
2301         }
2302
2303         /* set up the gfx ring */
2304         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2305                 ring = &adev->gfx.gfx_ring[i];
2306                 ring->ring_obj = NULL;
2307                 if (!i)
2308                         sprintf(ring->name, "gfx");
2309                 else
2310                         sprintf(ring->name, "gfx_%d", i);
2311                 ring->use_doorbell = true;
2312                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2313                 r = amdgpu_ring_init(adev, ring, 1024,
2314                                      &adev->gfx.eop_irq,
2315                                      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2316                                      AMDGPU_RING_PRIO_DEFAULT);
2317                 if (r)
2318                         return r;
2319         }
2320
2321         /* set up the compute queues - allocate horizontally across pipes */
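        /* with the pipe index (k) innermost, consecutive ring ids land on
         * different pipes, so queues spread across the hardware instead of
         * filling one pipe first */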
2322         ring_id = 0;
2323         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2324                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2325                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2326                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2327                                         continue;
2328
2329                                 r = gfx_v9_0_compute_ring_init(adev,
2330                                                                ring_id,
2331                                                                i, k, j);
2332                                 if (r)
2333                                         return r;
2334
2335                                 ring_id++;
2336                         }
2337                 }
2338         }
2339
2340         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2341         if (r) {
2342                 DRM_ERROR("Failed to init KIQ BOs!\n");
2343                 return r;
2344         }
2345
2346         kiq = &adev->gfx.kiq;
2347         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2348         if (r)
2349                 return r;
2350
2351         /* create MQD for all compute queues as well as the KIQ, for the SRIOV case */
2352         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2353         if (r)
2354                 return r;
2355
2356         adev->gfx.ce_ram_size = 0x8000;
2357
2358         r = gfx_v9_0_gpu_early_init(adev);
2359         if (r)
2360                 return r;
2361
2362         return 0;
2363 }
2364
2365
2366 static int gfx_v9_0_sw_fini(void *handle)
2367 {
2368         int i;
2369         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2370
2371         amdgpu_gfx_ras_fini(adev);
2372
2373         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2374                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2375         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2376                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2377
2378         amdgpu_gfx_mqd_sw_fini(adev);
2379         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2380         amdgpu_gfx_kiq_fini(adev);
2381
2382         gfx_v9_0_mec_fini(adev);
2383         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2384         if (adev->flags & AMD_IS_APU) {
2385                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2386                                 &adev->gfx.rlc.cp_table_gpu_addr,
2387                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2388         }
2389         gfx_v9_0_free_microcode(adev);
2390
2391         return 0;
2392 }
2393
2394
2395 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2396 {
2397         /* TODO */
2398 }
2399
2400 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
2401 {
2402         u32 data;
2403
2404         if (instance == 0xffffffff)
2405                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2406         else
2407                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2408
2409         if (se_num == 0xffffffff)
2410                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2411         else
2412                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2413
2414         if (sh_num == 0xffffffff)
2415                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2416         else
2417                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2418
2419         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2420 }
2421
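/*
 * CC_RB_BACKEND_DISABLE holds the fuse-harvested RBs and
 * GC_USER_RB_BACKEND_DISABLE the ones disabled by the driver/user;
 * OR-ing and inverting them gives the RBs actually active in the
 * currently selected SE/SH.
 */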
2422 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2423 {
2424         u32 data, mask;
2425
2426         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2427         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2428
2429         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2430         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2431
2432         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2433                                          adev->gfx.config.max_sh_per_se);
2434
2435         return (~data) & mask;
2436 }
2437
2438 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2439 {
2440         int i, j;
2441         u32 data;
2442         u32 active_rbs = 0;
2443         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2444                                         adev->gfx.config.max_sh_per_se;
2445
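        /* build one global bitmap: each SE/SH pair contributes
         * rb_bitmap_width_per_sh bits, packed in (se, sh) order */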
2446         mutex_lock(&adev->grbm_idx_mutex);
2447         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2448                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2449                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2450                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2451                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2452                                                rb_bitmap_width_per_sh);
2453                 }
2454         }
2455         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2456         mutex_unlock(&adev->grbm_idx_mutex);
2457
2458         adev->gfx.config.backend_enable_mask = active_rbs;
2459         adev->gfx.config.num_rbs = hweight32(active_rbs);
2460 }
2461
2462 #define DEFAULT_SH_MEM_BASES    (0x6000)
2463 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2464 {
2465         int i;
2466         uint32_t sh_mem_config;
2467         uint32_t sh_mem_bases;
2468
2469         /*
2470          * Configure apertures:
2471          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2472          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2473          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2474          */
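        /*
         * SH_MEM_BASES stores bits 63:48 of the private and shared aperture
         * bases in its two 16-bit halves (cf. the REG_SET_FIELD usage in
         * gfx_v9_0_constants_init); 0x6000 in both halves selects the
         * 0x6000 << 48 ranges listed above.
         */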
2475         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
2476
2477         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2478                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2479                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2480
2481         mutex_lock(&adev->srbm_mutex);
2482         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2483                 soc15_grbm_select(adev, 0, 0, 0, i);
2484                 /* CP and shaders */
2485                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2486                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2487         }
2488         soc15_grbm_select(adev, 0, 0, 0, 0);
2489         mutex_unlock(&adev->srbm_mutex);
2490
2491         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2492            access. These should be enabled by FW for target VMIDs. */
2493         for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
2494                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2495                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2496                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2497                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2498         }
2499 }
2500
2501 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2502 {
2503         int vmid;
2504
2505         /*
2506          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2507          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2508          * the driver can enable them for graphics. VMID0 should maintain
2509          * access so that HWS firmware can save/restore entries.
2510          */
2511         for (vmid = 1; vmid < 16; vmid++) {
2512                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2513                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2514                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2515                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2516         }
2517 }
2518
2519 static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
2520 {
2521         uint32_t tmp;
2522
2523         switch (adev->asic_type) {
2524         case CHIP_ARCTURUS:
2525                 tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
2526                 tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
2527                                         DISABLE_BARRIER_WAITCNT, 1);
2528                 WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
2529                 break;
2530         default:
2531                 break;
2532         }
2533 }
2534
2535 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2536 {
2537         u32 tmp;
2538         int i;
2539
2540         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2541
2542         gfx_v9_0_tiling_mode_table_init(adev);
2543
2544         gfx_v9_0_setup_rb(adev);
2545         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2546         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2547
2548         /* XXX SH_MEM regs */
2549         /* where to put LDS, scratch, GPUVM in FSA64 space */
2550         mutex_lock(&adev->srbm_mutex);
2551         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2552                 soc15_grbm_select(adev, 0, 0, 0, i);
2553                 /* CP and shaders */
2554                 if (i == 0) {
2555                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2556                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2557                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2558                                             !!amdgpu_noretry);
2559                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2560                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2561                 } else {
2562                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2563                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2564                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2565                                             !!amdgpu_noretry);
2566                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2567                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2568                                 (adev->gmc.private_aperture_start >> 48));
2569                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2570                                 (adev->gmc.shared_aperture_start >> 48));
2571                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2572                 }
2573         }
2574         soc15_grbm_select(adev, 0, 0, 0, 0);
2575
2576         mutex_unlock(&adev->srbm_mutex);
2577
2578         gfx_v9_0_init_compute_vmid(adev);
2579         gfx_v9_0_init_gds_vmid(adev);
2580         gfx_v9_0_init_sq_config(adev);
2581 }
2582
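/*
 * Poll the per-CU serdes masters on every SE/SH, then the non-CU
 * masters, until they report idle or adev->usec_timeout microseconds
 * elapse; used to make sure the RLC has quiesced before it is stopped.
 */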
2583 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2584 {
2585         u32 i, j, k;
2586         u32 mask;
2587
2588         mutex_lock(&adev->grbm_idx_mutex);
2589         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2590                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2591                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2592                         for (k = 0; k < adev->usec_timeout; k++) {
2593                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2594                                         break;
2595                                 udelay(1);
2596                         }
2597                         if (k == adev->usec_timeout) {
2598                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2599                                                       0xffffffff, 0xffffffff);
2600                                 mutex_unlock(&adev->grbm_idx_mutex);
2601                                 DRM_INFO("Timed out waiting for RLC serdes %u,%u\n",
2602                                          i, j);
2603                                 return;
2604                         }
2605                 }
2606         }
2607         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2608         mutex_unlock(&adev->grbm_idx_mutex);
2609
2610         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2611                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2612                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2613                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2614         for (k = 0; k < adev->usec_timeout; k++) {
2615                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2616                         break;
2617                 udelay(1);
2618         }
2619 }
2620
2621 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2622                                                bool enable)
2623 {
2624         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2625
2626         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2627         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2628         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2629         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2630
2631         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2632 }
2633
2634 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2635 {
2636         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2637         /* csib */
2638         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2639                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2640         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2641                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2642         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2643                         adev->gfx.rlc.clear_state_size);
2644 }
2645
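/*
 * Layout of register_list_format, as consumed below: after the direct
 * portion, each indirect section is a run of 3-dword entries (the third
 * dword being the indirect register offset matched against
 * unique_indirect_regs), terminated by a 0xFFFFFFFF sentinel.
 */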
2646 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2647                                 int indirect_offset,
2648                                 int list_size,
2649                                 int *unique_indirect_regs,
2650                                 int unique_indirect_reg_count,
2651                                 int *indirect_start_offsets,
2652                                 int *indirect_start_offsets_count,
2653                                 int max_start_offsets_count)
2654 {
2655         int idx;
2656
2657         for (; indirect_offset < list_size; indirect_offset++) {
2658                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2659                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2660                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2661
2662                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2663                         indirect_offset += 2;
2664
2665                         /* look for the matching index */
2666                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2667                                 if (unique_indirect_regs[idx] ==
2668                                         register_list_format[indirect_offset] ||
2669                                         !unique_indirect_regs[idx])
2670                                         break;
2671                         }
2672
2673                         BUG_ON(idx >= unique_indirect_reg_count);
2674
2675                         if (!unique_indirect_regs[idx])
2676                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2677
2678                         indirect_offset++;
2679                 }
2680         }
2681 }
2682
2683 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2684 {
2685         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2686         int unique_indirect_reg_count = 0;
2687
2688         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2689         int indirect_start_offsets_count = 0;
2690
2691         int list_size = 0;
2692         int i = 0, j = 0;
2693         u32 tmp = 0;
2694
2695         u32 *register_list_format =
2696                 kmemdup(adev->gfx.rlc.register_list_format,
2697                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2698         if (!register_list_format)
2699                 return -ENOMEM;
2700
2701         /* setup unique_indirect_regs array and indirect_start_offsets array */
2702         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2703         gfx_v9_1_parse_ind_reg_list(register_list_format,
2704                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2705                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2706                                     unique_indirect_regs,
2707                                     unique_indirect_reg_count,
2708                                     indirect_start_offsets,
2709                                     &indirect_start_offsets_count,
2710                                     ARRAY_SIZE(indirect_start_offsets));
2711
2712         /* enable auto-increment in case it is disabled */
2713         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2714         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2715         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2716
2717         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2718         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2719                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2720         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2721                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2722                         adev->gfx.rlc.register_restore[i]);
2723
2724         /* load indirect register */
2725         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2726                 adev->gfx.rlc.reg_list_format_start);
2727
2728         /* direct register portion */
2729         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2730                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2731                         register_list_format[i]);
2732
2733         /* indirect register portion */
2734         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2735                 if (register_list_format[i] == 0xFFFFFFFF) {
2736                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2737                         continue;
2738                 }
2739
2740                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2741                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2742
2743                 for (j = 0; j < unique_indirect_reg_count; j++) {
2744                         if (register_list_format[i] == unique_indirect_regs[j]) {
2745                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2746                                 break;
2747                         }
2748                 }
2749
2750                 BUG_ON(j >= unique_indirect_reg_count);
2751
2752                 i++;
2753         }
2754
2755         /* set save/restore list size */
2756         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2757         list_size = list_size >> 1;
2758         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2759                 adev->gfx.rlc.reg_restore_list_size);
2760         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2761
2762         /* write the starting offsets to RLC scratch ram */
2763         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2764                 adev->gfx.rlc.starting_offsets_start);
2765         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2766                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2767                        indirect_start_offsets[i]);
2768
2769         /* load unique indirect regs */
2770         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2771                 if (unique_indirect_regs[i] != 0) {
2772                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2773                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2774                                unique_indirect_regs[i] & 0x3FFFF);
2775
2776                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2777                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2778                                unique_indirect_regs[i] >> 20);
2779                 }
2780         }
2781
2782         kfree(register_list_format);
2783         return 0;
2784 }
2785
2786 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2787 {
2788         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2789 }
2790
2791 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2792                                              bool enable)
2793 {
2794         uint32_t data = 0;
2795         uint32_t default_data = 0;
2796
2797         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2798         if (enable) {
2799                 /* enable GFXIP control over CGPG */
2800                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2801                 if (default_data != data)
2802                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2803
2804                 /* update status */
2805                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2806                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2807                 if (default_data != data)
2808                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2809         } else {
2810                 /* restore GFXIP control over CGPG */
2811                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2812                 if (default_data != data)
2813                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2814         }
2815 }
2816
2817 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2818 {
2819         uint32_t data = 0;
2820
2821         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2822                               AMD_PG_SUPPORT_GFX_SMG |
2823                               AMD_PG_SUPPORT_GFX_DMG)) {
2824                 /* init IDLE_POLL_COUNT = 60 */
2825                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2826                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2827                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2828                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2829
2830                 /* init RLC PG Delay */
2831                 data = 0;
2832                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2833                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2834                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2835                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2836                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2837
2838                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2839                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2840                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2841                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2842
2843                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2844                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2845                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2846                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2847
2848                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2849                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2850
2851                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2852                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2853                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2854                 if (adev->asic_type != CHIP_RENOIR)
2855                         pwr_10_0_gfxip_control_over_cgpg(adev, true);
2856         }
2857 }
2858
2859 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2860                                                 bool enable)
2861 {
2862         uint32_t data = 0;
2863         uint32_t default_data = 0;
2864
2865         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2866         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2867                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2868                              enable ? 1 : 0);
2869         if (default_data != data)
2870                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2871 }
2872
2873 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2874                                                 bool enable)
2875 {
2876         uint32_t data = 0;
2877         uint32_t default_data = 0;
2878
2879         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2880         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2881                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2882                              enable ? 1 : 0);
2883         if (default_data != data)
2884                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2885 }
2886
2887 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2888                                         bool enable)
2889 {
2890         uint32_t data = 0;
2891         uint32_t default_data = 0;
2892
2893         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2894         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2895                              CP_PG_DISABLE,
2896                              enable ? 0 : 1);
2897         if (default_data != data)
2898                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2899 }
2900
2901 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2902                                                 bool enable)
2903 {
2904         uint32_t data, default_data;
2905
2906         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2907         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2908                              GFX_POWER_GATING_ENABLE,
2909                              enable ? 1 : 0);
2910         if (default_data != data)
2911                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2912 }
2913
2914 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2915                                                 bool enable)
2916 {
2917         uint32_t data, default_data;
2918
2919         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2920         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2921                              GFX_PIPELINE_PG_ENABLE,
2922                              enable ? 1 : 0);
2923         if (default_data != data)
2924                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2925
2926         if (!enable)
2927                 /* read any GFX register to wake up GFX */
2928                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2929 }
2930
2931 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2932                                                        bool enable)
2933 {
2934         uint32_t data, default_data;
2935
2936         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2937         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2938                              STATIC_PER_CU_PG_ENABLE,
2939                              enable ? 1 : 0);
2940         if (default_data != data)
2941                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2942 }
2943
2944 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2945                                                 bool enable)
2946 {
2947         uint32_t data, default_data;
2948
2949         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2950         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2951                              DYN_PER_CU_PG_ENABLE,
2952                              enable ? 1 : 0);
2953         if (default_data != data)
2954                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2955 }
2956
2957 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2958 {
2959         gfx_v9_0_init_csb(adev);
2960
2961         /*
2962          * The RLC save/restore list is supported from RLC v2.1 onward,
2963          * and it is required by the gfxoff feature.
2964          */
2965         if (adev->gfx.rlc.is_rlc_v2_1) {
2966                 if (adev->asic_type == CHIP_VEGA12 ||
2967                     (adev->apu_flags & AMD_APU_IS_RAVEN2))
2968                         gfx_v9_1_init_rlc_save_restore_list(adev);
2969                 gfx_v9_0_enable_save_restore_machine(adev);
2970         }
2971
2972         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2973                               AMD_PG_SUPPORT_GFX_SMG |
2974                               AMD_PG_SUPPORT_GFX_DMG |
2975                               AMD_PG_SUPPORT_CP |
2976                               AMD_PG_SUPPORT_GDS |
2977                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2978                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2979                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2980                 gfx_v9_0_init_gfx_power_gating(adev);
2981         }
2982 }
2983
2984 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2985 {
2986         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2987         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2988         gfx_v9_0_wait_for_rlc_serdes(adev);
2989 }
2990
2991 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2992 {
2993         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2994         udelay(50);
2995         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2996         udelay(50);
2997 }
2998
2999 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
3000 {
3001 #ifdef AMDGPU_RLC_DEBUG_RETRY
3002         u32 rlc_ucode_ver;
3003 #endif
3004
3005         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
3006         udelay(50);
3007
3008         /* APUs (e.g. Carrizo) enable the CP interrupt only after the CP is initialized */
3009         if (!(adev->flags & AMD_IS_APU)) {
3010                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3011                 udelay(50);
3012         }
3013
3014 #ifdef AMDGPU_RLC_DEBUG_RETRY
3015         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
3016         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
3017         if (rlc_ucode_ver == 0x108) {
3018                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
3019                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
3020                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
3021                  * default is 0x9C4 to create a 100us interval */
3022                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
3023                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
3024                  * to disable the page fault retry interrupts, default is
3025                  * 0x100 (256) */
3026                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
3027         }
3028 #endif
3029 }
3030
3031 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
3032 {
3033         const struct rlc_firmware_header_v2_0 *hdr;
3034         const __le32 *fw_data;
3035         unsigned i, fw_size;
3036
3037         if (!adev->gfx.rlc_fw)
3038                 return -EINVAL;
3039
3040         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
3041         amdgpu_ucode_print_rlc_hdr(&hdr->header);
3042
3043         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
3044                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
3045         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
3046
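        /* the ADDR register evidently auto-increments as each DATA word is
         * written, so the image streams through one register pair; the
         * trailing ADDR write of the fw version mirrors what the CP ucode
         * loaders below do */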
3047         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
3048                         RLCG_UCODE_LOADING_START_ADDRESS);
3049         for (i = 0; i < fw_size; i++)
3050                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
3051         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
3052
3053         return 0;
3054 }
3055
3056 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
3057 {
3058         int r;
3059
3060         if (amdgpu_sriov_vf(adev)) {
3061                 gfx_v9_0_init_csb(adev);
3062                 return 0;
3063         }
3064
3065         adev->gfx.rlc.funcs->stop(adev);
3066
3067         /* disable CG */
3068         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
3069
3070         gfx_v9_0_init_pg(adev);
3071
3072         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3073                 /* legacy rlc firmware loading */
3074                 r = gfx_v9_0_rlc_load_microcode(adev);
3075                 if (r)
3076                         return r;
3077         }
3078
3079         switch (adev->asic_type) {
3080         case CHIP_RAVEN:
3081                 if (amdgpu_lbpw == 0)
3082                         gfx_v9_0_enable_lbpw(adev, false);
3083                 else
3084                         gfx_v9_0_enable_lbpw(adev, true);
3085                 break;
3086         case CHIP_VEGA20:
3087                 if (amdgpu_lbpw > 0)
3088                         gfx_v9_0_enable_lbpw(adev, true);
3089                 else
3090                         gfx_v9_0_enable_lbpw(adev, false);
3091                 break;
3092         default:
3093                 break;
3094         }
3095
3096         adev->gfx.rlc.funcs->start(adev);
3097
3098         return 0;
3099 }
3100
3101 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
3102 {
3103         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
3104
3105         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
3106         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
3107         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
3108         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
3109         udelay(50);
3110 }
3111
3112 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3113 {
3114         const struct gfx_firmware_header_v1_0 *pfp_hdr;
3115         const struct gfx_firmware_header_v1_0 *ce_hdr;
3116         const struct gfx_firmware_header_v1_0 *me_hdr;
3117         const __le32 *fw_data;
3118         unsigned i, fw_size;
3119
3120         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
3121                 return -EINVAL;
3122
3123         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
3124                 adev->gfx.pfp_fw->data;
3125         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
3126                 adev->gfx.ce_fw->data;
3127         me_hdr = (const struct gfx_firmware_header_v1_0 *)
3128                 adev->gfx.me_fw->data;
3129
3130         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
3131         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
3132         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3133
3134         gfx_v9_0_cp_gfx_enable(adev, false);
3135
3136         /* PFP */
3137         fw_data = (const __le32 *)
3138                 (adev->gfx.pfp_fw->data +
3139                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3140         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3141         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
3142         for (i = 0; i < fw_size; i++)
3143                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3144         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
3145
3146         /* CE */
3147         fw_data = (const __le32 *)
3148                 (adev->gfx.ce_fw->data +
3149                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3150         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3151         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
3152         for (i = 0; i < fw_size; i++)
3153                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3154         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
3155
3156         /* ME */
3157         fw_data = (const __le32 *)
3158                 (adev->gfx.me_fw->data +
3159                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3160         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3161         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
3162         for (i = 0; i < fw_size; i++)
3163                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3164         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
3165
3166         return 0;
3167 }
3168
3169 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
3170 {
3171         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
3172         const struct cs_section_def *sect = NULL;
3173         const struct cs_extent_def *ext = NULL;
3174         int r, i, tmp;
3175
3176         /* init the CP */
3177         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
3178         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
3179
3180         gfx_v9_0_cp_gfx_enable(adev, true);
3181
3182         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
3183         if (r) {
3184                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3185                 return r;
3186         }
3187
3188         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3189         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3190
3191         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3192         amdgpu_ring_write(ring, 0x80000000);
3193         amdgpu_ring_write(ring, 0x80000000);
3194
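        /* emit the clear state: walk gfx9_cs_data and write every
         * SECT_CONTEXT extent with SET_CONTEXT_REG packets */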
3195         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
3196                 for (ext = sect->section; ext->extent != NULL; ++ext) {
3197                         if (sect->id == SECT_CONTEXT) {
3198                                 amdgpu_ring_write(ring,
3199                                        PACKET3(PACKET3_SET_CONTEXT_REG,
3200                                                ext->reg_count));
3201                                 amdgpu_ring_write(ring,
3202                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
3203                                 for (i = 0; i < ext->reg_count; i++)
3204                                         amdgpu_ring_write(ring, ext->extent[i]);
3205                         }
3206                 }
3207         }
3208
3209         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3210         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3211
3212         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3213         amdgpu_ring_write(ring, 0);
3214
3215         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3216         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3217         amdgpu_ring_write(ring, 0x8000);
3218         amdgpu_ring_write(ring, 0x8000);
3219
3220         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3221         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
3222                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
3223         amdgpu_ring_write(ring, tmp);
3224         amdgpu_ring_write(ring, 0);
3225
3226         amdgpu_ring_commit(ring);
3227
3228         return 0;
3229 }
3230
3231 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
3232 {
3233         struct amdgpu_ring *ring;
3234         u32 tmp;
3235         u32 rb_bufsz;
3236         u64 rb_addr, rptr_addr, wptr_gpu_addr;
3237
3238         /* Set the write pointer delay */
3239         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
3240
3241         /* set the RB to use vmid 0 */
3242         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
3243
3244         /* Set ring buffer size */
3245         ring = &adev->gfx.gfx_ring[0];
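        /* CP_RB0_CNTL wants log2-encoded sizes; dividing the byte size by 8
         * before order_base_2() suggests the register counts in 8-byte
         * units (assumption from the encoding used here) */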
3246         rb_bufsz = order_base_2(ring->ring_size / 8);
3247         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3248         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3249 #ifdef __BIG_ENDIAN
3250         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3251 #endif
3252         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3253
3254         /* Initialize the ring buffer's write pointers */
3255         ring->wptr = 0;
3256         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3257         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3258
3259         /* set the wb address whether it's enabled or not */
3260         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3261         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3262         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3263
3264         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3265         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3266         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3267
3268         mdelay(1);
3269         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3270
3271         rb_addr = ring->gpu_addr >> 8;
3272         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3273         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3274
3275         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3276         if (ring->use_doorbell) {
3277                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3278                                     DOORBELL_OFFSET, ring->doorbell_index);
3279                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3280                                     DOORBELL_EN, 1);
3281         } else {
3282                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3283         }
3284         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3285
3286         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3287                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3288         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3289
3290         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3291                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3292
3293
3294         /* start the ring */
3295         gfx_v9_0_cp_gfx_start(adev);
3296         ring->sched.ready = true;
3297
3298         return 0;
3299 }
3300
3301 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3302 {
3303         if (enable) {
3304                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3305         } else {
3306                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3307                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3308                 adev->gfx.kiq.ring.sched.ready = false;
3309         }
3310         udelay(50);
3311 }
3312
3313 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3314 {
3315         const struct gfx_firmware_header_v1_0 *mec_hdr;
3316         const __le32 *fw_data;
3317         unsigned i;
3318         u32 tmp;
3319
3320         if (!adev->gfx.mec_fw)
3321                 return -EINVAL;
3322
3323         gfx_v9_0_cp_compute_enable(adev, false);
3324
3325         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3326         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3327
3328         fw_data = (const __le32 *)
3329                 (adev->gfx.mec_fw->data +
3330                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3331         tmp = 0;
3332         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3333         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3334         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3335
3336         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3337                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3338         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3339                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3340
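        /* CP_CPC_IC_BASE now points at the firmware BO, so the CPC fetches
         * the ucode body on its own; only the jump table is still written
         * through the UCODE_ADDR/DATA pair below */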
3341         /* MEC1 */
3342         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3343                          mec_hdr->jt_offset);
3344         for (i = 0; i < mec_hdr->jt_size; i++)
3345                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3346                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3347
3348         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3349                         adev->gfx.mec_fw_version);
3350         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3351
3352         return 0;
3353 }
3354
3355 /* KIQ functions */
3356 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3357 {
3358         uint32_t tmp;
3359         struct amdgpu_device *adev = ring->adev;
3360
3361         /* tell the RLC which queue is the KIQ */
3362         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3363         tmp &= 0xffffff00;
3364         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3365         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
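        /* the second write sets 0x80, which appears to act as the
         * "KIQ active" bit on top of the me/pipe/queue id programmed above
         * (assumption; the bit is undocumented here) */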
3366         tmp |= 0x80;
3367         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3368 }
3369
3370 static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
3371 {
3372         struct amdgpu_device *adev = ring->adev;
3373
3374         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3375                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
3376                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
3377                         mqd->cp_hqd_queue_priority =
3378                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
3379                 }
3380         }
3381 }
3382
3383 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3384 {
3385         struct amdgpu_device *adev = ring->adev;
3386         struct v9_mqd *mqd = ring->mqd_ptr;
3387         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3388         uint32_t tmp;
3389
3390         mqd->header = 0xC0310800;
3391         mqd->compute_pipelinestat_enable = 0x00000001;
3392         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3393         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3394         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3395         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3396         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3397         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3398         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3399         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3400         mqd->compute_misc_reserved = 0x00000003;
3401
3402         mqd->dynamic_cu_mask_addr_lo =
3403                 lower_32_bits(ring->mqd_gpu_addr
3404                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3405         mqd->dynamic_cu_mask_addr_hi =
3406                 upper_32_bits(ring->mqd_gpu_addr
3407                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3408
3409         eop_base_addr = ring->eop_gpu_addr >> 8;
3410         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3411         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3412
3413         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3414         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3415         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3416                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3417
3418         mqd->cp_hqd_eop_control = tmp;
3419
3420         /* enable doorbell? */
3421         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3422
3423         if (ring->use_doorbell) {
3424                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3425                                     DOORBELL_OFFSET, ring->doorbell_index);
3426                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3427                                     DOORBELL_EN, 1);
3428                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3429                                     DOORBELL_SOURCE, 0);
3430                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3431                                     DOORBELL_HIT, 0);
3432         } else {
3433                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3434                                          DOORBELL_EN, 0);
3435         }
3436
3437         mqd->cp_hqd_pq_doorbell_control = tmp;
3438
3439         /* disable the queue if it's active */
3440         ring->wptr = 0;
3441         mqd->cp_hqd_dequeue_request = 0;
3442         mqd->cp_hqd_pq_rptr = 0;
3443         mqd->cp_hqd_pq_wptr_lo = 0;
3444         mqd->cp_hqd_pq_wptr_hi = 0;
3445
3446         /* set the pointer to the MQD */
3447         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3448         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3449
3450         /* set MQD vmid to 0 */
3451         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3452         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3453         mqd->cp_mqd_control = tmp;
3454
3455         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3456         hqd_gpu_addr = ring->gpu_addr >> 8;
3457         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3458         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3459
3460         /* set up the HQD, this is similar to CP_RB0_CNTL */
3461         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3462         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3463                             (order_base_2(ring->ring_size / 4) - 1));
3464         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3465                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3466 #ifdef __BIG_ENDIAN
3467         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3468 #endif
3469         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3470         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3471         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3472         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3473         mqd->cp_hqd_pq_control = tmp;
3474
3475         /* set the wb address whether it's enabled or not */
3476         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3477         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3478         mqd->cp_hqd_pq_rptr_report_addr_hi =
3479                 upper_32_bits(wb_gpu_addr) & 0xffff;
3480
3481         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3482         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3483         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3484         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3485
3486         tmp = 0;
3487         /* enable the doorbell if requested */
3488         if (ring->use_doorbell) {
3489                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3490                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3491                                 DOORBELL_OFFSET, ring->doorbell_index);
3492
3493                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3494                                          DOORBELL_EN, 1);
3495                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3496                                          DOORBELL_SOURCE, 0);
3497                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3498                                          DOORBELL_HIT, 0);
3499         }
3500
3501         mqd->cp_hqd_pq_doorbell_control = tmp;
3502
3503         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3504         ring->wptr = 0;
3505         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3506
3507         /* set the vmid for the queue */
3508         mqd->cp_hqd_vmid = 0;
3509
3510         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3511         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3512         mqd->cp_hqd_persistent_state = tmp;
3513
3514         /* set MIN_IB_AVAIL_SIZE */
3515         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3516         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3517         mqd->cp_hqd_ib_control = tmp;
3518
3519         /* set static priority for a queue/ring */
3520         gfx_v9_0_mqd_set_priority(ring, mqd);
3521         mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
3522
3523         /* the map_queues packet doesn't need to activate the queue,
3524          * so only the KIQ needs to set this field.
3525          */
3526         if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
3527                 mqd->cp_hqd_active = 1;
3528
3529         return 0;
3530 }
3531
3532 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3533 {
3534         struct amdgpu_device *adev = ring->adev;
3535         struct v9_mqd *mqd = ring->mqd_ptr;
3536         int j;
3537
3538         /* disable wptr polling */
3539         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3540
3541         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3542                mqd->cp_hqd_eop_base_addr_lo);
3543         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3544                mqd->cp_hqd_eop_base_addr_hi);
3545
3546         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3547         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3548                mqd->cp_hqd_eop_control);
3549
3550         /* enable doorbell? */
3551         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3552                mqd->cp_hqd_pq_doorbell_control);
3553
3554         /* disable the queue if it's active */
3555         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3556                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3557                 for (j = 0; j < adev->usec_timeout; j++) {
3558                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3559                                 break;
3560                         udelay(1);
3561                 }
3562                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3563                        mqd->cp_hqd_dequeue_request);
3564                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3565                        mqd->cp_hqd_pq_rptr);
3566                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3567                        mqd->cp_hqd_pq_wptr_lo);
3568                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3569                        mqd->cp_hqd_pq_wptr_hi);
3570         }
3571
3572         /* set the pointer to the MQD */
3573         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3574                mqd->cp_mqd_base_addr_lo);
3575         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3576                mqd->cp_mqd_base_addr_hi);
3577
3578         /* set MQD vmid to 0 */
3579         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3580                mqd->cp_mqd_control);
3581
3582         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3583         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3584                mqd->cp_hqd_pq_base_lo);
3585         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3586                mqd->cp_hqd_pq_base_hi);
3587
3588         /* set up the HQD, this is similar to CP_RB0_CNTL */
3589         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3590                mqd->cp_hqd_pq_control);
3591
3592         /* set the wb address whether it's enabled or not */
3593         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3594                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3595         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3596                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3597
3598         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3599         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3600                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3601         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3602                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3603
3604         /* enable the doorbell if requested */
3605         if (ring->use_doorbell) {
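                     /* doorbell_index values are 64-bit doorbell slots; the
                      * "* 2" converts them to dword units and the "<< 2" to
                      * the byte offset these range registers expect
                      */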
3606                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3607                                         (adev->doorbell_index.kiq * 2) << 2);
3608                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3609                                         (adev->doorbell_index.userqueue_end * 2) << 2);
3610         }
3611
3612         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3613                mqd->cp_hqd_pq_doorbell_control);
3614
3615         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3616         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3617                mqd->cp_hqd_pq_wptr_lo);
3618         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3619                mqd->cp_hqd_pq_wptr_hi);
3620
3621         /* set the vmid for the queue */
3622         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3623
3624         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3625                mqd->cp_hqd_persistent_state);
3626
3627         /* activate the queue */
3628         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3629                mqd->cp_hqd_active);
3630
3631         if (ring->use_doorbell)
3632                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3633
3634         return 0;
3635 }
3636
3637 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3638 {
3639         struct amdgpu_device *adev = ring->adev;
3640         int j;
3641
3642         /* disable the queue if it's active */
3643         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3644
3645                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3646
3647                 for (j = 0; j < adev->usec_timeout; j++) {
3648                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3649                                 break;
3650                         udelay(1);
3651                 }
3652
3653                 if (j == adev->usec_timeout) {
3654                         DRM_DEBUG("KIQ dequeue request failed.\n");
3655
3656                         /* Manual disable if dequeue request times out */
3657                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3658                 }
3659
3660                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
3662         }
3663
3664         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3665         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3666         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
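             /* writing 0x40000000 sets DOORBELL_HIT (bit 30) to acknowledge
              * any pending doorbell before the control register is cleared
              */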
3667         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3668         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3669         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3670         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3671         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3672
3673         return 0;
3674 }
3675
3676 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3677 {
3678         struct amdgpu_device *adev = ring->adev;
3679         struct v9_mqd *mqd = ring->mqd_ptr;
3680         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3681
3682         gfx_v9_0_kiq_setting(ring);
3683
3684         if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3685                 /* reset MQD to a clean status */
3686                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3687                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3688
3689                 /* reset ring buffer */
3690                 ring->wptr = 0;
3691                 amdgpu_ring_clear_ring(ring);
3692
3693                 mutex_lock(&adev->srbm_mutex);
3694                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3695                 gfx_v9_0_kiq_init_register(ring);
3696                 soc15_grbm_select(adev, 0, 0, 0, 0);
3697                 mutex_unlock(&adev->srbm_mutex);
3698         } else {
3699                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3700                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3701                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3702                 mutex_lock(&adev->srbm_mutex);
3703                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3704                 gfx_v9_0_mqd_init(ring);
3705                 gfx_v9_0_kiq_init_register(ring);
3706                 soc15_grbm_select(adev, 0, 0, 0, 0);
3707                 mutex_unlock(&adev->srbm_mutex);
3708
3709                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3710                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3711         }
3712
3713         return 0;
3714 }
3715
3716 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3717 {
3718         struct amdgpu_device *adev = ring->adev;
3719         struct v9_mqd *mqd = ring->mqd_ptr;
3720         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3721
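             /* three cases: cold init builds the MQD from scratch and backs it
              * up, GPU reset restores the MQD from the backup and clears the
              * ring, and suspend/resume reuses the retained MQD and only
              * clears the ring
              */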
3722         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3723                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3724                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3725                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3726                 mutex_lock(&adev->srbm_mutex);
3727                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3728                 gfx_v9_0_mqd_init(ring);
3729                 soc15_grbm_select(adev, 0, 0, 0, 0);
3730                 mutex_unlock(&adev->srbm_mutex);
3731
3732                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3733                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3734         } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3735                 /* reset MQD to a clean status */
3736                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3737                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3738
3739                 /* reset ring buffer */
3740                 ring->wptr = 0;
3741                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
3742                 amdgpu_ring_clear_ring(ring);
3743         } else {
3744                 amdgpu_ring_clear_ring(ring);
3745         }
3746
3747         return 0;
3748 }
3749
3750 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3751 {
3752         struct amdgpu_ring *ring;
3753         int r;
3754
3755         ring = &adev->gfx.kiq.ring;
3756
3757         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3758         if (unlikely(r != 0))
3759                 return r;
3760
3761         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3762         if (unlikely(r != 0)) {
                     amdgpu_bo_unreserve(ring->mqd_obj);
3763                 return r;
             }
3764
3765         gfx_v9_0_kiq_init_queue(ring);
3766         amdgpu_bo_kunmap(ring->mqd_obj);
3767         ring->mqd_ptr = NULL;
3768         amdgpu_bo_unreserve(ring->mqd_obj);
3769         ring->sched.ready = true;
3770         return 0;
3771 }
3772
3773 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3774 {
3775         struct amdgpu_ring *ring = NULL;
3776         int r = 0, i;
3777
3778         gfx_v9_0_cp_compute_enable(adev, true);
3779
3780         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3781                 ring = &adev->gfx.compute_ring[i];
3782
3783                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3784                 if (unlikely(r != 0))
3785                         goto done;
3786                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3787                 if (!r) {
3788                         r = gfx_v9_0_kcq_init_queue(ring);
3789                         amdgpu_bo_kunmap(ring->mqd_obj);
3790                         ring->mqd_ptr = NULL;
3791                 }
3792                 amdgpu_bo_unreserve(ring->mqd_obj);
3793                 if (r)
3794                         goto done;
3795         }
3796
3797         r = amdgpu_gfx_enable_kcq(adev);
3798 done:
3799         return r;
3800 }
3801
3802 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3803 {
3804         int r, i;
3805         struct amdgpu_ring *ring;
3806
3807         if (!(adev->flags & AMD_IS_APU))
3808                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3809
3810         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3811                 if (adev->asic_type != CHIP_ARCTURUS) {
3812                         /* legacy firmware loading */
3813                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3814                         if (r)
3815                                 return r;
3816                 }
3817
3818                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3819                 if (r)
3820                         return r;
3821         }
3822
3823         r = gfx_v9_0_kiq_resume(adev);
3824         if (r)
3825                 return r;
3826
3827         if (adev->asic_type != CHIP_ARCTURUS) {
3828                 r = gfx_v9_0_cp_gfx_resume(adev);
3829                 if (r)
3830                         return r;
3831         }
3832
3833         r = gfx_v9_0_kcq_resume(adev);
3834         if (r)
3835                 return r;
3836
3837         if (adev->asic_type != CHIP_ARCTURUS) {
3838                 ring = &adev->gfx.gfx_ring[0];
3839                 r = amdgpu_ring_test_helper(ring);
3840                 if (r)
3841                         return r;
3842         }
3843
3844         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3845                 ring = &adev->gfx.compute_ring[i];
3846                 amdgpu_ring_test_helper(ring);
3847         }
3848
3849         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3850
3851         return 0;
3852 }
3853
3854 static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
3855 {
3856         u32 tmp;
3857
3858         if (adev->asic_type != CHIP_ARCTURUS)
3859                 return;
3860
3861         tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
3862         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
3863                                 adev->df.hash_status.hash_64k);
3864         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
3865                                 adev->df.hash_status.hash_2m);
3866         tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
3867                                 adev->df.hash_status.hash_1g);
3868         WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
3869 }
3870
3871 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3872 {
3873         if (adev->asic_type != CHIP_ARCTURUS)
3874                 gfx_v9_0_cp_gfx_enable(adev, enable);
3875         gfx_v9_0_cp_compute_enable(adev, enable);
3876 }
3877
3878 static int gfx_v9_0_hw_init(void *handle)
3879 {
3880         int r;
3881         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3882
3883         if (!amdgpu_sriov_vf(adev))
3884                 gfx_v9_0_init_golden_registers(adev);
3885
3886         gfx_v9_0_constants_init(adev);
3887
3888         gfx_v9_0_init_tcp_config(adev);
3889
3890         r = adev->gfx.rlc.funcs->resume(adev);
3891         if (r)
3892                 return r;
3893
3894         r = gfx_v9_0_cp_resume(adev);
3895         if (r)
3896                 return r;
3897
3898         return 0;
3899 }
3900
3901 static int gfx_v9_0_hw_fini(void *handle)
3902 {
3903         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3904
3905         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3906         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3907         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3908
3909         /* after a RAS fatal error, DF freeze and kcq disable would fail */
3910         if (!amdgpu_ras_intr_triggered())
3911                 /* disable KCQ to avoid CPC touching memory that is no longer valid */
3912                 amdgpu_gfx_disable_kcq(adev);
3913
3914         if (amdgpu_sriov_vf(adev)) {
3915                 gfx_v9_0_cp_gfx_enable(adev, false);
3916                 /* must disable polling for SRIOV when hw is finished, otherwise
3917                  * the CPC engine may keep fetching a WB address that is already
3918                  * invalid once sw is finished, triggering a DMAR read error on
3919                  * the hypervisor side.
3920                  */
3921                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3922                 return 0;
3923         }
3924
3925         /* Use the deinitialize sequence from CAIL when unbinding the device
3926          * from the driver, otherwise the KIQ hangs when binding back.
3927          */
3928         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3929                 mutex_lock(&adev->srbm_mutex);
3930                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3931                                 adev->gfx.kiq.ring.pipe,
3932                                 adev->gfx.kiq.ring.queue, 0);
3933                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3934                 soc15_grbm_select(adev, 0, 0, 0, 0);
3935                 mutex_unlock(&adev->srbm_mutex);
3936         }
3937
3938         gfx_v9_0_cp_enable(adev, false);
3939         adev->gfx.rlc.funcs->stop(adev);
3940
3941         return 0;
3942 }
3943
3944 static int gfx_v9_0_suspend(void *handle)
3945 {
3946         return gfx_v9_0_hw_fini(handle);
3947 }
3948
3949 static int gfx_v9_0_resume(void *handle)
3950 {
3951         return gfx_v9_0_hw_init(handle);
3952 }
3953
3954 static bool gfx_v9_0_is_idle(void *handle)
3955 {
3956         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3957
3958         return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3959                               GRBM_STATUS, GUI_ACTIVE);
3963 }
3964
3965 static int gfx_v9_0_wait_for_idle(void *handle)
3966 {
3967         unsigned i;
3968         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3969
3970         for (i = 0; i < adev->usec_timeout; i++) {
3971                 if (gfx_v9_0_is_idle(handle))
3972                         return 0;
3973                 udelay(1);
3974         }
3975         return -ETIMEDOUT;
3976 }
3977
3978 static int gfx_v9_0_soft_reset(void *handle)
3979 {
3980         u32 grbm_soft_reset = 0;
3981         u32 tmp;
3982         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3983
3984         /* GRBM_STATUS */
3985         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3986         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3987                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3988                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3989                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3990                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3991                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3992                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3993                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3994                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3995                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3996         }
3997
3998         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3999                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4000                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
4001         }
4002
4003         /* GRBM_STATUS2 */
4004         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
4005         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
4006                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
4007                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
4008
4010         if (grbm_soft_reset) {
4011                 /* stop the rlc */
4012                 adev->gfx.rlc.funcs->stop(adev);
4013
4014                 if (adev->asic_type != CHIP_ARCTURUS)
4015                         /* Disable GFX parsing/prefetching */
4016                         gfx_v9_0_cp_gfx_enable(adev, false);
4017
4018                 /* Disable MEC parsing/prefetching */
4019                 gfx_v9_0_cp_compute_enable(adev, false);
4020
4022                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4023                 tmp |= grbm_soft_reset;
4024                 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4025                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4026                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4027
4028                 udelay(50);
4029
4030                 tmp &= ~grbm_soft_reset;
4031                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
4032                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
4034
4035                 /* Wait a little for things to settle down */
4036                 udelay(50);
4037         }
4038         return 0;
4039 }
4040
4041 static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
4042 {
4043         signed long r, cnt = 0;
4044         unsigned long flags;
4045         uint32_t seq, reg_val_offs = 0;
4046         uint64_t value = 0;
4047         struct amdgpu_kiq *kiq = &adev->gfx.kiq;
4048         struct amdgpu_ring *ring = &kiq->ring;
4049
4050         BUG_ON(!ring->funcs->emit_rreg);
4051
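             /* copy the 64-bit GPU clock counter into a writeback slot via a
              * KIQ COPY_DATA packet, then poll a fence until the CP has
              * executed it
              */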
4052         spin_lock_irqsave(&kiq->ring_lock, flags);
4053         if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
4054                 pr_err("critical bug! too many kiq readers\n");
4055                 goto failed_unlock;
4056         }
4057         amdgpu_ring_alloc(ring, 32);
4058         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4059         amdgpu_ring_write(ring, 9 |     /* src: gpu clock count */
4060                                 (5 << 8) |      /* dst: memory */
4061                                 (1 << 16) |     /* count sel */
4062                                 (1 << 20));     /* write confirm */
4063         amdgpu_ring_write(ring, 0);
4064         amdgpu_ring_write(ring, 0);
4065         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4066                                 reg_val_offs * 4));
4067         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4068                                 reg_val_offs * 4));
4069         r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
4070         if (r)
4071                 goto failed_undo;
4072
4073         amdgpu_ring_commit(ring);
4074         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4075
4076         r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4077
4078         /* don't wait any longer in the gpu reset case because doing so may
4079          * block the gpu_recover() routine forever, e.g. this virt_kiq_rreg
4080          * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue()
4081          * would never return if we kept waiting here, hanging gpu_recover().
4082          *
4083          * also don't wait any longer in IRQ context
4084          */
4086         if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
4087                 goto failed_kiq_read;
4088
4089         might_sleep();
4090         while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
4091                 msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
4092                 r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
4093         }
4094
4095         if (cnt > MAX_KIQ_REG_TRY)
4096                 goto failed_kiq_read;
4097
4098         mb();
4099         value = (uint64_t)adev->wb.wb[reg_val_offs] |
4100                 (uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
4101         amdgpu_device_wb_free(adev, reg_val_offs);
4102         return value;
4103
4104 failed_undo:
4105         amdgpu_ring_undo(ring);
4106 failed_unlock:
4107         spin_unlock_irqrestore(&kiq->ring_lock, flags);
4108 failed_kiq_read:
4109         if (reg_val_offs)
4110                 amdgpu_device_wb_free(adev, reg_val_offs);
4111         pr_err("failed to read gpu clock\n");
4112         return ~0;
4113 }
4114
4115 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4116 {
4117         uint64_t clock;
4118
4119         amdgpu_gfx_off_ctrl(adev, false);
4120         mutex_lock(&adev->gfx.gpu_clock_mutex);
4121         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
4122                 clock = gfx_v9_0_kiq_read_clock(adev);
4123         } else {
4124                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
4125                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
4126                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
4127         }
4128         mutex_unlock(&adev->gfx.gpu_clock_mutex);
4129         amdgpu_gfx_off_ctrl(adev, true);
4130         return clock;
4131 }
4132
4133 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4134                                           uint32_t vmid,
4135                                           uint32_t gds_base, uint32_t gds_size,
4136                                           uint32_t gws_base, uint32_t gws_size,
4137                                           uint32_t oa_base, uint32_t oa_size)
4138 {
4139         struct amdgpu_device *adev = ring->adev;
4140
4141         /* GDS Base */
4142         gfx_v9_0_write_data_to_reg(ring, 0, false,
4143                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
4144                                    gds_base);
4145
4146         /* GDS Size */
4147         gfx_v9_0_write_data_to_reg(ring, 0, false,
4148                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
4149                                    gds_size);
4150
4151         /* GWS */
4152         gfx_v9_0_write_data_to_reg(ring, 0, false,
4153                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
4154                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
4155
4156         /* OA */
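             /* builds a mask of oa_size consecutive bits starting at oa_base */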
4157         gfx_v9_0_write_data_to_reg(ring, 0, false,
4158                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
4159                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
4160 }
4161
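     /* hand-assembled GFX9 compute shader that initializes the VGPRs (and LDS)
      * so the GPR banks start from a known EDC/ECC state; dispatched by
      * gfx_v9_0_do_edc_gpr_workarounds() below
      */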
4162 static const u32 vgpr_init_compute_shader[] =
4163 {
4164         0xb07c0000, 0xbe8000ff,
4165         0x000000f8, 0xbf110800,
4166         0x7e000280, 0x7e020280,
4167         0x7e040280, 0x7e060280,
4168         0x7e080280, 0x7e0a0280,
4169         0x7e0c0280, 0x7e0e0280,
4170         0x80808800, 0xbe803200,
4171         0xbf84fff5, 0xbf9c0000,
4172         0xd28c0001, 0x0001007f,
4173         0xd28d0001, 0x0002027e,
4174         0x10020288, 0xb8810904,
4175         0xb7814000, 0xd1196a01,
4176         0x00000301, 0xbe800087,
4177         0xbefc00c1, 0xd89c4000,
4178         0x00020201, 0xd89cc080,
4179         0x00040401, 0x320202ff,
4180         0x00000800, 0x80808100,
4181         0xbf84fff8, 0x7e020280,
4182         0xbf810000, 0x00000000,
4183 };
4184
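     /* hand-assembled shader that initializes the SGPRs; dispatched twice below
      * with different CU masks (0x000000ff / 0x0000ff00) so every CU is covered
      */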
4185 static const u32 sgpr_init_compute_shader[] =
4186 {
4187         0xb07c0000, 0xbe8000ff,
4188         0x0000005f, 0xbee50080,
4189         0xbe812c65, 0xbe822c65,
4190         0xbe832c65, 0xbe842c65,
4191         0xbe852c65, 0xb77c0005,
4192         0x80808500, 0xbf84fff8,
4193         0xbe800080, 0xbf810000,
4194 };
4195
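     /* Arcturus variant: the long run of 0xd3d940xx instructions appears to
      * initialize all 256 accumulation VGPRs introduced on gfx908 before the
      * same VGPR-init loop runs
      */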
4196 static const u32 vgpr_init_compute_shader_arcturus[] = {
4197         0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
4198         0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
4199         0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
4200         0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
4201         0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
4202         0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
4203         0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
4204         0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
4205         0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
4206         0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
4207         0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
4208         0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
4209         0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
4210         0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
4211         0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
4212         0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
4213         0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
4214         0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
4215         0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
4216         0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
4217         0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
4218         0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
4219         0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
4220         0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
4221         0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
4222         0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
4223         0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
4224         0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
4225         0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
4226         0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
4227         0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
4228         0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
4229         0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
4230         0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
4231         0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
4232         0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
4233         0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
4234         0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
4235         0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
4236         0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
4237         0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
4238         0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
4239         0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
4240         0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
4241         0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
4242         0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
4243         0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
4244         0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
4245         0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
4246         0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
4247         0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
4248         0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
4249         0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
4250         0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
4251         0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
4252         0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
4253         0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
4254         0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
4255         0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
4256         0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
4257         0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
4258         0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
4259         0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
4260         0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
4261         0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
4262         0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
4263         0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
4264         0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
4265         0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
4266         0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
4267         0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
4268         0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
4269         0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
4270         0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
4271         0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
4272         0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
4273         0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
4274         0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
4275         0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
4276         0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
4277         0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
4278         0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
4279         0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
4280         0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
4281         0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
4282         0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
4283         0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
4284         0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
4285         0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
4286         0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
4287         0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
4288         0xbf84fff8, 0xbf810000,
4289 };
4290
4291 /* When the register arrays below change, please update gpr_reg_size
4292  * and sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds
4293  * to cover all gfx9 ASICs. */
4294 static const struct soc15_reg_entry vgpr_init_regs[] = {
4295    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4296    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4297    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4298    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4299    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
4300    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4301    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4302    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4303    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4304    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4305    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4306    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4307    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4308    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4309 };
4310
4311 static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
4312    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4313    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4314    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
4315    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4316    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
4317    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
4318    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
4319    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
4320    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
4321    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
4322    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
4323    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
4324    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
4325    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
4326 };
4327
4328 static const struct soc15_reg_entry sgpr1_init_regs[] = {
4329    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4330    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4331    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4332    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4333    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4334    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4335    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
4336    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
4337    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
4338    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
4339    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
4340    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
4341    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
4342    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
4343 };
4344
4345 static const struct soc15_reg_entry sgpr2_init_regs[] = {
4346    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
4347    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
4348    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
4349    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
4350    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
4351    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
4352    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
4353    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
4354    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
4355    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
4356    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
4357    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
4358    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
4359    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
4360 };
4361
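     /* trailing fields are { reg_value, se_num, instance }; se_num and
      * instance tell the EDC scan how many shader engines and per-SE
      * instances to read
      */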
4362 static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
4363    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
4364    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
4365    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
4366    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
4367    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
4368    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
4369    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
4370    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
4371    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
4372    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
4373    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
4374    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
4375    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
4376    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
4377    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
4378    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
4379    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
4380    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
4381    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
4382    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
4383    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
4384    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
4385    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
4386    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
4387    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
4388    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
4389    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4390    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4391    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4392    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4393    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4394    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4395    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4396 };
4397
4398 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4399 {
4400         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4401         int i, r;
4402
4403         /* only supported when RAS is enabled */
4404         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4405                 return 0;
4406
4407         r = amdgpu_ring_alloc(ring, 7);
4408         if (r) {
4409                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4410                         ring->name, r);
4411                 return r;
4412         }
4413
4414         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4415         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4416
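             /* one CPDMA transfer of zeros across the whole GDS aperture:
              * dst_sel 1 selects GDS, src_sel 2 selects immediate data, and
              * RAW_WAIT orders it against the preceding write
              */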
4417         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4418         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4419                                 PACKET3_DMA_DATA_DST_SEL(1) |
4420                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4421                                 PACKET3_DMA_DATA_ENGINE(0)));
4422         amdgpu_ring_write(ring, 0);
4423         amdgpu_ring_write(ring, 0);
4424         amdgpu_ring_write(ring, 0);
4425         amdgpu_ring_write(ring, 0);
4426         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4427                                 adev->gds.gds_size);
4428
4429         amdgpu_ring_commit(ring);
4430
4431         for (i = 0; i < adev->usec_timeout; i++) {
4432                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4433                         break;
4434                 udelay(1);
4435         }
4436
4437         if (i >= adev->usec_timeout)
4438                 r = -ETIMEDOUT;
4439
4440         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4441
4442         return r;
4443 }
4444
4445 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4446 {
4447         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4448         struct amdgpu_ib ib;
4449         struct dma_fence *f = NULL;
4450         int r, i;
4451         unsigned total_size, vgpr_offset, sgpr_offset;
4452         u64 gpu_addr;
4453
4454         int compute_dim_x = adev->gfx.config.max_shader_engines *
4455                                                 adev->gfx.config.max_cu_per_sh *
4456                                                 adev->gfx.config.max_sh_per_se;
4457         int sgpr_work_group_size = 5;
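             /* six fixed COMPUTE_* registers plus one
              * COMPUTE_STATIC_THREAD_MGMT_SEn entry per shader engine
              */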
4458         int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
4459         int vgpr_init_shader_size;
4460         const u32 *vgpr_init_shader_ptr;
4461         const struct soc15_reg_entry *vgpr_init_regs_ptr;
4462
4463         /* only supported when RAS is enabled */
4464         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4465                 return 0;
4466
4467         /* bail if the compute ring is not ready */
4468         if (!ring->sched.ready)
4469                 return 0;
4470
4471         if (adev->asic_type == CHIP_ARCTURUS) {
4472                 vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
4473                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
4474                 vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
4475         } else {
4476                 vgpr_init_shader_ptr = vgpr_init_compute_shader;
4477                 vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
4478                 vgpr_init_regs_ptr = vgpr_init_regs;
4479         }
4480
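             /* per dispatch: 3 dwords for each SET_SH_REG register write,
              * 4 for the COMPUTE_PGM_LO/HI write, 5 for DISPATCH_DIRECT and
              * 2 for the CS partial flush, at 4 bytes per dword
              */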
4481         total_size =
4482                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
4483         total_size +=
4484                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
4485         total_size +=
4486                 (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
4487         total_size = ALIGN(total_size, 256);
4488         vgpr_offset = total_size;
4489         total_size += ALIGN(vgpr_init_shader_size, 256);
4490         sgpr_offset = total_size;
4491         total_size += sizeof(sgpr_init_compute_shader);
4492
4493         /* allocate an indirect buffer to put the commands in */
4494         memset(&ib, 0, sizeof(ib));
4495         r = amdgpu_ib_get(adev, NULL, total_size,
4496                                         AMDGPU_IB_POOL_DIRECT, &ib);
4497         if (r) {
4498                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4499                 return r;
4500         }
4501
4502         /* load the compute shaders */
4503         for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
4504                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
4505
4506         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4507                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4508
4509         /* init the ib length to 0 */
4510         ib.length_dw = 0;
4511
4512         /* VGPR */
4513         /* write the register state for the compute dispatch */
4514         for (i = 0; i < gpr_reg_size; i++) {
4515                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4516                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
4517                                                                 - PACKET3_SET_SH_REG_START;
4518                 ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
4519         }
4520         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4521         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4522         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4523         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4524                                                         - PACKET3_SET_SH_REG_START;
4525         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4526         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4527
4528         /* write dispatch packet */
4529         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4530         ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
4531         ib.ptr[ib.length_dw++] = 1; /* y */
4532         ib.ptr[ib.length_dw++] = 1; /* z */
4533         ib.ptr[ib.length_dw++] =
4534                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4535
4536         /* write CS partial flush packet */
4537         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4538         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4539
4540         /* SGPR1 */
4541         /* write the register state for the compute dispatch */
4542         for (i = 0; i < gpr_reg_size; i++) {
4543                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4544                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
4545                                                                 - PACKET3_SET_SH_REG_START;
4546                 ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
4547         }
4548         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4549         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4550         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4551         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4552                                                         - PACKET3_SET_SH_REG_START;
4553         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4554         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4555
4556         /* write dispatch packet */
4557         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4558         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4559         ib.ptr[ib.length_dw++] = 1; /* y */
4560         ib.ptr[ib.length_dw++] = 1; /* z */
4561         ib.ptr[ib.length_dw++] =
4562                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4563
4564         /* write CS partial flush packet */
4565         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4566         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4567
4568         /* SGPR2 */
4569         /* write the register state for the compute dispatch */
4570         for (i = 0; i < gpr_reg_size; i++) {
4571                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4572                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
4573                                                                 - PACKET3_SET_SH_REG_START;
4574                 ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
4575         }
4576         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4577         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4578         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4579         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4580                                                         - PACKET3_SET_SH_REG_START;
4581         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4582         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4583
4584         /* write dispatch packet */
4585         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4586         ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
4587         ib.ptr[ib.length_dw++] = 1; /* y */
4588         ib.ptr[ib.length_dw++] = 1; /* z */
4589         ib.ptr[ib.length_dw++] =
4590                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4591
4592         /* write CS partial flush packet */
4593         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4594         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4595
4596         /* schedule the ib on the ring */
4597         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4598         if (r) {
4599                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4600                 goto fail;
4601         }
4602
4603         /* wait for the GPU to finish processing the IB */
4604         r = dma_fence_wait(f, false);
4605         if (r) {
4606                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4607                 goto fail;
4608         }
4609
4610 fail:
4611         amdgpu_ib_free(adev, &ib, NULL);
4612         dma_fence_put(f);
4613
4614         return r;
4615 }
4616
4617 static int gfx_v9_0_early_init(void *handle)
4618 {
4619         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4620
4621         if (adev->asic_type == CHIP_ARCTURUS)
4622                 adev->gfx.num_gfx_rings = 0;
4623         else
4624                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4625         adev->gfx.num_compute_rings = amdgpu_num_kcq;
4626         gfx_v9_0_set_kiq_pm4_funcs(adev);
4627         gfx_v9_0_set_ring_funcs(adev);
4628         gfx_v9_0_set_irq_funcs(adev);
4629         gfx_v9_0_set_gds_init(adev);
4630         gfx_v9_0_set_rlc_funcs(adev);
4631
4632         return 0;
4633 }
4634
4635 static int gfx_v9_0_ecc_late_init(void *handle)
4636 {
4637         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4638         int r;
4639
4640         /*
4641          * Temporary workaround for an issue where CP firmware fails to
4642          * update the read pointer while CPDMA is writing the clearing
4643          * operation to GDS during the suspend/resume sequence on several
4644          * cards. So just limit this operation to the cold boot sequence.
4645          */
4646         if (!adev->in_suspend) {
4647                 r = gfx_v9_0_do_edc_gds_workarounds(adev);
4648                 if (r)
4649                         return r;
4650         }
4651
4652         /* requires IBs so do in late init after IB pool is initialized */
4653         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4654         if (r)
4655                 return r;
4656
4657         if (adev->gfx.funcs &&
4658             adev->gfx.funcs->reset_ras_error_count)
4659                 adev->gfx.funcs->reset_ras_error_count(adev);
4660
4661         r = amdgpu_gfx_ras_late_init(adev);
4662         if (r)
4663                 return r;
4664
4665         return 0;
4666 }
4667
4668 static int gfx_v9_0_late_init(void *handle)
4669 {
4670         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4671         int r;
4672
4673         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4674         if (r)
4675                 return r;
4676
4677         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4678         if (r)
4679                 return r;
4680
4681         r = gfx_v9_0_ecc_late_init(handle);
4682         if (r)
4683                 return r;
4684
4685         return 0;
4686 }
4687
4688 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4689 {
4690         uint32_t rlc_setting;
4691
4692         /* if the RLC F32 core is not running, RLC is considered disabled */
4693         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4694         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4695                 return false;
4696
4697         return true;
4698 }
4699
4700 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4701 {
4702         uint32_t data;
4703         unsigned i;
4704
4705         data = RLC_SAFE_MODE__CMD_MASK;
4706         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4707         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4708
4709         /* wait for RLC_SAFE_MODE */
4710         for (i = 0; i < adev->usec_timeout; i++) {
4711                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4712                         break;
4713                 udelay(1);
4714         }
4715 }
4716
4717 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4718 {
4719         uint32_t data;
4720
4721         data = RLC_SAFE_MODE__CMD_MASK;
4722         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4723 }
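     /*
      * Illustrative sketch (hypothetical helper, not part of the driver):
      * how callers in this file bracket RLC-sensitive register updates with
      * the safe-mode handshake implemented above, using a read-modify-write
      * that skips the register write when nothing changed.
      */
     static void __maybe_unused gfx_v9_0_safe_mode_rmw_sketch(struct amdgpu_device *adev)
     {
             uint32_t def, data;

             amdgpu_gfx_rlc_enter_safe_mode(adev);

             def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
             data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK;
             if (def != data)
                     WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

             amdgpu_gfx_rlc_exit_safe_mode(adev);
     }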
4724
4725 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4726                                                 bool enable)
4727 {
4728         amdgpu_gfx_rlc_enter_safe_mode(adev);
4729
4730         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4731                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4732                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4733                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4734         } else {
4735                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4736                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4737                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4738         }
4739
4740         amdgpu_gfx_rlc_exit_safe_mode(adev);
4741 }
4742
4743 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4744                                                 bool enable)
4745 {
4746         /* TODO: double check if we need to perform under safe mode */
4747         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4748
4749         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4750                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4751         else
4752                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4753
4754         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4755                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4756         else
4757                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4758
4759         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4760 }
4761
4762 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4763                                                       bool enable)
4764 {
4765         uint32_t data, def;
4766
4767         amdgpu_gfx_rlc_enter_safe_mode(adev);
4768
4769         /* It is disabled by HW by default */
4770         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4771                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4772                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4773
4774                 if (adev->asic_type != CHIP_VEGA12)
4775                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4776
4777                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4778                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4779                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4780
4781                 /* only for Vega10 & Raven1 */
4782                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4783
4784                 if (def != data)
4785                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4786
4787                 /* MGLS is a global flag to control all MGLS in GFX */
4788                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4789                         /* 2 - RLC memory Light sleep */
4790                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4791                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4792                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4793                                 if (def != data)
4794                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4795                         }
4796                         /* 3 - CP memory Light sleep */
4797                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4798                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4799                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4800                                 if (def != data)
4801                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4802                         }
4803                 }
4804         } else {
4805                 /* 1 - MGCG_OVERRIDE */
4806                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4807
4808                 if (adev->asic_type != CHIP_VEGA12)
4809                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4810
4811                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4812                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4813                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4814                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4815
4816                 if (def != data)
4817                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4818
4819                 /* 2 - disable MGLS in RLC */
4820                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4821                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4822                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4823                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4824                 }
4825
4826                 /* 3 - disable MGLS in CP */
4827                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4828                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4829                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4830                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4831                 }
4832         }
4833
4834         amdgpu_gfx_rlc_exit_safe_mode(adev);
4835 }
4836
4837 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4838                                            bool enable)
4839 {
4840         uint32_t data, def;
4841
4842         if (adev->asic_type == CHIP_ARCTURUS)
4843                 return;
4844
4845         amdgpu_gfx_rlc_enter_safe_mode(adev);
4846
4847         /* Enable 3D CGCG/CGLS */
4848         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4849                 /* write cmd to clear the cgcg/cgls override */
4850                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4851                 /* unset CGCG override */
4852                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4853                 /* update CGCG and CGLS override bits */
4854                 if (def != data)
4855                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4856
4857                 /* enable the 3D CGCG FSM (0x0000363f) */
4858                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4859
4860                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4861                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4862                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4863                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4864                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4865                 if (def != data)
4866                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4867
4868                 /* set IDLE_POLL_COUNT(0x00900100) */
4869                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4870                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4871                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4872                 if (def != data)
4873                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4874         } else {
4875                 /* Disable CGCG/CGLS */
4876                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4877                 /* disable cgcg, cgls should be disabled */
4878                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4879                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4880                 /* disable cgcg and cgls in FSM */
4881                 if (def != data)
4882                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4883         }
4884
4885         amdgpu_gfx_rlc_exit_safe_mode(adev);
4886 }
4887
4888 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4889                                                       bool enable)
4890 {
4891         uint32_t def, data;
4892
4893         amdgpu_gfx_rlc_enter_safe_mode(adev);
4894
4895         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4896                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4897                 /* unset CGCG override */
4898                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4899                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4900                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4901                 else
4902                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4903                 /* update CGCG and CGLS override bits */
4904                 if (def != data)
4905                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4906
4907                 /* enable the CGCG FSM (0x0000363F) */
4908                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4909
4910                 if (adev->asic_type == CHIP_ARCTURUS)
4911                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4912                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4913                 else
4914                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4915                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4916                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4917                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4918                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4919                 if (def != data)
4920                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4921
4922                 /* set IDLE_POLL_COUNT(0x00900100) */
4923                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4924                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4925                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4926                 if (def != data)
4927                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4928         } else {
4929                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4930                 /* reset CGCG/CGLS bits */
4931                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4932                 /* disable cgcg and cgls in FSM */
4933                 if (def != data)
4934                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4935         }
4936
4937         amdgpu_gfx_rlc_exit_safe_mode(adev);
4938 }
4939
4940 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4941                                             bool enable)
4942 {
4943         if (enable) {
4944                 /* CGCG/CGLS should be enabled after MGCG/MGLS
4945                  * ===  MGCG + MGLS ===
4946                  */
4947                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4948                 /* ===  CGCG /CGLS for GFX 3D Only === */
4949                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4950                 /* ===  CGCG + CGLS === */
4951                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4952         } else {
4953                 /* CGCG/CGLS should be disabled before MGCG/MGLS
4954                  * ===  CGCG + CGLS ===
4955                  */
4956                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4957                 /* ===  CGCG /CGLS for GFX 3D Only === */
4958                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4959                 /* ===  MGCG + MGLS === */
4960                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4961         }
4962         return 0;
4963 }
4964
4965 static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4966 {
4967         u32 reg, data;
4968
4969         reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
4970         if (amdgpu_sriov_is_pp_one_vf(adev))
4971                 data = RREG32_NO_KIQ(reg);
4972         else
4973                 data = RREG32(reg);
4974
4975         data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4976         data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4977
4978         if (amdgpu_sriov_is_pp_one_vf(adev))
4979                 WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
4980         else
4981                 WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
4982 }
4983
4984 static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
4985                                         uint32_t offset,
4986                                         struct soc15_reg_rlcg *entries, int arr_size)
4987 {
4988         int i;
4989         uint32_t reg;
4990
4991         if (!entries)
4992                 return false;
4993
4994         for (i = 0; i < arr_size; i++) {
4995                 const struct soc15_reg_rlcg *entry;
4996
4997                 entry = &entries[i];
4998                 reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
4999                 if (offset == reg)
5000                         return true;
5001         }
5002
5003         return false;
5004 }
5005
5006 static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
5007 {
5008         return gfx_v9_0_check_rlcg_range(adev, offset,
5009                                         (void *)rlcg_access_gc_9_0,
5010                                         ARRAY_SIZE(rlcg_access_gc_9_0));
5011 }
5012
5013 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
5014         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
5015         .set_safe_mode = gfx_v9_0_set_safe_mode,
5016         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
5017         .init = gfx_v9_0_rlc_init,
5018         .get_csb_size = gfx_v9_0_get_csb_size,
5019         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
5020         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
5021         .resume = gfx_v9_0_rlc_resume,
5022         .stop = gfx_v9_0_rlc_stop,
5023         .reset = gfx_v9_0_rlc_reset,
5024         .start = gfx_v9_0_rlc_start,
5025         .update_spm_vmid = gfx_v9_0_update_spm_vmid,
5026         .rlcg_wreg = gfx_v9_0_rlcg_wreg,
5027         .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
5028 };
5029
5030 static int gfx_v9_0_set_powergating_state(void *handle,
5031                                           enum amd_powergating_state state)
5032 {
5033         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5034         bool enable = (state == AMD_PG_STATE_GATE);
5035
5036         switch (adev->asic_type) {
5037         case CHIP_RAVEN:
5038         case CHIP_RENOIR:
5039                 if (!enable)
5040                         amdgpu_gfx_off_ctrl(adev, false);
5041
5042                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
5043                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
5044                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
5045                 } else {
5046                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
5047                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
5048                 }
5049
5050                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
5051                         gfx_v9_0_enable_cp_power_gating(adev, true);
5052                 else
5053                         gfx_v9_0_enable_cp_power_gating(adev, false);
5054
5055                 /* update gfx cgpg state */
5056                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
5057
5058                 /* update mgcg state */
5059                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
5060
5061                 if (enable)
5062                         amdgpu_gfx_off_ctrl(adev, true);
5063                 break;
5064         case CHIP_VEGA12:
5065                 amdgpu_gfx_off_ctrl(adev, enable);
5066                 break;
5067         default:
5068                 break;
5069         }
5070
5071         return 0;
5072 }
5073
5074 static int gfx_v9_0_set_clockgating_state(void *handle,
5075                                           enum amd_clockgating_state state)
5076 {
5077         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5078
5079         if (amdgpu_sriov_vf(adev))
5080                 return 0;
5081
5082         switch (adev->asic_type) {
5083         case CHIP_VEGA10:
5084         case CHIP_VEGA12:
5085         case CHIP_VEGA20:
5086         case CHIP_RAVEN:
5087         case CHIP_ARCTURUS:
5088         case CHIP_RENOIR:
5089                 gfx_v9_0_update_gfx_clock_gating(adev,
5090                                                  state == AMD_CG_STATE_GATE);
5091                 break;
5092         default:
5093                 break;
5094         }
5095         return 0;
5096 }
5097
5098 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
5099 {
5100         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5101         int data;
5102
5103         if (amdgpu_sriov_vf(adev))
5104                 *flags = 0;
5105
5106         /* AMD_CG_SUPPORT_GFX_MGCG */
5107         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
5108         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5109                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5110
5111         /* AMD_CG_SUPPORT_GFX_CGCG */
5112         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
5113         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5114                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5115
5116         /* AMD_CG_SUPPORT_GFX_CGLS */
5117         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5118                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5119
5120         /* AMD_CG_SUPPORT_GFX_RLC_LS */
5121         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
5122         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
5123                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
5124
5125         /* AMD_CG_SUPPORT_GFX_CP_LS */
5126         data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
5127         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
5128                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
5129
5130         if (adev->asic_type != CHIP_ARCTURUS) {
5131                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5132                 data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
5133                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5134                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5135
5136                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5137                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5138                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5139         }
5140 }
5141
5142 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
5143 {
5144         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 uses a 32-bit rptr */
5145 }
5146
5147 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5148 {
5149         struct amdgpu_device *adev = ring->adev;
5150         u64 wptr;
5151
5152         /* XXX check if swapping is necessary on BE */
5153         if (ring->use_doorbell) {
5154                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
5155         } else {
5156                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
5157                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
5158         }
5159
5160         return wptr;
5161 }
5162
5163 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5164 {
5165         struct amdgpu_device *adev = ring->adev;
5166
5167         if (ring->use_doorbell) {
5168                 /* XXX check if swapping is necessary on BE */
5169                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5170                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5171         } else {
5172                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
5173                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
5174         }
5175 }
5176
5177 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5178 {
5179         struct amdgpu_device *adev = ring->adev;
5180         u32 ref_and_mask, reg_mem_engine;
5181         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5182
5183         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
5184                 switch (ring->me) {
5185                 case 1:
5186                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
5187                         break;
5188                 case 2:
5189                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
5190                         break;
5191                 default:
5192                         return;
5193                 }
5194                 reg_mem_engine = 0;
5195         } else {
5196                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5197                 reg_mem_engine = 1; /* pfp */
5198         }
5199
5200         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5201                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5202                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5203                               ref_and_mask, ref_and_mask, 0x20);
5204 }
5205
5206 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5207                                         struct amdgpu_job *job,
5208                                         struct amdgpu_ib *ib,
5209                                         uint32_t flags)
5210 {
5211         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5212         u32 header, control = 0;
5213
5214         if (ib->flags & AMDGPU_IB_FLAG_CE)
5215                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
5216         else
5217                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5218
5219         control |= ib->length_dw | (vmid << 24);
5220
5221         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5222                 control |= INDIRECT_BUFFER_PRE_ENB(1);
5223
5224                 if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
5225                         gfx_v9_0_ring_emit_de_meta(ring);
5226         }
5227
5228         amdgpu_ring_write(ring, header);
5229         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5230         amdgpu_ring_write(ring,
5231 #ifdef __BIG_ENDIAN
5232                 (2 << 0) |
5233 #endif
5234                 lower_32_bits(ib->gpu_addr));
5235         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5236         amdgpu_ring_write(ring, control);
5237 }
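     /*
      * For reference, the INDIRECT_BUFFER packet emitted above occupies 4 DWs:
      * the header, the IB address low DW (carrying the byte-swap control bits
      * on big-endian builds), the IB address high DW, and a control DW holding
      * the IB length in DWs plus the VMID in bits [31:24].
      */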
5238
5239 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5240                                           struct amdgpu_job *job,
5241                                           struct amdgpu_ib *ib,
5242                                           uint32_t flags)
5243 {
5244         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5245         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5246
5247         /* Currently there is a high probability of a wave ID mismatch
5248          * between ME and GDS, leading to a HW deadlock, because ME
5249          * generates different wave IDs than the GDS expects. This happens
5250          * randomly when at least 5 compute pipes use GDS ordered append.
5251          * The wave IDs generated by ME are also wrong after suspend/resume.
5252          * Those are probably bugs somewhere else in the kernel driver.
5253          *
5254          * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5255          * GDS to 0 for this ring (me/pipe).
5256          */
5257         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5258                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5259                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
5260                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5261         }
5262
5263         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5264         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5265         amdgpu_ring_write(ring,
5266 #ifdef __BIG_ENDIAN
5267                                 (2 << 0) |
5268 #endif
5269                                 lower_32_bits(ib->gpu_addr));
5270         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5271         amdgpu_ring_write(ring, control);
5272 }
5273
5274 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5275                                      u64 seq, unsigned flags)
5276 {
5277         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5278         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5279         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
5280
5281         /* RELEASE_MEM - flush caches, send int */
5282         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5283         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
5284                                                EOP_TC_NC_ACTION_EN) :
5285                                               (EOP_TCL1_ACTION_EN |
5286                                                EOP_TC_ACTION_EN |
5287                                                EOP_TC_WB_ACTION_EN |
5288                                                EOP_TC_MD_ACTION_EN)) |
5289                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5290                                  EVENT_INDEX(5)));
5291         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
5292
5293         /*
5294          * the address must be Qword aligned for a 64-bit write, and Dword
5295          * aligned when only the low 32 bits are written (high discarded)
5296          */
5297         if (write64bit)
5298                 BUG_ON(addr & 0x7);
5299         else
5300                 BUG_ON(addr & 0x3);
5301         amdgpu_ring_write(ring, lower_32_bits(addr));
5302         amdgpu_ring_write(ring, upper_32_bits(addr));
5303         amdgpu_ring_write(ring, lower_32_bits(seq));
5304         amdgpu_ring_write(ring, upper_32_bits(seq));
5305         amdgpu_ring_write(ring, 0);
5306 }
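     /*
      * For reference, the RELEASE_MEM packet emitted above occupies 8 DWs:
      * the header, a cache-action/event DW, a data/interrupt select DW, the
      * address low/high pair, the sequence low/high pair, and a trailing
      * zero DW.
      */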
5307
5308 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5309 {
5310         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5311         uint32_t seq = ring->fence_drv.sync_seq;
5312         uint64_t addr = ring->fence_drv.gpu_addr;
5313
5314         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
5315                               lower_32_bits(addr), upper_32_bits(addr),
5316                               seq, 0xffffffff, 4);
5317 }
5318
5319 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5320                                         unsigned vmid, uint64_t pd_addr)
5321 {
5322         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5323
5324         /* compute doesn't have PFP */
5325         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5326                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5327                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5328                 amdgpu_ring_write(ring, 0x0);
5329         }
5330 }
5331
5332 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5333 {
5334         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware uses a 32-bit rptr */
5335 }
5336
5337 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5338 {
5339         u64 wptr;
5340
5341         /* XXX check if swapping is necessary on BE */
5342         if (ring->use_doorbell)
5343                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
5344         else
5345                 BUG();
5346         return wptr;
5347 }
5348
5349 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5350 {
5351         struct amdgpu_device *adev = ring->adev;
5352
5353         /* XXX check if swapping is necessary on BE */
5354         if (ring->use_doorbell) {
5355                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
5356                 WDOORBELL64(ring->doorbell_index, ring->wptr);
5357         } else {
5358                 BUG(); /* only DOORBELL method supported on gfx9 now */
5359         }
5360 }
5361
5362 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5363                                          u64 seq, unsigned int flags)
5364 {
5365         struct amdgpu_device *adev = ring->adev;
5366
5367         /* we only allocate 32 bits for each seq writeback address */
5368         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5369
5370         /* write fence seq to the "addr" */
5371         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5372         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5373                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5374         amdgpu_ring_write(ring, lower_32_bits(addr));
5375         amdgpu_ring_write(ring, upper_32_bits(addr));
5376         amdgpu_ring_write(ring, lower_32_bits(seq));
5377
5378         if (flags & AMDGPU_FENCE_FLAG_INT) {
5379                 /* set register to trigger INT */
5380                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5381                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5382                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5383                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
5384                 amdgpu_ring_write(ring, 0);
5385                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5386         }
5387 }
5388
5389 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5390 {
5391         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5392         amdgpu_ring_write(ring, 0);
5393 }
5394
5395 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5396 {
5397         struct v9_ce_ib_state ce_payload = {0};
5398         uint64_t csa_addr;
5399         int cnt;
5400
5401         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5402         csa_addr = amdgpu_csa_vaddr(ring->adev);
5403
5404         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5405         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5406                                  WRITE_DATA_DST_SEL(8) |
5407                                  WR_CONFIRM) |
5408                                  WRITE_DATA_CACHE_POLICY(0));
5409         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5410         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5411         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5412 }
5413
5414 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5415 {
5416         struct v9_de_ib_state de_payload = {0};
5417         uint64_t csa_addr, gds_addr;
5418         int cnt;
5419
5420         csa_addr = amdgpu_csa_vaddr(ring->adev);
5421         gds_addr = csa_addr + 4096;
5422         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5423         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5424
5425         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5426         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5427         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5428                                  WRITE_DATA_DST_SEL(8) |
5429                                  WR_CONFIRM) |
5430                                  WRITE_DATA_CACHE_POLICY(0));
5431         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5432         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5433         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5434 }
5435
5436 static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5437                                    bool secure)
5438 {
5439         uint32_t v = secure ? FRAME_TMZ : 0;
5440
5441         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5442         amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5443 }
5444
5445 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5446 {
5447         uint32_t dw2 = 0;
5448
5449         if (amdgpu_sriov_vf(ring->adev))
5450                 gfx_v9_0_ring_emit_ce_meta(ring);
5451
5452         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5453         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5454                 /* set load_global_config & load_global_uconfig */
5455                 dw2 |= 0x8001;
5456                 /* set load_cs_sh_regs */
5457                 dw2 |= 0x01000000;
5458                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5459                 dw2 |= 0x10002;
5460
5461                 /* set load_ce_ram if a preamble is present */
5462                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5463                         dw2 |= 0x10000000;
5464         } else {
5465                 /* still load_ce_ram the first time a preamble is present,
5466                  * even though no context switch happens.
5467                  */
5468                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5469                         dw2 |= 0x10000000;
5470         }
5471
5472         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5473         amdgpu_ring_write(ring, dw2);
5474         amdgpu_ring_write(ring, 0);
5475 }
5476
5477 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5478 {
5479         unsigned ret;
5480         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5481         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5482         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5483         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
5484         ret = ring->wptr & ring->buf_mask;
5485         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5486         return ret;
5487 }
5488
5489 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5490 {
5491         unsigned cur;
5492         BUG_ON(offset > ring->buf_mask);
5493         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5494
5495         cur = (ring->wptr & ring->buf_mask) - 1;
5496         if (likely(cur > offset))
5497                 ring->ring[offset] = cur - offset;
5498         else
5499                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5500 }
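     /*
      * Illustrative sketch (hypothetical caller, not part of the driver):
      * the two helpers above are used as a pair -- emit the COND_EXEC packet
      * with a dummy DW count, remember where that DW lives in the ring, then
      * patch in the real count once the conditional packets have been emitted.
      */
     static void __maybe_unused gfx_v9_0_cond_exec_usage_sketch(struct amdgpu_ring *ring)
     {
             unsigned int patch_offset;

             patch_offset = gfx_v9_0_ring_emit_init_cond_exec(ring);
             /* ... emit the packets that the CP may skip ... */
             gfx_v9_0_ring_emit_patch_cond_exec(ring, patch_offset);
     }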
5501
5502 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5503                                     uint32_t reg_val_offs)
5504 {
5505         struct amdgpu_device *adev = ring->adev;
5506
5507         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5508         amdgpu_ring_write(ring, 0 |     /* src: register*/
5509                                 (5 << 8) |      /* dst: memory */
5510                                 (1 << 20));     /* write confirm */
5511         amdgpu_ring_write(ring, reg);
5512         amdgpu_ring_write(ring, 0);
5513         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5514                                 reg_val_offs * 4));
5515         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5516                                 reg_val_offs * 4));
5517 }
5518
5519 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5520                                     uint32_t val)
5521 {
5522         uint32_t cmd = 0;
5523
5524         switch (ring->funcs->type) {
5525         case AMDGPU_RING_TYPE_GFX:
5526                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5527                 break;
5528         case AMDGPU_RING_TYPE_KIQ:
5529                 cmd = (1 << 16); /* no inc addr */
5530                 break;
5531         default:
5532                 cmd = WR_CONFIRM;
5533                 break;
5534         }
5535         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5536         amdgpu_ring_write(ring, cmd);
5537         amdgpu_ring_write(ring, reg);
5538         amdgpu_ring_write(ring, 0);
5539         amdgpu_ring_write(ring, val);
5540 }
5541
5542 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5543                                         uint32_t val, uint32_t mask)
5544 {
5545         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5546 }
5547
5548 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5549                                                   uint32_t reg0, uint32_t reg1,
5550                                                   uint32_t ref, uint32_t mask)
5551 {
5552         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5553         struct amdgpu_device *adev = ring->adev;
5554         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5555                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5556
5557         if (fw_version_ok)
5558                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5559                                       ref, mask, 0x20);
5560         else
5561                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5562                                                            ref, mask);
5563 }
5564
5565 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5566 {
5567         struct amdgpu_device *adev = ring->adev;
5568         uint32_t value = 0;
5569
5570         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5571         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5572         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5573         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5574         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5575 }
5576
5577 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5578                                                  enum amdgpu_interrupt_state state)
5579 {
5580         switch (state) {
5581         case AMDGPU_IRQ_STATE_DISABLE:
5582         case AMDGPU_IRQ_STATE_ENABLE:
5583                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5584                                TIME_STAMP_INT_ENABLE,
5585                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5586                 break;
5587         default:
5588                 break;
5589         }
5590 }
5591
5592 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5593                                                      int me, int pipe,
5594                                                      enum amdgpu_interrupt_state state)
5595 {
5596         u32 mec_int_cntl, mec_int_cntl_reg;
5597
5598         /*
5599          * amdgpu controls only the first MEC. That's why this function only
5600          * handles the setting of interrupts for this specific MEC. All other
5601          * pipes' interrupts are set by amdkfd.
5602          */
5603
5604         if (me == 1) {
5605                 switch (pipe) {
5606                 case 0:
5607                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5608                         break;
5609                 case 1:
5610                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5611                         break;
5612                 case 2:
5613                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5614                         break;
5615                 case 3:
5616                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5617                         break;
5618                 default:
5619                         DRM_DEBUG("invalid pipe %d\n", pipe);
5620                         return;
5621                 }
5622         } else {
5623                 DRM_DEBUG("invalid me %d\n", me);
5624                 return;
5625         }
5626
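             /*
              * All CP_ME1_PIPEn_INT_CNTL registers share the same field
              * layout, which is why the PIPE0 field definitions are reused
              * for every pipe below.
              */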
5627         switch (state) {
5628         case AMDGPU_IRQ_STATE_DISABLE:
5629                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5630                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5631                                              TIME_STAMP_INT_ENABLE, 0);
5632                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5633                 break;
5634         case AMDGPU_IRQ_STATE_ENABLE:
5635                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5636                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5637                                              TIME_STAMP_INT_ENABLE, 1);
5638                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5639                 break;
5640         default:
5641                 break;
5642         }
5643 }
5644
5645 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5646                                              struct amdgpu_irq_src *source,
5647                                              unsigned type,
5648                                              enum amdgpu_interrupt_state state)
5649 {
5650         switch (state) {
5651         case AMDGPU_IRQ_STATE_DISABLE:
5652         case AMDGPU_IRQ_STATE_ENABLE:
5653                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5654                                PRIV_REG_INT_ENABLE,
5655                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5656                 break;
5657         default:
5658                 break;
5659         }
5660
5661         return 0;
5662 }
5663
5664 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5665                                               struct amdgpu_irq_src *source,
5666                                               unsigned type,
5667                                               enum amdgpu_interrupt_state state)
5668 {
5669         switch (state) {
5670         case AMDGPU_IRQ_STATE_DISABLE:
5671         case AMDGPU_IRQ_STATE_ENABLE:
5672                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5673                                PRIV_INSTR_INT_ENABLE,
5674                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                      break;
5675         default:
5676                 break;
5677         }
5678
5679         return 0;
5680 }
5681
5682 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5683         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5684                         CP_ECC_ERROR_INT_ENABLE, 1)
5685
5686 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5687         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5688                         CP_ECC_ERROR_INT_ENABLE, 0)
5689
5690 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5691                                               struct amdgpu_irq_src *source,
5692                                               unsigned type,
5693                                               enum amdgpu_interrupt_state state)
5694 {
5695         switch (state) {
5696         case AMDGPU_IRQ_STATE_DISABLE:
5697                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5698                                 CP_ECC_ERROR_INT_ENABLE, 0);
5699                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5700                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5701                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5702                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5703                 break;
5704
5705         case AMDGPU_IRQ_STATE_ENABLE:
5706                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5707                                 CP_ECC_ERROR_INT_ENABLE, 1);
5708                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5709                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5710                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5711                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5712                 break;
5713         default:
5714                 break;
5715         }
5716
5717         return 0;
5718 }
5719
5720
5721 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5722                                             struct amdgpu_irq_src *src,
5723                                             unsigned type,
5724                                             enum amdgpu_interrupt_state state)
5725 {
5726         switch (type) {
5727         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5728                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5729                 break;
5730         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5731                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5732                 break;
5733         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5734                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5735                 break;
5736         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5737                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5738                 break;
5739         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5740                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5741                 break;
5742         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5743                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5744                 break;
5745         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5746                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5747                 break;
5748         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5749                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5750                 break;
5751         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5752                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5753                 break;
5754         default:
5755                 break;
5756         }
5757         return 0;
5758 }
5759
5760 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5761                             struct amdgpu_irq_src *source,
5762                             struct amdgpu_iv_entry *entry)
5763 {
5764         int i;
5765         u8 me_id, pipe_id, queue_id;
5766         struct amdgpu_ring *ring;
5767
5768         DRM_DEBUG("IH: CP EOP\n");
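             /*
              * ring_id bit layout in the IV entry (see the masks below):
              * bits [1:0] pipe, bits [3:2] me, bits [6:4] queue.
              */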
5769         me_id = (entry->ring_id & 0x0c) >> 2;
5770         pipe_id = (entry->ring_id & 0x03) >> 0;
5771         queue_id = (entry->ring_id & 0x70) >> 4;
5772
5773         switch (me_id) {
5774         case 0:
5775                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5776                 break;
5777         case 1:
5778         case 2:
5779                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5780                         ring = &adev->gfx.compute_ring[i];
5781                         /* Per-queue interrupt is supported for MEC starting from VI.
5782                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
5783                          */
5784                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5785                                 amdgpu_fence_process(ring);
5786                 }
5787                 break;
5788         }
5789         return 0;
5790 }
5791
5792 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5793                            struct amdgpu_iv_entry *entry)
5794 {
5795         u8 me_id, pipe_id, queue_id;
5796         struct amdgpu_ring *ring;
5797         int i;
5798
5799         me_id = (entry->ring_id & 0x0c) >> 2;
5800         pipe_id = (entry->ring_id & 0x03) >> 0;
5801         queue_id = (entry->ring_id & 0x70) >> 4;
5802
5803         switch (me_id) {
5804         case 0:
5805                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5806                 break;
5807         case 1:
5808         case 2:
5809                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5810                         ring = &adev->gfx.compute_ring[i];
5811                         if (ring->me == me_id && ring->pipe == pipe_id &&
5812                             ring->queue == queue_id)
5813                                 drm_sched_fault(&ring->sched);
5814                 }
5815                 break;
5816         }
5817 }
5818
5819 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5820                                  struct amdgpu_irq_src *source,
5821                                  struct amdgpu_iv_entry *entry)
5822 {
5823         DRM_ERROR("Illegal register access in command stream\n");
5824         gfx_v9_0_fault(adev, entry);
5825         return 0;
5826 }
5827
5828 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5829                                   struct amdgpu_irq_src *source,
5830                                   struct amdgpu_iv_entry *entry)
5831 {
5832         DRM_ERROR("Illegal instruction in command stream\n");
5833         gfx_v9_0_fault(adev, entry);
5834         return 0;
5835 }
5836
5837
5838 static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
5839         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5840           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5841           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5842         },
5843         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5844           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5845           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5846         },
5847         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5848           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5849           0, 0
5850         },
5851         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5852           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5853           0, 0
5854         },
5855         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5856           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5857           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5858         },
5859         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5860           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5861           0, 0
5862         },
5863         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5864           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5865           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5866         },
5867         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5868           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5869           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5870         },
5871         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5872           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5873           0, 0
5874         },
5875         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5876           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5877           0, 0
5878         },
5879         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5880           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5881           0, 0
5882         },
5883         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5884           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5885           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5886         },
5887         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5888           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5889           0, 0
5890         },
5891         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5892           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5893           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5894         },
5895         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5896           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5897           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5898           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5899         },
5900         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5901           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5902           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5903           0, 0
5904         },
5905         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5906           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5907           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5908           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5909         },
5910         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5911           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5912           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5913           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5914         },
5915         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5916           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5917           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5918           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5919         },
5920         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5921           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5922           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5923           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5924         },
5925         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5926           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5927           0, 0
5928         },
5929         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5930           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5931           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5932         },
5933         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5934           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
5935           0, 0
5936         },
5937         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5938           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
5939           0, 0
5940         },
5941         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5942           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
5943           0, 0
5944         },
5945         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5946           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
5947           0, 0
5948         },
5949         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5950           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
5951           0, 0
5952         },
5953         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5954           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
5955           0, 0
5956         },
5957         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5958           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
5959           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
5960         },
5961         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5962           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
5963           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
5964         },
5965         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5966           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
5967           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
5968         },
5969         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5970           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
5971           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
5972         },
5973         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5974           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
5975           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
5976         },
5977         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5978           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
5979           0, 0
5980         },
5981         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5982           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
5983           0, 0
5984         },
5985         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5986           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
5987           0, 0
5988         },
5989         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5990           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
5991           0, 0
5992         },
5993         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5994           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
5995           0, 0
5996         },
5997         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5998           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
5999           0, 0
6000         },
6001         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6002           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
6003           0, 0
6004         },
6005         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6006           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
6007           0, 0
6008         },
6009         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6010           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
6011           0, 0
6012         },
6013         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6014           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
6015           0, 0
6016         },
6017         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6018           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
6019           0, 0
6020         },
6021         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6022           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
6023           0, 0
6024         },
6025         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
6026           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
6027           0, 0
6028         },
6029         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
6030           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
6031           0, 0
6032         },
6033         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6034           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
6035           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
6036         },
6037         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6038           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
6039           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
6040         },
6041         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6042           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
6043           0, 0
6044         },
6045         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6046           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
6047           0, 0
6048         },
6049         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6050           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
6051           0, 0
6052         },
6053         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6054           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
6055           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
6056         },
6057         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
6058           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
6059           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
6060         },
6061         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6062           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
6063           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
6064         },
6065         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6066           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
6067           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
6068         },
6069         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
6070           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
6071           0, 0
6072         },
6073         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6074           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
6075           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
6076         },
6077         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6078           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
6079           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
6080         },
6081         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6082           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
6083           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
6084         },
6085         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6086           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
6087           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
6088         },
6089         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6090           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
6091           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
6092         },
6093         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6094           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
6095           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
6096         },
6097         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
6098           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
6099           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
6100         },
6101         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6102           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
6103           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
6104         },
6105         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6106           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
6107           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
6108         },
6109         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6110           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
6111           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
6112         },
6113         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6114           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
6115           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
6116         },
6117         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6118           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
6119           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
6120         },
6121         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
6122           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
6123           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
6124         },
6125         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6126           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
6127           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
6128         },
6129         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6130           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
6131           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
6132         },
6133         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6134           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
6135           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
6136         },
6137         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6138           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
6139           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
6140         },
6141         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6142           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
6143           0, 0
6144         },
6145         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6146           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
6147           0, 0
6148         },
6149         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6150           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
6151           0, 0
6152         },
6153         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6154           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
6155           0, 0
6156         },
6157         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6158           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
6159           0, 0
6160         },
6161         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
6162           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
6163           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
6164         },
6165         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6166           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
6167           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
6168         },
6169         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6170           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
6171           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
6172         },
6173         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6174           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
6175           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
6176         },
6177         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6178           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
6179           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
6180         },
6181         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6182           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
6183           0, 0
6184         },
6185         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6186           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
6187           0, 0
6188         },
6189         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6190           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
6191           0, 0
6192         },
6193         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6194           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
6195           0, 0
6196         },
6197         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
6198           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
6199           0, 0
6200         },
6201         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6202           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
6203           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
6204         },
6205         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6206           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
6207           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
6208         },
6209         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6210           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
6211           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
6212         },
6213         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6214           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
6215           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
6216         },
6217         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6218           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
6219           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
6220         },
6221         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6222           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
6223           0, 0
6224         },
6225         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6226           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
6227           0, 0
6228         },
6229         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6230           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
6231           0, 0
6232         },
6233         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6234           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
6235           0, 0
6236         },
6237         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
6238           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
6239           0, 0
6240         },
6241         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6242           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
6243           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
6244         },
6245         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6246           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
6247           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
6248         },
6249         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6250           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
6251           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
6252         },
6253         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6254           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
6255           0, 0
6256         },
6257         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6258           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
6259           0, 0
6260         },
6261         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6262           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
6263           0, 0
6264         },
6265         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6266           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
6267           0, 0
6268         },
6269         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6270           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
6271           0, 0
6272         },
6273         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
6274           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
6275           0, 0
6276         }
6277 };
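
/*
 * Layout note for the table above: each entry names a GFX sub-block, the
 * EDC counter register it lives in, and the mask/shift pairs for its
 * correctable (SEC) and uncorrectable (DED) count fields.  Sub-blocks
 * whose hardware only reports a single-error-detect (SED) parity count
 * carry that field in the SEC slot and zero out the DED pair ("0, 0").
 * Per soc15.h, SOC15_REG_FIELD(reg, field) expands to the pair
 * reg##__##field##_MASK, reg##__##field##__SHIFT.
 */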
6278
6279 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
6280                                      void *inject_if)
6281 {
6282         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
6283         int ret;
6284         struct ta_ras_trigger_error_input block_info = { 0 };
6285
6286         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6287                 return -EINVAL;
6288
6289         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
6290                 return -EINVAL;
6291
6292         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
6293                 return -EPERM;
6294
6295         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
6296               info->head.type)) {
6297                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
6298                         ras_gfx_subblocks[info->head.sub_block_index].name,
6299                         info->head.type);
6300                 return -EPERM;
6301         }
6302
6303         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
6304               info->head.type)) {
6305                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
6306                         ras_gfx_subblocks[info->head.sub_block_index].name,
6307                         info->head.type);
6308                 return -EPERM;
6309         }
6310
6311         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
6312         block_info.sub_block_index =
6313                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
6314         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
6315         block_info.address = info->address;
6316         block_info.value = info->value;
6317
6318         mutex_lock(&adev->grbm_idx_mutex);
6319         ret = psp_ras_trigger_error(&adev->psp, &block_info);
6320         mutex_unlock(&adev->grbm_idx_mutex);
6321
6322         return ret;
6323 }
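
/*
 * Illustrative sketch (not part of the driver): a caller of the injection
 * hook above would fill a struct ras_inject_if roughly as follows; the
 * field names mirror the dereferences in gfx_v9_0_ras_error_inject() and
 * the error type is one of the amdgpu_ras.h enum values.
 *
 *	struct ras_inject_if info = {
 *		.head.block = AMDGPU_RAS_BLOCK__GFX,
 *		.head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
 *		.head.sub_block_index = 0,	(index into ras_gfx_subblocks)
 *		.address = 0,
 *		.value = 0,
 *	};
 *	gfx_v9_0_ras_error_inject(adev, &info);
 */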
6324
6325 static const char *vml2_mems[] = {
6326         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
6327         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
6328         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
6329         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
6330         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
6331         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
6332         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
6333         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
6334         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
6335         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
6336         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
6337         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
6338         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
6339         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
6340         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
6341         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
6342 };
6343
6344 static const char *vml2_walker_mems[] = {
6345         "UTC_VML2_CACHE_PDE0_MEM0",
6346         "UTC_VML2_CACHE_PDE0_MEM1",
6347         "UTC_VML2_CACHE_PDE1_MEM0",
6348         "UTC_VML2_CACHE_PDE1_MEM1",
6349         "UTC_VML2_CACHE_PDE2_MEM0",
6350         "UTC_VML2_CACHE_PDE2_MEM1",
6351         "UTC_VML2_RDIF_LOG_FIFO",
6352 };
6353
6354 static const char *atc_l2_cache_2m_mems[] = {
6355         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
6356         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
6357         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
6358         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
6359 };
6360
6361 static const char *atc_l2_cache_4k_mems[] = {
6362         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
6363         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
6364         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
6365         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
6366         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
6367         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
6368         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
6369         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
6370         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
6371         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
6372         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
6373         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
6374         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
6375         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
6376         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
6377         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
6378         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
6379         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
6380         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
6381         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
6382         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
6383         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
6384         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
6385         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
6386         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
6387         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6388         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6389         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6390         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6391         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6392         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6393         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6394 };
6395
6396 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6397                                          struct ras_err_data *err_data)
6398 {
6399         uint32_t i, data;
6400         uint32_t sec_count, ded_count;
6401
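        /*
         * Writing 255 to each *_INDEX register appears to select all
         * instances at once; clearing the paired *_CNT register then
         * resets the counters before the per-instance scan below.
         */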
6402         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6403         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6404         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6405         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6406         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6407         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6408         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6409         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6410
6411         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6412                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6413                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6414
6415                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6416                 if (sec_count) {
6417                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6418                                 "SEC %d\n", i, vml2_mems[i], sec_count);
6419                         err_data->ce_count += sec_count;
6420                 }
6421
6422                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6423                 if (ded_count) {
6424                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6425                                 "DED %d\n", i, vml2_mems[i], ded_count);
6426                         err_data->ue_count += ded_count;
6427                 }
6428         }
6429
6430         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6431                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6432                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6433
6434                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6435                                                 SEC_COUNT);
6436                 if (sec_count) {
6437                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6438                                 "SEC %d\n", i, vml2_walker_mems[i], sec_count);
6439                         err_data->ce_count += sec_count;
6440                 }
6441
6442                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6443                                                 DED_COUNT);
6444                 if (ded_count) {
6445                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6446                                 "DED %d\n", i, vml2_walker_mems[i], ded_count);
6447                         err_data->ue_count += ded_count;
6448                 }
6449         }
6450
6451         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6452                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6453                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6454
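                /* hard-coded SEC_COUNT field: 2 bits at [14:13] of the 2M EDC counter */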
6455                 sec_count = (data & 0x00006000L) >> 0xd;
6456                 if (sec_count) {
6457                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6458                                 "SEC %d\n", i, atc_l2_cache_2m_mems[i],
6459                                 sec_count);
6460                         err_data->ce_count += sec_count;
6461                 }
6462         }
6463
6464         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6465                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6466                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6467
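                /* hard-coded fields of the 4K EDC counter: SEC at bits
                 * [14:13], DED at bits [16:15]
                 */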
6468                 sec_count = (data & 0x00006000L) >> 0xd;
6469                 if (sec_count) {
6470                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6471                                 "SEC %d\n", i, atc_l2_cache_4k_mems[i],
6472                                 sec_count);
6473                         err_data->ce_count += sec_count;
6474                 }
6475
6476                 ded_count = (data & 0x00018000L) >> 0xf;
6477                 if (ded_count) {
6478                         dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
6479                                 "DED %d\n", i, atc_l2_cache_4k_mems[i],
6480                                 ded_count);
6481                         err_data->ue_count += ded_count;
6482                 }
6483         }
6484
6485         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6486         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6487         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6488         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6489
6490         return 0;
6491 }
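
/*
 * Minimal sketch of the indexed-counter idiom used above (hypothetical
 * helper, not part of this file): select one memory instance through the
 * *_INDEX register, then read its error counts from the paired *_CNT
 * register.
 *
 *	static u32 example_read_vml2_cnt(struct amdgpu_device *adev, u32 idx)
 *	{
 *		WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, idx);
 *		return RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
 *	}
 */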
6492
6493 static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
6494         const struct soc15_reg_entry *reg,
6495         uint32_t se_id, uint32_t inst_id, uint32_t value,
6496         uint32_t *sec_count, uint32_t *ded_count)
6497 {
6498         uint32_t i;
6499         uint32_t sec_cnt, ded_cnt;
6500
6501         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
6502                 if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
6503                         gfx_v9_0_ras_fields[i].seg != reg->seg ||
6504                         gfx_v9_0_ras_fields[i].inst != reg->inst)
6505                         continue;
6506
6507                 sec_cnt = (value &
6508                                 gfx_v9_0_ras_fields[i].sec_count_mask) >>
6509                                 gfx_v9_0_ras_fields[i].sec_count_shift;
6510                 if (sec_cnt) {
6511                         dev_info(adev->dev, "GFX SubBlock %s, "
6512                                 "Instance[%d][%d], SEC %d\n",
6513                                 gfx_v9_0_ras_fields[i].name,
6514                                 se_id, inst_id,
6515                                 sec_cnt);
6516                         *sec_count += sec_cnt;
6517                 }
6518
6519                 ded_cnt = (value &
6520                                 gfx_v9_0_ras_fields[i].ded_count_mask) >>
6521                                 gfx_v9_0_ras_fields[i].ded_count_shift;
6522                 if (ded_cnt) {
6523                         dev_info(adev->dev, "GFX SubBlock %s, "
6524                                 "Instance[%d][%d], DED %d\n",
6525                                 gfx_v9_0_ras_fields[i].name,
6526                                 se_id, inst_id,
6527                                 ded_cnt);
6528                         *ded_count += ded_cnt;
6529                 }
6530         }
6531
6532         return 0;
6533 }
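
/*
 * Worked example of the extraction above: with a SOC15_REG_FIELD pair of
 * (0x00006000, 13) and a raw counter value of 0x4000, the result is
 * (0x4000 & 0x6000) >> 13 = 2, i.e. two correctable errors accumulated in
 * that sub-block.  (The mask and shift values here are illustrative.)
 */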
6534
6535 static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
6536 {
6537         int i, j, k;
6538
6539         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6540                 return;
6541
6542         /* read back registers to clear the counters */
6543         mutex_lock(&adev->grbm_idx_mutex);
6544         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6545                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6546                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6547                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
6548                                 RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6549                         }
6550                 }
6551         }
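        /*
         * 0xe0000000 sets what appear to be the SE/SH/instance broadcast
         * bits of GRBM_GFX_INDEX, undoing the per-instance selection made
         * in the loop above.
         */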
6552         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
6553         mutex_unlock(&adev->grbm_idx_mutex);
6554
6555         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6556         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6557         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6558         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6559         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6560         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6561         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6562         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6563
6564         for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
6565                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6566                 RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6567         }
6568
6569         for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
6570                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6571                 RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6572         }
6573
6574         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
6575                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6576                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6577         }
6578
6579         for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
6580                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6581                 RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6582         }
6583
6584         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6585         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6586         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6587         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6588 }
6589
6590 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6591                                           void *ras_error_status)
6592 {
6593         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6594         uint32_t sec_count = 0, ded_count = 0;
6595         uint32_t i, j, k;
6596         uint32_t reg_value;
6597
6598         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
6599                 return -EINVAL;
6600
6601         err_data->ue_count = 0;
6602         err_data->ce_count = 0;
6603
6604         mutex_lock(&adev->grbm_idx_mutex);
6605
6606         for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
6607                 for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
6608                         for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
6609                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6610                                 reg_value =
6611                                         RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
6612                                 if (reg_value)
6613                                         gfx_v9_0_ras_error_count(adev,
6614                                                 &gfx_v9_0_edc_counter_regs[i],
6615                                                 j, k, reg_value,
6616                                                 &sec_count, &ded_count);
6617                         }
6618                 }
6619         }
6620
6621         err_data->ce_count += sec_count;
6622         err_data->ue_count += ded_count;
6623
6624         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6625         mutex_unlock(&adev->grbm_idx_mutex);
6626
6627         gfx_v9_0_query_utc_edc_status(adev, err_data);
6628
6629         return 0;
6630 }
6631
6632 static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
6633 {
6634         const unsigned int cp_coher_cntl =
6635                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
6636                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
6637                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
6638                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
6639                         PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
6640
6641         /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
6642         amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
6643         amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
6644         amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
6645         amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
6646         amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6647         amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
6648         amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6649 }
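
/*
 * Packet-format note (sketch, per the PACKET3() definition in soc15d.h):
 * the header encodes (3 << 30) | ((opcode & 0xff) << 8) | (count & 0x3fff),
 * where count is the number of body dwords minus one; hence the 5 above
 * for the six ACQUIRE_MEM body dwords.  COHER_SIZE = 0xffffffff combined
 * with COHER_SIZE_HI = 0xffffff makes the sync span the full address range.
 */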
6650
6651 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6652         .name = "gfx_v9_0",
6653         .early_init = gfx_v9_0_early_init,
6654         .late_init = gfx_v9_0_late_init,
6655         .sw_init = gfx_v9_0_sw_init,
6656         .sw_fini = gfx_v9_0_sw_fini,
6657         .hw_init = gfx_v9_0_hw_init,
6658         .hw_fini = gfx_v9_0_hw_fini,
6659         .suspend = gfx_v9_0_suspend,
6660         .resume = gfx_v9_0_resume,
6661         .is_idle = gfx_v9_0_is_idle,
6662         .wait_for_idle = gfx_v9_0_wait_for_idle,
6663         .soft_reset = gfx_v9_0_soft_reset,
6664         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6665         .set_powergating_state = gfx_v9_0_set_powergating_state,
6666         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6667 };
6668
6669 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6670         .type = AMDGPU_RING_TYPE_GFX,
6671         .align_mask = 0xff,
6672         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6673         .support_64bit_ptrs = true,
6674         .vmhub = AMDGPU_GFXHUB_0,
6675         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6676         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6677         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6678         .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
6679                 5 +  /* COND_EXEC */
6680                 7 +  /* PIPELINE_SYNC */
6681                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6682                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6683                 2 + /* VM_FLUSH */
6684                 8 +  /* FENCE for VM_FLUSH */
6685                 20 + /* GDS switch */
6686                 4 + /* double SWITCH_BUFFER,
6687                        the first COND_EXEC jumps to the place just
6688                        prior to this double SWITCH_BUFFER */
6689                 5 + /* COND_EXEC */
6690                 7 + /* HDP_flush */
6691                 4 + /* VGT_flush */
6692                 14 + /* CE_META */
6693                 31 + /* DE_META */
6694                 3 + /* CNTX_CTRL */
6695                 5 + /* HDP_INVL */
6696                 8 + 8 + /* FENCE x2 */
6697                 2 + /* SWITCH_BUFFER */
6698                 7, /* gfx_v9_0_emit_mem_sync */
6699         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6700         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6701         .emit_fence = gfx_v9_0_ring_emit_fence,
6702         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6703         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6704         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6705         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6706         .test_ring = gfx_v9_0_ring_test_ring,
6707         .test_ib = gfx_v9_0_ring_test_ib,
6708         .insert_nop = amdgpu_ring_insert_nop,
6709         .pad_ib = amdgpu_ring_generic_pad_ib,
6710         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6711         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6712         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6713         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6714         .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
6715         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6716         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6717         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6718         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6719         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6720 };
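
/*
 * The emit_frame_size sums above are worst-case dword budgets: the ring
 * layer reserves this many dwords per submission up front, so the emit_*
 * callbacks can write their packets without risking a mid-frame overrun.
 */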
6721
6722 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6723         .type = AMDGPU_RING_TYPE_COMPUTE,
6724         .align_mask = 0xff,
6725         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6726         .support_64bit_ptrs = true,
6727         .vmhub = AMDGPU_GFXHUB_0,
6728         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6729         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6730         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6731         .emit_frame_size =
6732                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6733                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6734                 5 + /* hdp invalidate */
6735                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6736                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6737                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6738                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6739                 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6740                 7, /* gfx_v9_0_emit_mem_sync */
6741         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6742         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6743         .emit_fence = gfx_v9_0_ring_emit_fence,
6744         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6745         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6746         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6747         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6748         .test_ring = gfx_v9_0_ring_test_ring,
6749         .test_ib = gfx_v9_0_ring_test_ib,
6750         .insert_nop = amdgpu_ring_insert_nop,
6751         .pad_ib = amdgpu_ring_generic_pad_ib,
6752         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6753         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6754         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6755         .emit_mem_sync = gfx_v9_0_emit_mem_sync,
6756 };
6757
6758 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6759         .type = AMDGPU_RING_TYPE_KIQ,
6760         .align_mask = 0xff,
6761         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6762         .support_64bit_ptrs = true,
6763         .vmhub = AMDGPU_GFXHUB_0,
6764         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6765         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6766         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6767         .emit_frame_size =
6768                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6769                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6770                 5 + /* hdp invalidate */
6771                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6772                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6773                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6774                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6775                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6776         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6777         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6778         .test_ring = gfx_v9_0_ring_test_ring,
6779         .insert_nop = amdgpu_ring_insert_nop,
6780         .pad_ib = amdgpu_ring_generic_pad_ib,
6781         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6782         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6783         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6784         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6785 };
6786
6787 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6788 {
6789         int i;
6790
6791         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6792
6793         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6794                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6795
6796         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6797                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6798 }
6799
6800 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6801         .set = gfx_v9_0_set_eop_interrupt_state,
6802         .process = gfx_v9_0_eop_irq,
6803 };
6804
6805 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6806         .set = gfx_v9_0_set_priv_reg_fault_state,
6807         .process = gfx_v9_0_priv_reg_irq,
6808 };
6809
6810 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6811         .set = gfx_v9_0_set_priv_inst_fault_state,
6812         .process = gfx_v9_0_priv_inst_irq,
6813 };
6814
6815 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6816         .set = gfx_v9_0_set_cp_ecc_error_state,
6817         .process = amdgpu_gfx_cp_ecc_error_irq,
6818 };
6819
6821 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6822 {
6823         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6824         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6825
6826         adev->gfx.priv_reg_irq.num_types = 1;
6827         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6828
6829         adev->gfx.priv_inst_irq.num_types = 1;
6830         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6831
6832         adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
6833         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6834 }
6835
6836 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6837 {
6838         switch (adev->asic_type) {
6839         case CHIP_VEGA10:
6840         case CHIP_VEGA12:
6841         case CHIP_VEGA20:
6842         case CHIP_RAVEN:
6843         case CHIP_ARCTURUS:
6844         case CHIP_RENOIR:
6845                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
6846                 break;
6847         default:
6848                 break;
6849         }
6850 }
6851
6852 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
6853 {
6854         /* init ASIC GDS info */
6855         switch (adev->asic_type) {
6856         case CHIP_VEGA10:
6857         case CHIP_VEGA12:
6858         case CHIP_VEGA20:
6859                 adev->gds.gds_size = 0x10000;
6860                 break;
6861         case CHIP_RAVEN:
6862         case CHIP_ARCTURUS:
6863                 adev->gds.gds_size = 0x1000;
6864                 break;
6865         default:
6866                 adev->gds.gds_size = 0x10000;
6867                 break;
6868         }
6869
6870         switch (adev->asic_type) {
6871         case CHIP_VEGA10:
6872         case CHIP_VEGA20:
6873                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6874                 break;
6875         case CHIP_VEGA12:
6876                 adev->gds.gds_compute_max_wave_id = 0x27f;
6877                 break;
6878         case CHIP_RAVEN:
6879                 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
6880                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6881                 else
6882                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6883                 break;
6884         case CHIP_ARCTURUS:
6885                 adev->gds.gds_compute_max_wave_id = 0xfff;
6886                 break;
6887         default:
6888                 /* this really depends on the chip */
6889                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6890                 break;
6891         }
6892
6893         adev->gds.gws_size = 64;
6894         adev->gds.oa_size = 16;
6895 }
6896
6897 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6898                                                  u32 bitmap)
6899 {
6900         u32 data;
6901
6902         if (!bitmap)
6903                 return;
6904
6905         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6906         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6907
6908         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
6909 }
6910
6911 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6912 {
6913         u32 data, mask;
6914
6915         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
6916         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
6917
6918         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6919         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6920
6921         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
6922
6923         return (~data) & mask;
6924 }
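
/*
 * Worked example (assuming max_cu_per_sh = 8, so the created bitmask is
 * 0xff): if the combined INACTIVE_CUS field reads 0x03, i.e. CU0 and CU1
 * disabled, the function returns (~0x03) & 0xff = 0xfc, marking CU2..CU7
 * as active.
 */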
6925
6926 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
6927                                  struct amdgpu_cu_info *cu_info)
6928 {
6929         int i, j, k, counter, active_cu_number = 0;
6930         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6931         unsigned disable_masks[4 * 4];
6932
6933         if (!adev || !cu_info)
6934                 return -EINVAL;
6935
6936         /*
6937          * the 4x4 bitmap array holds 16 entries, which covers all gfx9 ASICs
6938          */
6939         if (adev->gfx.config.max_shader_engines *
6940                 adev->gfx.config.max_sh_per_se > 16)
6941                 return -EINVAL;
6942
6943         amdgpu_gfx_parse_disable_cu(disable_masks,
6944                                     adev->gfx.config.max_shader_engines,
6945                                     adev->gfx.config.max_sh_per_se);
6946
6947         mutex_lock(&adev->grbm_idx_mutex);
6948         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6949                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6950                         mask = 1;
6951                         ao_bitmap = 0;
6952                         counter = 0;
6953                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
6954                         gfx_v9_0_set_user_cu_inactive_bitmap(
6955                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
6956                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
6957
6958                         /*
6959                          * The bitmap (and ao_cu_bitmap) in the cu_info
6960                          * structure is a 4x4 array, which suits Vega
6961                          * ASICs with their 4*2 SE/SH layout.
6962                          * Arcturus, however, uses an 8*1 SE/SH layout.
6963                          * To minimize the impact, we map it onto the
6964                          * current bitmap array as below:
6965                          *    SE4,SH0 --> bitmap[0][1]
6966                          *    SE5,SH0 --> bitmap[1][1]
6967                          *    SE6,SH0 --> bitmap[2][1]
6968                          *    SE7,SH0 --> bitmap[3][1]
6969                          */
6970                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
6971
6972                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6973                                 if (bitmap & mask) {
6974                                         if (counter < adev->gfx.config.max_cu_per_sh)
6975                                                 ao_bitmap |= mask;
6976                                         counter++;
6977                                 }
6978                                 mask <<= 1;
6979                         }
6980                         active_cu_number += counter;
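                        /*
                         * ao_cu_mask packs 8 always-on bits per SH array
                         * (shift i * 16 + j * 8), so only SE0/SE1 with
                         * SH0/SH1 fit in the 32-bit mask; hence the
                         * i < 2 && j < 2 guard below.
                         */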
6981                         if (i < 2 && j < 2)
6982                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
6983                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
6984                 }
6985         }
6986         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6987         mutex_unlock(&adev->grbm_idx_mutex);
6988
6989         cu_info->number = active_cu_number;
6990         cu_info->ao_cu_mask = ao_cu_mask;
6991         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
6992
6993         return 0;
6994 }
6995
6996 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
6997 {
6998         .type = AMD_IP_BLOCK_TYPE_GFX,
6999         .major = 9,
7000         .minor = 0,
7001         .rev = 0,
7002         .funcs = &gfx_v9_0_ip_funcs,
7003 };