/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT                                 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                        0
#define mmTCP_CHAN_STEER_1_ARCT                                 0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                        0
#define mmTCP_CHAN_STEER_2_ARCT                                 0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                        0
#define mmTCP_CHAN_STEER_3_ARCT                                 0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                        0
#define mmTCP_CHAN_STEER_4_ARCT                                 0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                        0
#define mmTCP_CHAN_STEER_5_ARCT                                 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                        0

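/*
 * Where to find the ECC error counters for one GFX sub-block: the
 * register identified by hwip/inst/seg/reg_offset holds both counts,
 * and the mask/shift pairs extract the SEC (single-error corrected)
 * and DED (double-error detected) fields from it.
 */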
struct ras_gfx_subblock_reg {
        const char *name;
        uint32_t hwip;
        uint32_t inst;
        uint32_t seg;
        uint32_t reg_offset;
        uint32_t sec_count_mask;
        uint32_t sec_count_shift;
        uint32_t ded_count_mask;
        uint32_t ded_count_shift;
};

enum ta_ras_gfx_subblock {
        /* CPC */
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
        TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
        TA_RAS_BLOCK__GFX_CPC_UCODE,
        TA_RAS_BLOCK__GFX_DC_STATE_ME1,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
        TA_RAS_BLOCK__GFX_DC_STATE_ME2,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        /* CPF */
        TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
        TA_RAS_BLOCK__GFX_CPF_TAG,
        TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
        /* CPG */
        TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
        TA_RAS_BLOCK__GFX_CPG_TAG,
        TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
        /* GDS */
        TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        /* SPI */
        TA_RAS_BLOCK__GFX_SPI_SR_MEM,
        /* SQ */
        TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_LDS_D,
        TA_RAS_BLOCK__GFX_SQ_LDS_I,
        TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP */
        TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
        /* SQC (3 ranges) */
        TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        /* SQC range 0 */
        TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
                TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        /* SQC range 1 */
        TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        /* SQC range 2 */
        TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
        /* TA */
        TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
        TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        /* TCA */
        TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        /* TCC (5 sub-ranges) */
        TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        /* TCC range 0 */
        TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
        TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        /* TCC range 1 */
        TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
                TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        /* TCC range 2 */
        TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
        TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
        TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
                TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        /* TCC range 3 */
        TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
                TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        /* TCC range 4 */
        TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
                TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
                TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
        /* TCI */
        TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
        /* TCP */
        TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
        TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
        TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
        TA_RAS_BLOCK__GFX_TCP_DB_RAM,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        /* TD */
        TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
        TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        /* EA (3 sub-ranges) */
        TA_RAS_BLOCK__GFX_EA_INDEX_START,
        /* EA range 0 */
        TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        /* EA range 1 */
        TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        /* EA range 2 */
        TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
        /* UTC VM L2 bank */
        TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
        /* UTC VM walker */
        TA_RAS_BLOCK__UTC_VML2_WALKER,
        /* UTC ATC L2 2MB cache */
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
        /* UTC ATC L2 4KB cache */
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
        TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
        unsigned char *name;
        int ta_subblock;
        int hw_supported_error_type;
        int sw_supported_error_type;
};

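/*
 * Build one ras_gfx_subblocks[] entry.  Arguments a-d pack the
 * hw_supported_error_type bitmask and e-h the sw_supported one; the
 * bit positions appear to follow the AMDGPU_RAS_ERROR__* flags
 * (parity, single correctable, multi uncorrectable, poison).
 */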
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                 \
        [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
                #subblock,                                                     \
                TA_RAS_BLOCK__##subblock,                                      \
                ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
                (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
        }

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

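/*
 * Golden register settings.  Each SOC15_REG_GOLDEN_VALUE() entry is a
 * (register, and_mask, or_value) triple applied read-modify-write by
 * soc15_program_register_sequence(): the bits in and_mask are cleared
 * and or_value is OR'ed in (a full 0xffffffff mask becomes a plain
 * register write).
 */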
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};

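/*
 * Offsets of the eight RLC SRM index control address/data registers,
 * expressed relative to instance 0 so the pairs can be programmed in a
 * loop.
 */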
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if);

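/*
 * gfx_v9_0_init_golden_registers - program the golden register settings
 *
 * Applies the per-ASIC register sequences defined above, then the list
 * common to all GC 9.x parts.  Renoir returns early and Arcturus is
 * filtered out below, so neither gets the common settings.
 */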
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case CHIP_ARCTURUS:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_4_1_arct,
                                                ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                if (adev->rev_id >= 8)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv1,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        case CHIP_RENOIR:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1_rn,
                                                ARRAY_SIZE(golden_settings_gc_9_1_rn));
                return; /* Renoir does not need the common golden settings */
        default:
                break;
        }

        if (adev->asic_type != CHIP_ARCTURUS)
                soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
        adev->gfx.scratch.num_reg = 8;
        adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

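/*
 * Emit a PM4 WRITE_DATA packet that writes @val to the register at
 * offset @reg.  @eng_sel selects the engine that performs the write
 * and @wc requests a write confirmation before the packet retires.
 */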
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                       bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                                WRITE_DATA_DST_SEL(0) |
                                (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

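/*
 * Emit a PM4 WAIT_REG_MEM packet: poll a register (@mem_space == 0) or
 * a memory location (@mem_space == 1, @addr0/@addr1 = low/high address
 * bits) every @inv clocks until (value & @mask) == @ref.
 */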
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                  int mem_space, int opt, uint32_t addr0,
                                  uint32_t addr1, uint32_t ref, uint32_t mask,
                                  uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                                 /* memory (1) or register (0) */
                                 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                                 WAIT_REG_MEM_OPERATION(opt) | /* wait */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
                                 WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}

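/*
 * gfx_v9_0_ring_test_ring - basic ring liveness test
 *
 * Seeds a scratch register with 0xCAFEDEAD, submits a SET_UCONFIG_REG
 * packet that writes 0xDEADBEEF to it, and polls until the value lands
 * or adev->usec_timeout expires.
 */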
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r)
                return r;

        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                goto error_free_scratch;

        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

error_free_scratch:
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}

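/*
 * gfx_v9_0_ring_test_ib - indirect buffer test
 *
 * Builds a small IB whose WRITE_DATA packet stores 0xDEADBEEF to a
 * writeback slot, schedules it, waits on the fence, and checks that
 * the value reached memory.
 */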
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;

        unsigned index;
        uint64_t gpu_addr;
        uint32_t tmp;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16, &ib);
        if (r)
                goto err1;

        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
        ib.ptr[3] = upper_32_bits(gpu_addr);
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                goto err2;
        }

        tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_device_wb_free(adev, index);
        return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
        release_firmware(adev->gfx.pfp_fw);
        adev->gfx.pfp_fw = NULL;
        release_firmware(adev->gfx.me_fw);
        adev->gfx.me_fw = NULL;
        release_firmware(adev->gfx.ce_fw);
        adev->gfx.ce_fw = NULL;
        release_firmware(adev->gfx.rlc_fw);
        adev->gfx.rlc_fw = NULL;
        release_firmware(adev->gfx.mec_fw);
        adev->gfx.mec_fw = NULL;
        release_firmware(adev->gfx.mec2_fw);
        adev->gfx.mec2_fw = NULL;

        kfree(adev->gfx.rlc.register_list_format);
}

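/*
 * Cache the pointers, sizes, and versions of the three save/restore
 * lists (CNTL, GPM, SRM) carried by a v2.1 RLC firmware image.
 */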
static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
        const struct rlc_firmware_header_v2_1 *rlc_hdr;

        rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
        adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
        adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
        adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
        adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
        adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
        adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
        adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
        adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
        adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
        adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
        adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
        adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
        adev->gfx.rlc.reg_list_format_direct_reg_list_length =
                        le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

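/*
 * Determine whether the CP firmware is new enough to honor the GRBM
 * 1-cycle delay needed for the register write-then-wait fast path, and
 * set the me/mec_fw_write_wait flags accordingly.
 */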
985 static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
986 {
987         adev->gfx.me_fw_write_wait = false;
988         adev->gfx.mec_fw_write_wait = false;
989
990         if ((adev->gfx.mec_fw_version < 0x000001a5) ||
991             (adev->gfx.mec_feature_version < 46) ||
992             (adev->gfx.pfp_fw_version < 0x000000b7) ||
993             (adev->gfx.pfp_feature_version < 46))
994                 DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \
995                               GRBM requires 1-cycle delay in cp firmware\n");
996
997         switch (adev->asic_type) {
998         case CHIP_VEGA10:
999                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1000                     (adev->gfx.me_feature_version >= 42) &&
1001                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1002                     (adev->gfx.pfp_feature_version >= 42))
1003                         adev->gfx.me_fw_write_wait = true;
1004
1005                 if ((adev->gfx.mec_fw_version >=  0x00000193) &&
1006                     (adev->gfx.mec_feature_version >= 42))
1007                         adev->gfx.mec_fw_write_wait = true;
1008                 break;
1009         case CHIP_VEGA12:
1010                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1011                     (adev->gfx.me_feature_version >= 44) &&
1012                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1013                     (adev->gfx.pfp_feature_version >= 44))
1014                         adev->gfx.me_fw_write_wait = true;
1015
1016                 if ((adev->gfx.mec_fw_version >=  0x00000196) &&
1017                     (adev->gfx.mec_feature_version >= 44))
1018                         adev->gfx.mec_fw_write_wait = true;
1019                 break;
1020         case CHIP_VEGA20:
1021                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1022                     (adev->gfx.me_feature_version >= 44) &&
1023                     (adev->gfx.pfp_fw_version >=  0x000000b2) &&
1024                     (adev->gfx.pfp_feature_version >= 44))
1025                         adev->gfx.me_fw_write_wait = true;
1026
1027                 if ((adev->gfx.mec_fw_version >=  0x00000197) &&
1028                     (adev->gfx.mec_feature_version >= 44))
1029                         adev->gfx.mec_fw_write_wait = true;
1030                 break;
1031         case CHIP_RAVEN:
1032                 if ((adev->gfx.me_fw_version >= 0x0000009c) &&
1033                     (adev->gfx.me_feature_version >= 42) &&
1034                     (adev->gfx.pfp_fw_version >=  0x000000b1) &&
1035                     (adev->gfx.pfp_feature_version >= 42))
1036                         adev->gfx.me_fw_write_wait = true;
1037
1038                 if ((adev->gfx.mec_fw_version >=  0x00000192) &&
1039                     (adev->gfx.mec_feature_version >= 42))
1040                         adev->gfx.mec_fw_write_wait = true;
1041                 break;
1042         default:
1043                 break;
1044         }
1045 }
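/*
 * These flags are consumed later in this file (see
 * gfx_v9_0_ring_emit_reg_write_reg_wait()): when the CP firmware is new
 * enough, a single combined write-and-wait packet can be emitted instead of
 * the generic write/wait helper sequence.
 */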
1046
1047 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
1048 {
1049         switch (adev->asic_type) {
1050         case CHIP_VEGA10:
1051         case CHIP_VEGA12:
1052         case CHIP_VEGA20:
1053                 break;
1054         case CHIP_RAVEN:
1055                 /* Disable GFXOFF on original raven when the RLC firmware
1056                  * is too old to support it.  There are combinations of
1057                  * sbios and platforms that are not stable.
1058                  */
1059                 if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
1060                     ((adev->gfx.rlc_fw_version != 106 &&
1061                       adev->gfx.rlc_fw_version < 531) ||
1062                      (adev->gfx.rlc_fw_version == 53815) ||
1063                      (adev->gfx.rlc_feature_version < 1) ||
1064                      !adev->gfx.rlc.is_rlc_v2_1))
1065                         adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
1067
1068                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1069                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1070                                 AMD_PG_SUPPORT_CP |
1071                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1072                 break;
1073         case CHIP_RENOIR:
1074                 if (adev->pm.pp_feature & PP_GFXOFF_MASK)
1075                         adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
1076                                 AMD_PG_SUPPORT_CP |
1077                                 AMD_PG_SUPPORT_RLC_SMU_HS;
1078                 break;
1079         default:
1080                 break;
1081         }
1082 }
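/*
 * Minimal sketch of how a caller could observe the result (illustrative
 * only): after the checks above, GFXOFF survives iff the feature bit is
 * still set:
 *
 *	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
 *		dev_dbg(adev->dev, "GFXOFF left enabled\n");
 */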
1083
1084 static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
1085                                           const char *chip_name)
1086 {
1087         char fw_name[30];
1088         int err;
1089         struct amdgpu_firmware_info *info = NULL;
1090         const struct common_firmware_header *header = NULL;
1091         const struct gfx_firmware_header_v1_0 *cp_hdr;
1092
1093         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
1094         err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
1095         if (err)
1096                 goto out;
1097         err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
1098         if (err)
1099                 goto out;
1100         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1101         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1102         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1103
1104         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1105         err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
1106         if (err)
1107                 goto out;
1108         err = amdgpu_ucode_validate(adev->gfx.me_fw);
1109         if (err)
1110                 goto out;
1111         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1112         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1113         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1114
1115         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1116         err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
1117         if (err)
1118                 goto out;
1119         err = amdgpu_ucode_validate(adev->gfx.ce_fw);
1120         if (err)
1121                 goto out;
1122         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1123         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1124         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1125
1126         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1127                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1128                 info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1129                 info->fw = adev->gfx.pfp_fw;
1130                 header = (const struct common_firmware_header *)info->fw->data;
1131                 adev->firmware.fw_size +=
1132                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1133
1134                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1135                 info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1136                 info->fw = adev->gfx.me_fw;
1137                 header = (const struct common_firmware_header *)info->fw->data;
1138                 adev->firmware.fw_size +=
1139                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1140
1141                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1142                 info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1143                 info->fw = adev->gfx.ce_fw;
1144                 header = (const struct common_firmware_header *)info->fw->data;
1145                 adev->firmware.fw_size +=
1146                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1147         }
1148
1149 out:
1150         if (err) {
1151                 dev_err(adev->dev,
1152                         "gfx9: Failed to load firmware \"%s\"\n",
1153                         fw_name);
1154                 release_firmware(adev->gfx.pfp_fw);
1155                 adev->gfx.pfp_fw = NULL;
1156                 release_firmware(adev->gfx.me_fw);
1157                 adev->gfx.me_fw = NULL;
1158                 release_firmware(adev->gfx.ce_fw);
1159                 adev->gfx.ce_fw = NULL;
1160         }
1161         return err;
1162 }
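/*
 * Note that a failure on any of the three CP blobs above releases all of
 * them, so the PFP/ME/CE trio is either fully present or fully absent. An
 * illustrative call site:
 *
 *	r = gfx_v9_0_init_cp_gfx_microcode(adev, "vega10");
 *	if (r)
 *		return r;
 */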
1163
1164 static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
1165                                           const char *chip_name)
1166 {
1167         char fw_name[30];
1168         int err;
1169         struct amdgpu_firmware_info *info = NULL;
1170         const struct common_firmware_header *header = NULL;
1171         const struct rlc_firmware_header_v2_0 *rlc_hdr;
1172         unsigned int *tmp = NULL;
1173         unsigned int i = 0;
1174         uint16_t version_major;
1175         uint16_t version_minor;
1176         uint32_t smu_version;
1177
1178         /*
1179          * For Picasso && AM4 SOCKET board, we use picasso_rlc_am4.bin
1180          * instead of picasso_rlc.bin.
1181          * Detection method:
1182          * PCO AM4: revision >= 0xC8 && revision <= 0xCF
1183          *          or revision >= 0xD8 && revision <= 0xDF
1184          * otherwise it is PCO FP5
1185          */
1186         if (!strcmp(chip_name, "picasso") &&
1187                 (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
1188                 ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
1189                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
1190         else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
1191                 (smu_version >= 0x41e2b))
1192                 /* SMC is loaded by SBIOS on APU and it's able to get
1193                  * the SMU version directly.
1194                  */
1195                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
1196         else
1197                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1198         err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
1199         if (err)
1200                 goto out;
1201         err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
             if (err)
                     goto out;
1202         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1203
1204         version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
1205         version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
1206         if (version_major == 2 && version_minor == 1)
1207                 adev->gfx.rlc.is_rlc_v2_1 = true;
1208
1209         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1210         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1211         adev->gfx.rlc.save_and_restore_offset =
1212                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1213         adev->gfx.rlc.clear_state_descriptor_offset =
1214                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1215         adev->gfx.rlc.avail_scratch_ram_locations =
1216                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1217         adev->gfx.rlc.reg_restore_list_size =
1218                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1219         adev->gfx.rlc.reg_list_format_start =
1220                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1221         adev->gfx.rlc.reg_list_format_separate_start =
1222                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1223         adev->gfx.rlc.starting_offsets_start =
1224                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1225         adev->gfx.rlc.reg_list_format_size_bytes =
1226                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1227         adev->gfx.rlc.reg_list_size_bytes =
1228                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1229         adev->gfx.rlc.register_list_format =
1230                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1231                                 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1232         if (!adev->gfx.rlc.register_list_format) {
1233                 err = -ENOMEM;
1234                 goto out;
1235         }
1236
1237         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1238                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1239         for (i = 0; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1240                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1241
1242         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
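        /*
         * One allocation backs both arrays: register_restore points
         * reg_list_format_size_bytes / 4 dwords past register_list_format,
         * which is why a single kfree() in gfx_v9_0_free_microcode() suffices.
         */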
1243
1244         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1245                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1246         for (i = 0; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1247                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1248
1249         if (adev->gfx.rlc.is_rlc_v2_1)
1250                 gfx_v9_0_init_rlc_ext_microcode(adev);
1251
1252         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1253                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1254                 info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1255                 info->fw = adev->gfx.rlc_fw;
1256                 header = (const struct common_firmware_header *)info->fw->data;
1257                 adev->firmware.fw_size +=
1258                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1259
1260                 if (adev->gfx.rlc.is_rlc_v2_1 &&
1261                     adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
1262                     adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
1263                     adev->gfx.rlc.save_restore_list_srm_size_bytes) {
1264                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
1265                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
1266                         info->fw = adev->gfx.rlc_fw;
1267                         adev->firmware.fw_size +=
1268                                 ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
1269
1270                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
1271                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
1272                         info->fw = adev->gfx.rlc_fw;
1273                         adev->firmware.fw_size +=
1274                                 ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
1275
1276                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
1277                         info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
1278                         info->fw = adev->gfx.rlc_fw;
1279                         adev->firmware.fw_size +=
1280                                 ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
1281                 }
1282         }
1283
1284 out:
1285         if (err) {
1286                 dev_err(adev->dev,
1287                         "gfx9: Failed to load firmware \"%s\"\n",
1288                         fw_name);
1289                 release_firmware(adev->gfx.rlc_fw);
1290                 adev->gfx.rlc_fw = NULL;
1291         }
1292         return err;
1293 }
1294
1295 static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
1296                                           const char *chip_name)
1297 {
1298         char fw_name[30];
1299         int err;
1300         struct amdgpu_firmware_info *info = NULL;
1301         const struct common_firmware_header *header = NULL;
1302         const struct gfx_firmware_header_v1_0 *cp_hdr;
1303
1304         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1305         err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
1306         if (err)
1307                 goto out;
1308         err = amdgpu_ucode_validate(adev->gfx.mec_fw);
1309         if (err)
1310                 goto out;
1311         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1312         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1313         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1314
1316         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1317         err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
1318         if (!err) {
1319                 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
1320                 if (err)
1321                         goto out;
1322                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1323                         adev->gfx.mec2_fw->data;
1324                 adev->gfx.mec2_fw_version =
1325                         le32_to_cpu(cp_hdr->header.ucode_version);
1326                 adev->gfx.mec2_feature_version =
1327                         le32_to_cpu(cp_hdr->ucode_feature_version);
1328         } else {
1329                 err = 0;
1330                 adev->gfx.mec2_fw = NULL;
1331         }
1332
1333         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1334                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1335                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1336                 info->fw = adev->gfx.mec_fw;
1337                 header = (const struct common_firmware_header *)info->fw->data;
1338                 cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1339                 adev->firmware.fw_size +=
1340                         ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1341
1342                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
1343                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
1344                 info->fw = adev->gfx.mec_fw;
1345                 adev->firmware.fw_size +=
1346                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1347
1348                 if (adev->gfx.mec2_fw) {
1349                         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1350                         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1351                         info->fw = adev->gfx.mec2_fw;
1352                         header = (const struct common_firmware_header *)info->fw->data;
1353                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
1354                         adev->firmware.fw_size +=
1355                                 ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
1356
1357                         /* TODO: Determine if MEC2 JT FW loading can be
1358                          * removed for all GFX V9 asics and above. */
1359                         if (adev->asic_type != CHIP_ARCTURUS &&
1360                             adev->asic_type != CHIP_RENOIR) {
1361                                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
1362                                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
1363                                 info->fw = adev->gfx.mec2_fw;
1364                                 adev->firmware.fw_size +=
1365                                         ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4,
1366                                         PAGE_SIZE);
1367                         }
1368                 }
1369         }
1370
1371 out:
1372         gfx_v9_0_check_if_need_gfxoff(adev);
1373         gfx_v9_0_check_fw_write_wait(adev);
1374         if (err) {
1375                 dev_err(adev->dev,
1376                         "gfx9: Failed to load firmware \"%s\"\n",
1377                         fw_name);
1378                 release_firmware(adev->gfx.mec_fw);
1379                 adev->gfx.mec_fw = NULL;
1380                 release_firmware(adev->gfx.mec2_fw);
1381                 adev->gfx.mec2_fw = NULL;
1382         }
1383         return err;
1384 }
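/*
 * MEC2 firmware is optional above: a missing blob is not an error and leaves
 * adev->gfx.mec2_fw NULL, so later users must check it first, as the PSP
 * path above does:
 *
 *	if (adev->gfx.mec2_fw)
 *		... set up the MEC2 ucode entries ...
 */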
1385
1386 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
1387 {
1388         const char *chip_name;
1389         int r;
1390
1391         DRM_DEBUG("\n");
1392
1393         switch (adev->asic_type) {
1394         case CHIP_VEGA10:
1395                 chip_name = "vega10";
1396                 break;
1397         case CHIP_VEGA12:
1398                 chip_name = "vega12";
1399                 break;
1400         case CHIP_VEGA20:
1401                 chip_name = "vega20";
1402                 break;
1403         case CHIP_RAVEN:
1404                 if (adev->rev_id >= 8)
1405                         chip_name = "raven2";
1406                 else if (adev->pdev->device == 0x15d8)
1407                         chip_name = "picasso";
1408                 else
1409                         chip_name = "raven";
1410                 break;
1411         case CHIP_ARCTURUS:
1412                 chip_name = "arcturus";
1413                 break;
1414         case CHIP_RENOIR:
1415                 chip_name = "renoir";
1416                 break;
1417         default:
1418                 BUG();
1419         }
1420
1421         /* No CPG in Arcturus */
1422         if (adev->asic_type != CHIP_ARCTURUS) {
1423                 r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
1424                 if (r)
1425                         return r;
1426         }
1427
1428         r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
1429         if (r)
1430                 return r;
1431
1432         r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
1433         if (r)
1434                 return r;
1435
1436         return 0;
1437 }
1438
1439 static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
1440 {
1441         u32 count = 0;
1442         const struct cs_section_def *sect = NULL;
1443         const struct cs_extent_def *ext = NULL;
1444
1445         /* begin clear state */
1446         count += 2;
1447         /* context control state */
1448         count += 3;
1449
1450         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
1451                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1452                         if (sect->id == SECT_CONTEXT)
1453                                 count += 2 + ext->reg_count;
1454                         else
1455                                 return 0;
1456                 }
1457         }
1458
1459         /* end clear state */
1460         count += 2;
1461         /* clear state */
1462         count += 2;
1463
1464         return count;
1465 }
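/*
 * Worked example (hypothetical table): one SECT_CONTEXT section with two
 * extents of 4 and 6 registers sizes the CSB at
 * 2 + 3 + (2 + 4) + (2 + 6) + 2 + 2 = 23 dwords.
 */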
1466
1467 static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
1468                                     volatile u32 *buffer)
1469 {
1470         u32 count = 0, i;
1471         const struct cs_section_def *sect = NULL;
1472         const struct cs_extent_def *ext = NULL;
1473
1474         if (adev->gfx.rlc.cs_data == NULL)
1475                 return;
1476         if (buffer == NULL)
1477                 return;
1478
1479         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1480         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1481
1482         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1483         buffer[count++] = cpu_to_le32(0x80000000);
1484         buffer[count++] = cpu_to_le32(0x80000000);
1485
1486         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1487                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1488                         if (sect->id == SECT_CONTEXT) {
1489                                 buffer[count++] =
1490                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1491                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1492                                                 PACKET3_SET_CONTEXT_REG_START);
1493                                 for (i = 0; i < ext->reg_count; i++)
1494                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1495                         } else {
1496                                 return;
1497                         }
1498                 }
1499         }
1500
1501         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1502         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1503
1504         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1505         buffer[count++] = cpu_to_le32(0);
1506 }
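/*
 * The dword count emitted here must stay in sync with
 * gfx_v9_0_get_csb_size(), which walks the same cs_data table.
 */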
1507
1508 static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
1509 {
1510         struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
1511         uint32_t pg_always_on_cu_num = 2;
1512         uint32_t always_on_cu_num;
1513         uint32_t i, j, k;
1514         uint32_t mask, cu_bitmap, counter;
1515
1516         if (adev->flags & AMD_IS_APU)
1517                 always_on_cu_num = 4;
1518         else if (adev->asic_type == CHIP_VEGA12)
1519                 always_on_cu_num = 8;
1520         else
1521                 always_on_cu_num = 12;
1522
1523         mutex_lock(&adev->grbm_idx_mutex);
1524         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1525                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1526                         mask = 1;
1527                         cu_bitmap = 0;
1528                         counter = 0;
1529                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1530
1531                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
1532                                 if (cu_info->bitmap[i][j] & mask) {
1533                                         if (counter == pg_always_on_cu_num)
1534                                                 WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
1535                                         if (counter < always_on_cu_num)
1536                                                 cu_bitmap |= mask;
1537                                         else
1538                                                 break;
1539                                         counter++;
1540                                 }
1541                                 mask <<= 1;
1542                         }
1543
1544                         WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
1545                         cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
1546                 }
1547         }
1548         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1549         mutex_unlock(&adev->grbm_idx_mutex);
1550 }
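/*
 * Illustrative walk (hypothetical bitmap): with bitmap[i][j] = 0x3ff on an
 * APU (always_on_cu_num = 4), the loop accumulates cu_bitmap = 0xf and
 * writes RLC_PG_ALWAYS_ON_CU_MASK = 0x3 at the moment counter reaches
 * pg_always_on_cu_num (2).
 */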
1551
1552 static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
1553 {
1554         uint32_t data;
1555
1556         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1557         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1558         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
1559         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1560         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));
1561
1562         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1563         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1564
1565         /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
1566         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);
1567
1568         mutex_lock(&adev->grbm_idx_mutex);
1569         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1570         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1571         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1572
1573         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1574         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1575         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1576         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1577         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1578
1579         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1580         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1581         data &= 0x0000FFFF;
1582         data |= 0x00C00000;
1583         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1584
1585         /*
1586          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
1587          * programmed in gfx_v9_0_init_always_on_cu_mask()
1588          */
1589
1590         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1591          * but used for RLC_LB_CNTL configuration */
1592         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1593         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1594         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1595         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1596         mutex_unlock(&adev->grbm_idx_mutex);
1597
1598         gfx_v9_0_init_always_on_cu_mask(adev);
1599 }
1600
1601 static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
1602 {
1603         uint32_t data;
1604
1605         /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
1606         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
1607         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
1608         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
1609         WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));
1610
1611         /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
1612         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);
1613
1614         /* set mmRLC_LB_CNTR_MAX = 0x0000_0800 */
1615         WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);
1616
1617         mutex_lock(&adev->grbm_idx_mutex);
1618         /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
1619         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1620         WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);
1621
1622         /* set mmRLC_LB_PARAMS = 0x003F_1006 */
1623         data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
1624         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
1625         data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
1626         WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);
1627
1628         /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
1629         data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
1630         data &= 0x0000FFFF;
1631         data |= 0x00C00000;
1632         WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);
1633
1634         /*
1635          * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
1636          * programmed in gfx_v9_0_init_always_on_cu_mask()
1637          */
1638
1639         /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
1640          * but used for RLC_LB_CNTL configuration */
1641         data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
1642         data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
1643         data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
1644         WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
1645         mutex_unlock(&adev->grbm_idx_mutex);
1646
1647         gfx_v9_0_init_always_on_cu_mask(adev);
1648 }
1649
1650 static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
1651 {
1652         WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
1653 }
1654
1655 static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
1656 {
1657         return 5;
1658 }
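/* Presumably one jump table per CP block (PFP, ME, CE, MEC1, MEC2). */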
1659
1660 static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
1661 {
1662         const struct cs_section_def *cs_data;
1663         int r;
1664
1665         adev->gfx.rlc.cs_data = gfx9_cs_data;
1666
1667         cs_data = adev->gfx.rlc.cs_data;
1668
1669         if (cs_data) {
1670                 /* init clear state block */
1671                 r = amdgpu_gfx_rlc_init_csb(adev);
1672                 if (r)
1673                         return r;
1674         }
1675
1676         if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
1677                 /* TODO: double check the cp_table_size for RV */
1678                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
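                /* i.e. ALIGN(1920, 2048) + 65536 = 2048 + 65536 = 67584 bytes */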
1679                 r = amdgpu_gfx_rlc_init_cpt(adev);
1680                 if (r)
1681                         return r;
1682         }
1683
1684         switch (adev->asic_type) {
1685         case CHIP_RAVEN:
1686                 gfx_v9_0_init_lbpw(adev);
1687                 break;
1688         case CHIP_VEGA20:
1689                 gfx_v9_4_init_lbpw(adev);
1690                 break;
1691         default:
1692                 break;
1693         }
1694
1695         return 0;
1696 }
1697
1698 static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
1699 {
1700         amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1701         amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
1702 }
1703
1704 static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
1705 {
1706         int r;
1707         u32 *hpd;
1708         const __le32 *fw_data;
1709         unsigned fw_size;
1710         u32 *fw;
1711         size_t mec_hpd_size;
1712
1713         const struct gfx_firmware_header_v1_0 *mec_hdr;
1714
1715         bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1716
1717         /* take ownership of the relevant compute queues */
1718         amdgpu_gfx_compute_queue_acquire(adev);
1719         mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
1720
1721         r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1722                                       AMDGPU_GEM_DOMAIN_VRAM,
1723                                       &adev->gfx.mec.hpd_eop_obj,
1724                                       &adev->gfx.mec.hpd_eop_gpu_addr,
1725                                       (void **)&hpd);
1726         if (r) {
1727                 dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
1728                 gfx_v9_0_mec_fini(adev);
1729                 return r;
1730         }
1731
1732         memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
1733
1734         amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1735         amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1736
1737         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1738
1739         fw_data = (const __le32 *)
1740                 (adev->gfx.mec_fw->data +
1741                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1742         fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
1743
1744         r = amdgpu_bo_create_reserved(adev, le32_to_cpu(mec_hdr->header.ucode_size_bytes),
1745                                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1746                                       &adev->gfx.mec.mec_fw_obj,
1747                                       &adev->gfx.mec.mec_fw_gpu_addr,
1748                                       (void **)&fw);
1749         if (r) {
1750                 dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
1751                 gfx_v9_0_mec_fini(adev);
1752                 return r;
1753         }
1754
1755         memcpy(fw, fw_data, fw_size);
1756
1757         amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
1758         amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
1759
1760         return 0;
1761 }
1762
1763 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1764 {
1765         WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1766                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1767                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1768                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1769                 (SQ_IND_INDEX__FORCE_READ_MASK));
1770         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1771 }
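/*
 * Illustrative usage (mirrors gfx_v9_0_read_wave_data() below): read the
 * status register of wave 0 on SIMD 0 of the currently selected CU:
 *
 *	u32 status = wave_read_ind(adev, 0, 0, ixSQ_WAVE_STATUS);
 */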
1772
1773 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1774                            uint32_t wave, uint32_t thread,
1775                            uint32_t regno, uint32_t num, uint32_t *out)
1776 {
1777         WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1778                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1779                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1780                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1781                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1782                 (SQ_IND_INDEX__FORCE_READ_MASK) |
1783                 (SQ_IND_INDEX__AUTO_INCR_MASK));
1784         while (num--)
1785                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1786 }
1787
1788 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1789 {
1790         /* type 1 wave data */
1791         dst[(*no_fields)++] = 1;
1792         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1793         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1794         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1795         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1796         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1797         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1798         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1799         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1800         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1801         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1802         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1803         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1804         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1805         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1806 }
1807
1808 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
1809                                      uint32_t wave, uint32_t start,
1810                                      uint32_t size, uint32_t *dst)
1811 {
1812         wave_read_regs(
1813                 adev, simd, wave, 0,
1814                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1815 }
1816
1817 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
1818                                      uint32_t wave, uint32_t thread,
1819                                      uint32_t start, uint32_t size,
1820                                      uint32_t *dst)
1821 {
1822         wave_read_regs(
1823                 adev, simd, wave, thread,
1824                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1825 }
1826
1827 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1828                                   u32 me, u32 pipe, u32 q, u32 vm)
1829 {
1830         soc15_grbm_select(adev, me, pipe, q, vm);
1831 }
1832
1833 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1834         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1835         .select_se_sh = &gfx_v9_0_select_se_sh,
1836         .read_wave_data = &gfx_v9_0_read_wave_data,
1837         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1838         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1839         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
1840         .ras_error_inject = &gfx_v9_0_ras_error_inject,
1841         .query_ras_error_count = &gfx_v9_0_query_ras_error_count
1842 };
1843
1844 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
1845 {
1846         u32 gb_addr_config;
1847         int err;
1848
1849         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
1850
1851         switch (adev->asic_type) {
1852         case CHIP_VEGA10:
1853                 adev->gfx.config.max_hw_contexts = 8;
1854                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1855                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1856                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1857                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1858                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
1859                 break;
1860         case CHIP_VEGA12:
1861                 adev->gfx.config.max_hw_contexts = 8;
1862                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1863                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1864                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1865                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1866                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
1867                 DRM_INFO("fix gfx.config for vega12\n");
1868                 break;
1869         case CHIP_VEGA20:
1870                 adev->gfx.config.max_hw_contexts = 8;
1871                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1872                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1873                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1874                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1875                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1876                 gb_addr_config &= ~0xf3e777ff;
1877                 gb_addr_config |= 0x22014042;
1878                 /* check vbios table if gpu info is not available */
1879                 err = amdgpu_atomfirmware_get_gfx_info(adev);
1880                 if (err)
1881                         return err;
1882                 break;
1883         case CHIP_RAVEN:
1884                 adev->gfx.config.max_hw_contexts = 8;
1885                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1886                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1887                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1888                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1889                 if (adev->rev_id >= 8)
1890                         gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
1891                 else
1892                         gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
1893                 break;
1894         case CHIP_ARCTURUS:
1895                 adev->gfx.config.max_hw_contexts = 8;
1896                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1897                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1898                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1899                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1900                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1901                 gb_addr_config &= ~0xf3e777ff;
1902                 gb_addr_config |= 0x22014042;
1903                 break;
1904         case CHIP_RENOIR:
1905                 adev->gfx.config.max_hw_contexts = 8;
1906                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1907                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1908                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
1909                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1910                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1911                 gb_addr_config &= ~0xf3e777ff;
1912                 gb_addr_config |= 0x22010042;
1913                 break;
1914         default:
1915                 BUG();
1916                 break;
1917         }
1918
1919         adev->gfx.config.gb_addr_config = gb_addr_config;
1920
1921         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1922                         REG_GET_FIELD(
1923                                         adev->gfx.config.gb_addr_config,
1924                                         GB_ADDR_CONFIG,
1925                                         NUM_PIPES);
1926
1927         adev->gfx.config.max_tile_pipes =
1928                 adev->gfx.config.gb_addr_config_fields.num_pipes;
1929
1930         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
1931                         REG_GET_FIELD(
1932                                         adev->gfx.config.gb_addr_config,
1933                                         GB_ADDR_CONFIG,
1934                                         NUM_BANKS);
1935         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1936                         REG_GET_FIELD(
1937                                         adev->gfx.config.gb_addr_config,
1938                                         GB_ADDR_CONFIG,
1939                                         MAX_COMPRESSED_FRAGS);
1940         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1941                         REG_GET_FIELD(
1942                                         adev->gfx.config.gb_addr_config,
1943                                         GB_ADDR_CONFIG,
1944                                         NUM_RB_PER_SE);
1945         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1946                         REG_GET_FIELD(
1947                                         adev->gfx.config.gb_addr_config,
1948                                         GB_ADDR_CONFIG,
1949                                         NUM_SHADER_ENGINES);
1950         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1951                         REG_GET_FIELD(
1952                                         adev->gfx.config.gb_addr_config,
1953                                         GB_ADDR_CONFIG,
1954                                         PIPE_INTERLEAVE_SIZE));
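        /*
         * Worked decode (hypothetical register value): a NUM_PIPES field of 2
         * gives num_pipes = 1 << 2 = 4, and a PIPE_INTERLEAVE_SIZE field of 0
         * gives 1 << (8 + 0) = 256 bytes.
         */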
1955
1956         return 0;
1957 }
1958
1959 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1960                                       int mec, int pipe, int queue)
1961 {
1962         int r;
1963         unsigned irq_type;
1964         struct amdgpu_ring *ring;
1965
1966         ring = &adev->gfx.compute_ring[ring_id];
1967
1968         /* mec0 is me1 */
1969         ring->me = mec + 1;
1970         ring->pipe = pipe;
1971         ring->queue = queue;
1972
1973         ring->ring_obj = NULL;
1974         ring->use_doorbell = true;
1975         ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
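        /* Doorbell indices are in dword units; each 64-bit doorbell uses two,
         * hence the << 1 above (an inference from the SOC15 doorbell layout).
         */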
1976         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1977                                 + (ring_id * GFX9_MEC_HPD_SIZE);
1978         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1979
1980         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1981                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1982                 + ring->pipe;
1983
1984         /* type-2 packets are deprecated on MEC, use type-3 instead */
1985         r = amdgpu_ring_init(adev, ring, 1024,
1986                              &adev->gfx.eop_irq, irq_type);
1987         if (r)
1988                 return r;
1989
1991         return 0;
1992 }
1993
1994 static int gfx_v9_0_sw_init(void *handle)
1995 {
1996         int i, j, k, r, ring_id;
1997         struct amdgpu_ring *ring;
1998         struct amdgpu_kiq *kiq;
1999         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2000
2001         switch (adev->asic_type) {
2002         case CHIP_VEGA10:
2003         case CHIP_VEGA12:
2004         case CHIP_VEGA20:
2005         case CHIP_RAVEN:
2006         case CHIP_ARCTURUS:
2007         case CHIP_RENOIR:
2008                 adev->gfx.mec.num_mec = 2;
2009                 break;
2010         default:
2011                 adev->gfx.mec.num_mec = 1;
2012                 break;
2013         }
2014
2015         adev->gfx.mec.num_pipe_per_mec = 4;
2016         adev->gfx.mec.num_queue_per_pipe = 8;
2017
2018         /* EOP Event */
2019         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
2020         if (r)
2021                 return r;
2022
2023         /* Privileged reg */
2024         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
2025                               &adev->gfx.priv_reg_irq);
2026         if (r)
2027                 return r;
2028
2029         /* Privileged inst */
2030         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
2031                               &adev->gfx.priv_inst_irq);
2032         if (r)
2033                 return r;
2034
2035         /* ECC error */
2036         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
2037                               &adev->gfx.cp_ecc_error_irq);
2038         if (r)
2039                 return r;
2040
2041         /* FUE error */
2042         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
2043                               &adev->gfx.cp_ecc_error_irq);
2044         if (r)
2045                 return r;
2046
2047         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
2048
2049         gfx_v9_0_scratch_init(adev);
2050
2051         r = gfx_v9_0_init_microcode(adev);
2052         if (r) {
2053                 DRM_ERROR("Failed to load gfx firmware!\n");
2054                 return r;
2055         }
2056
2057         r = adev->gfx.rlc.funcs->init(adev);
2058         if (r) {
2059                 DRM_ERROR("Failed to init rlc BOs!\n");
2060                 return r;
2061         }
2062
2063         r = gfx_v9_0_mec_init(adev);
2064         if (r) {
2065                 DRM_ERROR("Failed to init MEC BOs!\n");
2066                 return r;
2067         }
2068
2069         /* set up the gfx ring */
2070         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
2071                 ring = &adev->gfx.gfx_ring[i];
2072                 ring->ring_obj = NULL;
2073                 if (!i)
2074                         sprintf(ring->name, "gfx");
2075                 else
2076                         sprintf(ring->name, "gfx_%d", i);
2077                 ring->use_doorbell = true;
2078                 ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
2079                 r = amdgpu_ring_init(adev, ring, 1024,
2080                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
2081                 if (r)
2082                         return r;
2083         }
2084
2085         /* set up the compute queues - allocate horizontally across pipes */
2086         ring_id = 0;
2087         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2088                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2089                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2090                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
2091                                         continue;
2092
2093                                 r = gfx_v9_0_compute_ring_init(adev,
2094                                                                ring_id,
2095                                                                i, k, j);
2096                                 if (r)
2097                                         return r;
2098
2099                                 ring_id++;
2100                         }
2101                 }
2102         }
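        /*
         * With all queues enabled this yields a breadth-first order across
         * pipes: ring 0 -> MEC0 pipe0 queue0, ring 1 -> MEC0 pipe1 queue0,
         * and so on, before the next queue slot is used.
         */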
2103
2104         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
2105         if (r) {
2106                 DRM_ERROR("Failed to init KIQ BOs!\n");
2107                 return r;
2108         }
2109
2110         kiq = &adev->gfx.kiq;
2111         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
2112         if (r)
2113                 return r;
2114
2115         /* create MQD for all compute queues as well as KIQ for the SRIOV case */
2116         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
2117         if (r)
2118                 return r;
2119
2120         adev->gfx.ce_ram_size = 0x8000;
2121
2122         r = gfx_v9_0_gpu_early_init(adev);
2123         if (r)
2124                 return r;
2125
2126         return 0;
2127 }
2128
2130 static int gfx_v9_0_sw_fini(void *handle)
2131 {
2132         int i;
2133         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2134
2135         amdgpu_gfx_ras_fini(adev);
2136
2137         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2138                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2139         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2140                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2141
2142         amdgpu_gfx_mqd_sw_fini(adev);
2143         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
2144         amdgpu_gfx_kiq_fini(adev);
2145
2146         gfx_v9_0_mec_fini(adev);
2147         amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
2148         if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) {
2149                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2150                                 &adev->gfx.rlc.cp_table_gpu_addr,
2151                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2152         }
2153         gfx_v9_0_free_microcode(adev);
2154
2155         return 0;
2156 }
2157
2159 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
2160 {
2161         /* TODO */
2162 }
2163
2164 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
2165 {
2166         u32 data;
2167
2168         if (instance == 0xffffffff)
2169                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
2170         else
2171                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
2172
2173         if (se_num == 0xffffffff)
2174                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
2175         else
2176                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
2177
2178         if (sh_num == 0xffffffff)
2179                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
2180         else
2181                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
2182
2183         WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
2184 }
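/*
 * Typical usage, as elsewhere in this file: select one SE/SH pair, then
 * restore broadcast so that later register writes reach every instance:
 *
 *	gfx_v9_0_select_se_sh(adev, se, sh, 0xffffffff);
 *	...
 *	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 */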
2185
2186 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
2187 {
2188         u32 data, mask;
2189
2190         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
2191         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
2192
2193         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
2194         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
2195
2196         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
2197                                          adev->gfx.config.max_sh_per_se);
2198
2199         return (~data) & mask;
2200 }
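/*
 * Sketch of the mask math (hypothetical values): with 4 backends per SE and
 * 1 SH per SE, amdgpu_gfx_create_bitmask() returns 0xf; a raw DISABLE field
 * of 0x1 then yields active RBs (~0x1) & 0xf = 0xe.
 */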
2201
2202 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
2203 {
2204         int i, j;
2205         u32 data;
2206         u32 active_rbs = 0;
2207         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
2208                                         adev->gfx.config.max_sh_per_se;
2209
2210         mutex_lock(&adev->grbm_idx_mutex);
2211         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2212                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2213                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2214                         data = gfx_v9_0_get_rb_active_bitmap(adev);
2215                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
2216                                                rb_bitmap_width_per_sh);
2217                 }
2218         }
2219         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2220         mutex_unlock(&adev->grbm_idx_mutex);
2221
2222         adev->gfx.config.backend_enable_mask = active_rbs;
2223         adev->gfx.config.num_rbs = hweight32(active_rbs);
2224 }
2225
2226 #define DEFAULT_SH_MEM_BASES    (0x6000)
2227 #define FIRST_COMPUTE_VMID      (8)
2228 #define LAST_COMPUTE_VMID       (16)
2229 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
2230 {
2231         int i;
2232         uint32_t sh_mem_config;
2233         uint32_t sh_mem_bases;
2234
2235         /*
2236          * Configure apertures:
2237          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
2238          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
2239          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
2240          */
2241         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
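        /*
         * Each 16-bit half of SH_MEM_BASES forms the top bits of an aperture
         * base, so 0x6000 in both fields selects the 0x60000000'00000000
         * region laid out in the comment above.
         */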
2242
2243         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
2244                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
2245                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
2246
2247         mutex_lock(&adev->srbm_mutex);
2248         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2249                 soc15_grbm_select(adev, 0, 0, 0, i);
2250                 /* CP and shaders */
2251                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
2252                 WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
2253         }
2254         soc15_grbm_select(adev, 0, 0, 0, 0);
2255         mutex_unlock(&adev->srbm_mutex);
2256
2257         /* Initialize all compute VMIDs to have no GDS, GWS, or OA
2258            access. These should be enabled by FW for target VMIDs. */
2259         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
2260                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
2261                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
2262                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
2263                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
2264         }
2265 }
2266
2267 static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
2268 {
2269         int vmid;
2270
2271         /*
2272          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
2273          * access. Compute VMIDs should be enabled by FW for target VMIDs,
2274          * the driver can enable them for graphics. VMID0 should maintain
2275          * access so that HWS firmware can save/restore entries.
2276          */
2277         for (vmid = 1; vmid < 16; vmid++) {
2278                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
2279                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
2280                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
2281                 WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
2282         }
2283 }
2284
2285 static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
2286 {
2287         u32 tmp;
2288         int i;
2289
2290         WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
2291
2292         gfx_v9_0_tiling_mode_table_init(adev);
2293
2294         gfx_v9_0_setup_rb(adev);
2295         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
2296         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
2297
2298         /* XXX SH_MEM regs */
2299         /* where to put LDS, scratch, GPUVM in FSA64 space */
2300         mutex_lock(&adev->srbm_mutex);
2301         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
2302                 soc15_grbm_select(adev, 0, 0, 0, i);
2303                 /* CP and shaders */
2304                 if (i == 0) {
2305                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2306                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2307                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2308                                             !!amdgpu_noretry);
2309                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2310                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
2311                 } else {
2312                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
2313                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
2314                         tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
2315                                             !!amdgpu_noretry);
2316                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
2317                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
2318                                 (adev->gmc.private_aperture_start >> 48));
2319                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
2320                                 (adev->gmc.shared_aperture_start >> 48));
2321                         WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
2322                 }
2323         }
2324         soc15_grbm_select(adev, 0, 0, 0, 0);
2325
2326         mutex_unlock(&adev->srbm_mutex);
2327
2328         gfx_v9_0_init_compute_vmid(adev);
2329         gfx_v9_0_init_gds_vmid(adev);
2330 }
2331
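     /*
      * Spin until the RLC SERDES masters report idle: first the per-CU
      * master for every SE/SH, then the non-CU (SE/GC/TC) masters.
      * Gives up after adev->usec_timeout microseconds per unit.
      */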
2332 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
2333 {
2334         u32 i, j, k;
2335         u32 mask;
2336
2337         mutex_lock(&adev->grbm_idx_mutex);
2338         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
2339                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
2340                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
2341                         for (k = 0; k < adev->usec_timeout; k++) {
2342                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
2343                                         break;
2344                                 udelay(1);
2345                         }
2346                         if (k == adev->usec_timeout) {
2347                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
2348                                                       0xffffffff, 0xffffffff);
2349                                 mutex_unlock(&adev->grbm_idx_mutex);
2350                                 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
2351                                          i, j);
2352                                 return;
2353                         }
2354                 }
2355         }
2356         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2357         mutex_unlock(&adev->grbm_idx_mutex);
2358
2359         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
2360                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
2361                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
2362                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
2363         for (k = 0; k < adev->usec_timeout; k++) {
2364                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
2365                         break;
2366                 udelay(1);
2367         }
2368 }
2369
2370 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
2371                                                bool enable)
2372 {
2373         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
2374
2375         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
2376         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
2377         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
2378         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
2379
2380         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
2381 }
2382
2383 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
2384 {
2385         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
2386         /* csib */
2387         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
2388                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
2389         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
2390                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
2391         WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
2392                         adev->gfx.rlc.clear_state_size);
2393 }
2394
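     /*
      * Scan the RLC register-list-format blob from indirect_offset up to
      * list_size, recording where each 0xFFFFFFFF-terminated indirect
      * segment starts and collecting the set of unique indirect register
      * offsets it references.
      */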
2395 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
2396                                 int indirect_offset,
2397                                 int list_size,
2398                                 int *unique_indirect_regs,
2399                                 int unique_indirect_reg_count,
2400                                 int *indirect_start_offsets,
2401                                 int *indirect_start_offsets_count,
2402                                 int max_start_offsets_count)
2403 {
2404         int idx;
2405
2406         for (; indirect_offset < list_size; indirect_offset++) {
2407                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
2408                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
2409                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
2410
2411                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
2412                         indirect_offset += 2;
2413
2414                         /* look for the matching index */
2415                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
2416                                 if (unique_indirect_regs[idx] ==
2417                                         register_list_format[indirect_offset] ||
2418                                         !unique_indirect_regs[idx])
2419                                         break;
2420                         }
2421
2422                         BUG_ON(idx >= unique_indirect_reg_count);
2423
2424                         if (!unique_indirect_regs[idx])
2425                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
2426
2427                         indirect_offset++;
2428                 }
2429         }
2430 }
2431
2432 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
2433 {
2434         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2435         int unique_indirect_reg_count = 0;
2436
2437         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2438         int indirect_start_offsets_count = 0;
2439
2440         int list_size = 0;
2441         int i = 0, j = 0;
2442         u32 tmp = 0;
2443
2444         u32 *register_list_format =
2445                 kmemdup(adev->gfx.rlc.register_list_format,
2446                         adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
2447         if (!register_list_format)
2448                 return -ENOMEM;
2449
2450         /* setup unique_indirect_regs array and indirect_start_offsets array */
2451         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
2452         gfx_v9_1_parse_ind_reg_list(register_list_format,
2453                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
2454                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
2455                                     unique_indirect_regs,
2456                                     unique_indirect_reg_count,
2457                                     indirect_start_offsets,
2458                                     &indirect_start_offsets_count,
2459                                     ARRAY_SIZE(indirect_start_offsets));
2460
2461         /* enable auto inc in case it is disabled */
2462         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
2463         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2464         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
2465
2466         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
2467         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
2468                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
2469         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
2470                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
2471                         adev->gfx.rlc.register_restore[i]);
2472
2473         /* load indirect register */
2474         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2475                 adev->gfx.rlc.reg_list_format_start);
2476
2477         /* direct register portion */
2478         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
2479                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2480                         register_list_format[i]);
2481
2482         /* indirect register portion */
2483         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
2484                 if (register_list_format[i] == 0xFFFFFFFF) {
2485                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2486                         continue;
2487                 }
2488
2489                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2490                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
2491
2492                 for (j = 0; j < unique_indirect_reg_count; j++) {
2493                         if (register_list_format[i] == unique_indirect_regs[j]) {
2494                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
2495                                 break;
2496                         }
2497                 }
2498
2499                 BUG_ON(j >= unique_indirect_reg_count);
2500
2501                 i++;
2502         }
2503
2504         /* set save/restore list size */
2505         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
2506         list_size = list_size >> 1;
2507         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2508                 adev->gfx.rlc.reg_restore_list_size);
2509         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
2510
2511         /* write the starting offsets to RLC scratch ram */
2512         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
2513                 adev->gfx.rlc.starting_offsets_start);
2514         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
2515                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
2516                        indirect_start_offsets[i]);
2517
2518         /* load unique indirect regs */
2519         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2520                 if (unique_indirect_regs[i] != 0) {
2521                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2522                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2523                                unique_indirect_regs[i] & 0x3FFFF);
2524
2525                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2526                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2527                                unique_indirect_regs[i] >> 20);
2528                 }
2529         }
2530
2531         kfree(register_list_format);
2532         return 0;
2533 }
2534
2535 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2536 {
2537         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2538 }
2539
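     /*
      * Grant or revoke GFXIP control over CGPG via PWR_MISC_CNTL_STATUS;
      * the register is only written when the value actually changes.
      */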
2540 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2541                                              bool enable)
2542 {
2543         uint32_t data = 0;
2544         uint32_t default_data = 0;
2545
2546         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2547         if (enable) {
2548                 /* enable GFXIP control over CGPG */
2549                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2550                 if (default_data != data)
2551                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2552
2553                 /* update status */
2554                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2555                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2556                 if (default_data != data)
2557                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2558         } else {
2559                 /* disable GFXIP control over CGPG */
2560                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2561                 if (default_data != data)
2562                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2563         }
2564 }
2565
2566 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2567 {
2568         uint32_t data = 0;
2569
2570         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2571                               AMD_PG_SUPPORT_GFX_SMG |
2572                               AMD_PG_SUPPORT_GFX_DMG)) {
2573                 /* init IDLE_POLL_COUNT = 60 */
2574                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2575                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2576                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2577                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2578
2579                 /* init RLC PG Delay */
2580                 data = 0;
2581                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2582                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2583                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2584                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2585                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2586
2587                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2588                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2589                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2590                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2591
2592                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2593                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2594                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2595                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2596
2597                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2598                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2599
2600                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2601                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2602                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2603
2604                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2605         }
2606 }
2607
2608 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2609                                                 bool enable)
2610 {
2611         uint32_t data = 0;
2612         uint32_t default_data = 0;
2613
2614         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2615         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2616                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2617                              enable ? 1 : 0);
2618         if (default_data != data)
2619                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2620 }
2621
2622 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2623                                                 bool enable)
2624 {
2625         uint32_t data = 0;
2626         uint32_t default_data = 0;
2627
2628         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2629         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2630                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2631                              enable ? 1 : 0);
2632         if (default_data != data)
2633                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2634 }
2635
2636 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2637                                         bool enable)
2638 {
2639         uint32_t data = 0;
2640         uint32_t default_data = 0;
2641
2642         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2643         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2644                              CP_PG_DISABLE,
2645                              enable ? 0 : 1);
2646         if (default_data != data)
2647                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2648 }
2649
2650 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2651                                                 bool enable)
2652 {
2653         uint32_t data, default_data;
2654
2655         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2656         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2657                              GFX_POWER_GATING_ENABLE,
2658                              enable ? 1 : 0);
2659         if (default_data != data)
2660                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2661 }
2662
2663 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2664                                                 bool enable)
2665 {
2666         uint32_t data, default_data;
2667
2668         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2669         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2670                              GFX_PIPELINE_PG_ENABLE,
2671                              enable ? 1 : 0);
2672         if (default_data != data)
2673                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2674
2675         if (!enable)
2676                 /* read any GFX register to wake up GFX */
2677                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2678 }
2679
2680 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2681                                                        bool enable)
2682 {
2683         uint32_t data, default_data;
2684
2685         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2686         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2687                              STATIC_PER_CU_PG_ENABLE,
2688                              enable ? 1 : 0);
2689         if (default_data != data)
2690                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2691 }
2692
2693 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2694                                                 bool enable)
2695 {
2696         uint32_t data, default_data;
2697
2698         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2699         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2700                              DYN_PER_CU_PG_ENABLE,
2701                              enable ? 1 : 0);
2702         if (default_data != data)
2703                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2704 }
2705
2706 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2707 {
2708         gfx_v9_0_init_csb(adev);
2709
2710         /*
2711          * The RLC save/restore list is supported from RLC v2_1 onward,
2712          * and it is required by the gfxoff feature.
2713          */
2714         if (adev->gfx.rlc.is_rlc_v2_1) {
2715                 if (adev->asic_type == CHIP_VEGA12 ||
2716                     (adev->asic_type == CHIP_RAVEN &&
2717                      adev->rev_id >= 8))
2718                         gfx_v9_1_init_rlc_save_restore_list(adev);
2719                 gfx_v9_0_enable_save_restore_machine(adev);
2720         }
2721
2722         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2723                               AMD_PG_SUPPORT_GFX_SMG |
2724                               AMD_PG_SUPPORT_GFX_DMG |
2725                               AMD_PG_SUPPORT_CP |
2726                               AMD_PG_SUPPORT_GDS |
2727                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2728                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2729                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2730                 gfx_v9_0_init_gfx_power_gating(adev);
2731         }
2732 }
2733
2734 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2735 {
2736         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2737         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2738         gfx_v9_0_wait_for_rlc_serdes(adev);
2739 }
2740
2741 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2742 {
2743         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2744         udelay(50);
2745         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2746         udelay(50);
2747 }
2748
2749 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2750 {
2751 #ifdef AMDGPU_RLC_DEBUG_RETRY
2752         u32 rlc_ucode_ver;
2753 #endif
2754
2755         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2756         udelay(50);
2757
2758         /* APUs (e.g. carrizo) enable the cp interrupt after the cp is initialized */
2759         if (!(adev->flags & AMD_IS_APU)) {
2760                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2761                 udelay(50);
2762         }
2763
2764 #ifdef AMDGPU_RLC_DEBUG_RETRY
2765         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2766         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2767         if (rlc_ucode_ver == 0x108) {
2768                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2769                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2770                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2771                  * default is 0x9C4 to create a 100us interval */
2772                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2773                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2774                  * to disable the page fault retry interrupts, default is
2775                  * 0x100 (256) */
2776                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2777         }
2778 #endif
2779 }
2780
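     /*
      * Legacy (non-PSP) RLC microcode load: stream the ucode words
      * through the RLC_GPM_UCODE_ADDR/DATA pair, then store the firmware
      * version in the ADDR register.
      */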
2781 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2782 {
2783         const struct rlc_firmware_header_v2_0 *hdr;
2784         const __le32 *fw_data;
2785         unsigned i, fw_size;
2786
2787         if (!adev->gfx.rlc_fw)
2788                 return -EINVAL;
2789
2790         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2791         amdgpu_ucode_print_rlc_hdr(&hdr->header);
2792
2793         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2794                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2795         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2796
2797         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2798                         RLCG_UCODE_LOADING_START_ADDRESS);
2799         for (i = 0; i < fw_size; i++)
2800                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2801         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2802
2803         return 0;
2804 }
2805
2806 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2807 {
2808         int r;
2809
2810         if (amdgpu_sriov_vf(adev)) {
2811                 gfx_v9_0_init_csb(adev);
2812                 return 0;
2813         }
2814
2815         adev->gfx.rlc.funcs->stop(adev);
2816
2817         /* disable CG */
2818         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2819
2820         gfx_v9_0_init_pg(adev);
2821
2822         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2823                 /* legacy rlc firmware loading */
2824                 r = gfx_v9_0_rlc_load_microcode(adev);
2825                 if (r)
2826                         return r;
2827         }
2828
2829         switch (adev->asic_type) {
2830         case CHIP_RAVEN:
2831                 if (amdgpu_lbpw == 0)
2832                         gfx_v9_0_enable_lbpw(adev, false);
2833                 else
2834                         gfx_v9_0_enable_lbpw(adev, true);
2835                 break;
2836         case CHIP_VEGA20:
2837                 if (amdgpu_lbpw > 0)
2838                         gfx_v9_0_enable_lbpw(adev, true);
2839                 else
2840                         gfx_v9_0_enable_lbpw(adev, false);
2841                 break;
2842         default:
2843                 break;
2844         }
2845
2846         adev->gfx.rlc.funcs->start(adev);
2847
2848         return 0;
2849 }
2850
2851 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2852 {
2853         int i;
2854         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2855
2856         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2857         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2858         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2859         if (!enable) {
2860                 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2861                         adev->gfx.gfx_ring[i].sched.ready = false;
2862         }
2863         WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
2864         udelay(50);
2865 }
2866
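     /*
      * Legacy (non-PSP) CP gfx microcode load: halt the gfx CP, then
      * stream the PFP, CE and ME images through their respective
      * UCODE_ADDR/DATA (or RAM_WADDR/DATA) register pairs.
      */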
2867 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2868 {
2869         const struct gfx_firmware_header_v1_0 *pfp_hdr;
2870         const struct gfx_firmware_header_v1_0 *ce_hdr;
2871         const struct gfx_firmware_header_v1_0 *me_hdr;
2872         const __le32 *fw_data;
2873         unsigned i, fw_size;
2874
2875         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2876                 return -EINVAL;
2877
2878         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2879                 adev->gfx.pfp_fw->data;
2880         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2881                 adev->gfx.ce_fw->data;
2882         me_hdr = (const struct gfx_firmware_header_v1_0 *)
2883                 adev->gfx.me_fw->data;
2884
2885         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2886         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2887         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2888
2889         gfx_v9_0_cp_gfx_enable(adev, false);
2890
2891         /* PFP */
2892         fw_data = (const __le32 *)
2893                 (adev->gfx.pfp_fw->data +
2894                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2895         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2896         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2897         for (i = 0; i < fw_size; i++)
2898                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2899         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2900
2901         /* CE */
2902         fw_data = (const __le32 *)
2903                 (adev->gfx.ce_fw->data +
2904                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2905         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2906         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2907         for (i = 0; i < fw_size; i++)
2908                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2909         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2910
2911         /* ME */
2912         fw_data = (const __le32 *)
2913                 (adev->gfx.me_fw->data +
2914                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2915         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2916         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2917         for (i = 0; i < fw_size; i++)
2918                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2919         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2920
2921         return 0;
2922 }
2923
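     /*
      * Bring up the gfx CP and emit the initial state sequence on ring 0:
      * preamble begin/end, the SECT_CONTEXT register extents from
      * gfx9_cs_data, CLEAR_STATE itself and the CE partition bases.
      */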
2924 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2925 {
2926         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2927         const struct cs_section_def *sect = NULL;
2928         const struct cs_extent_def *ext = NULL;
2929         int r, i, tmp;
2930
2931         /* init the CP */
2932         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2933         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2934
2935         gfx_v9_0_cp_gfx_enable(adev, true);
2936
2937         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2938         if (r) {
2939                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2940                 return r;
2941         }
2942
2943         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2944         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2945
2946         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2947         amdgpu_ring_write(ring, 0x80000000);
2948         amdgpu_ring_write(ring, 0x80000000);
2949
2950         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2951                 for (ext = sect->section; ext->extent != NULL; ++ext) {
2952                         if (sect->id == SECT_CONTEXT) {
2953                                 amdgpu_ring_write(ring,
2954                                        PACKET3(PACKET3_SET_CONTEXT_REG,
2955                                                ext->reg_count));
2956                                 amdgpu_ring_write(ring,
2957                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2958                                 for (i = 0; i < ext->reg_count; i++)
2959                                         amdgpu_ring_write(ring, ext->extent[i]);
2960                         }
2961                 }
2962         }
2963
2964         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2965         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2966
2967         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2968         amdgpu_ring_write(ring, 0);
2969
2970         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2971         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2972         amdgpu_ring_write(ring, 0x8000);
2973         amdgpu_ring_write(ring, 0x8000);
2974
2975         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2976         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2977                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2978         amdgpu_ring_write(ring, tmp);
2979         amdgpu_ring_write(ring, 0);
2980
2981         amdgpu_ring_commit(ring);
2982
2983         return 0;
2984 }
2985
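     /*
      * Program the gfx ring buffer: size, rptr/wptr write-back
      * addresses, base address and (optionally) the doorbell, then run
      * cp_gfx_start and mark the ring schedulable.
      */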
2986 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2987 {
2988         struct amdgpu_ring *ring;
2989         u32 tmp;
2990         u32 rb_bufsz;
2991         u64 rb_addr, rptr_addr, wptr_gpu_addr;
2992
2993         /* Set the write pointer delay */
2994         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2995
2996         /* set the RB to use vmid 0 */
2997         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2998
2999         /* Set ring buffer size */
3000         ring = &adev->gfx.gfx_ring[0];
3001         rb_bufsz = order_base_2(ring->ring_size / 8);
3002         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3003         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3004 #ifdef __BIG_ENDIAN
3005         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
3006 #endif
3007         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3008
3009         /* Initialize the ring buffer's write pointers */
3010         ring->wptr = 0;
3011         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3012         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3013
3014         /* set the wb address whether it's enabled or not */
3015         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3016         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3017         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3018
3019         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3020         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
3021         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
3022
3023         mdelay(1);
3024         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
3025
3026         rb_addr = ring->gpu_addr >> 8;
3027         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
3028         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3029
3030         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
3031         if (ring->use_doorbell) {
3032                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3033                                     DOORBELL_OFFSET, ring->doorbell_index);
3034                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3035                                     DOORBELL_EN, 1);
3036         } else {
3037                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
3038         }
3039         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
3040
3041         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3042                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
3043         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
3044
3045         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
3046                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3047
3049         /* start the ring */
3050         gfx_v9_0_cp_gfx_start(adev);
3051         ring->sched.ready = true;
3052
3053         return 0;
3054 }
3055
3056 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3057 {
3058         int i;
3059
3060         if (enable) {
3061                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
3062         } else {
3063                 WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
3064                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
3065                 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3066                         adev->gfx.compute_ring[i].sched.ready = false;
3067                 adev->gfx.kiq.ring.sched.ready = false;
3068         }
3069         udelay(50);
3070 }
3071
3072 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3073 {
3074         const struct gfx_firmware_header_v1_0 *mec_hdr;
3075         const __le32 *fw_data;
3076         unsigned i;
3077         u32 tmp;
3078
3079         if (!adev->gfx.mec_fw)
3080                 return -EINVAL;
3081
3082         gfx_v9_0_cp_compute_enable(adev, false);
3083
3084         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3085         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3086
3087         fw_data = (const __le32 *)
3088                 (adev->gfx.mec_fw->data +
3089                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3090         tmp = 0;
3091         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3092         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3093         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
3094
3095         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
3096                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
3097         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
3098                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3099
3100         /* MEC1 */
3101         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3102                          mec_hdr->jt_offset);
3103         for (i = 0; i < mec_hdr->jt_size; i++)
3104                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
3105                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3106
3107         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
3108                         adev->gfx.mec_fw_version);
3109         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
3110
3111         return 0;
3112 }
3113
3114 /* KIQ functions */
3115 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
3116 {
3117         uint32_t tmp;
3118         struct amdgpu_device *adev = ring->adev;
3119
3120         /* tell RLC which is KIQ queue */
3121         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
3122         tmp &= 0xffffff00;
3123         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3124         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3125         tmp |= 0x80;
3126         WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
3127 }
3128
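     /*
      * Use the KIQ to map all kernel compute queues (KCQs): one
      * SET_RESOURCES packet advertising the usable queue mask, then one
      * MAP_QUEUES packet per compute ring.
      */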
3129 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
3130 {
3131         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3132         uint64_t queue_mask = 0;
3133         int r, i;
3134
3135         for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
3136                 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
3137                         continue;
3138
3139                 /* This situation may be hit in the future if a new HW
3140                  * generation exposes more than 64 queues. If so, the
3141                  * definition of queue_mask needs updating */
3142                 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
3143                         DRM_ERROR("Invalid KCQ enabled: %d\n", i);
3144                         break;
3145                 }
3146
3147                 queue_mask |= (1ull << i);
3148         }
3149
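             /*
              * Ring space: SET_RESOURCES is 8 dwords (header + 7) and each
              * MAP_QUEUES is 7 dwords (header + 6), hence 7 * rings + 8.
              */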
3150         r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8);
3151         if (r) {
3152                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3153                 return r;
3154         }
3155
3156         /* set resources */
3157         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
3158         amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
3159                           PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
3160         amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
3161         amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
3162         amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
3163         amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
3164         amdgpu_ring_write(kiq_ring, 0); /* oac mask */
3165         amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
3166         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3167                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3168                 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
3169                 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3170
3171                 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
3172                 /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
3173                 amdgpu_ring_write(kiq_ring,
3174                                   PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
3175                                   PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
3176                                   PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
3177                                   PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
3178                                   PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
3179                                   PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
3180                                   PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
3181                                   PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
3182                                   PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
3183                 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
3184                 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
3185                 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
3186                 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
3187                 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
3188         }
3189
3190         r = amdgpu_ring_test_helper(kiq_ring);
3191         if (r)
3192                 DRM_ERROR("KCQ enable failed\n");
3193
3194         return r;
3195 }
3196
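     /*
      * Fill in the memory queue descriptor (MQD) for a compute queue
      * from the ring's state: EOP buffer, doorbell, ring base/size and
      * pointer write-back addresses. The CP reads this structure when
      * the queue is mapped.
      */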
3197 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
3198 {
3199         struct amdgpu_device *adev = ring->adev;
3200         struct v9_mqd *mqd = ring->mqd_ptr;
3201         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
3202         uint32_t tmp;
3203
3204         mqd->header = 0xC0310800;
3205         mqd->compute_pipelinestat_enable = 0x00000001;
3206         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3207         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3208         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3209         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3210         mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
3211         mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
3212         mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
3213         mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
3214         mqd->compute_misc_reserved = 0x00000003;
3215
3216         mqd->dynamic_cu_mask_addr_lo =
3217                 lower_32_bits(ring->mqd_gpu_addr
3218                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3219         mqd->dynamic_cu_mask_addr_hi =
3220                 upper_32_bits(ring->mqd_gpu_addr
3221                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
3222
3223         eop_base_addr = ring->eop_gpu_addr >> 8;
3224         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3225         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3226
3227         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
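             /* e.g. GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords, so
              * EOP_SIZE = order_base_2(1024) - 1 = 9 -> 2^(9+1) = 1024 dwords */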
3228         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
3229         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3230                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
3231
3232         mqd->cp_hqd_eop_control = tmp;
3233
3234         /* enable doorbell? */
3235         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3236
3237         if (ring->use_doorbell) {
3238                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3239                                     DOORBELL_OFFSET, ring->doorbell_index);
3240                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3241                                     DOORBELL_EN, 1);
3242                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3243                                     DOORBELL_SOURCE, 0);
3244                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3245                                     DOORBELL_HIT, 0);
3246         } else {
3247                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3248                                          DOORBELL_EN, 0);
3249         }
3250
3251         mqd->cp_hqd_pq_doorbell_control = tmp;
3252
3253         /* disable the queue if it's active */
3254         ring->wptr = 0;
3255         mqd->cp_hqd_dequeue_request = 0;
3256         mqd->cp_hqd_pq_rptr = 0;
3257         mqd->cp_hqd_pq_wptr_lo = 0;
3258         mqd->cp_hqd_pq_wptr_hi = 0;
3259
3260         /* set the pointer to the MQD */
3261         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
3262         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
3263
3264         /* set MQD vmid to 0 */
3265         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
3266         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3267         mqd->cp_mqd_control = tmp;
3268
3269         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3270         hqd_gpu_addr = ring->gpu_addr >> 8;
3271         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3272         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3273
3274         /* set up the HQD, this is similar to CP_RB0_CNTL */
3275         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
3276         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3277                             (order_base_2(ring->ring_size / 4) - 1));
3278         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3279                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
3280 #ifdef __BIG_ENDIAN
3281         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
3282 #endif
3283         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3284         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
3285         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3286         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3287         mqd->cp_hqd_pq_control = tmp;
3288
3289         /* set the wb address whether it's enabled or not */
3290         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
3291         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3292         mqd->cp_hqd_pq_rptr_report_addr_hi =
3293                 upper_32_bits(wb_gpu_addr) & 0xffff;
3294
3295         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3296         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
3297         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3298         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3299
3300         tmp = 0;
3301         /* enable the doorbell if requested */
3302         if (ring->use_doorbell) {
3303                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
3304                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3305                                 DOORBELL_OFFSET, ring->doorbell_index);
3306
3307                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3308                                          DOORBELL_EN, 1);
3309                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3310                                          DOORBELL_SOURCE, 0);
3311                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3312                                          DOORBELL_HIT, 0);
3313         }
3314
3315         mqd->cp_hqd_pq_doorbell_control = tmp;
3316
3317         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3318         ring->wptr = 0;
3319         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
3320
3321         /* set the vmid for the queue */
3322         mqd->cp_hqd_vmid = 0;
3323
3324         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
3325         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
3326         mqd->cp_hqd_persistent_state = tmp;
3327
3328         /* set MIN_IB_AVAIL_SIZE */
3329         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
3330         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3331         mqd->cp_hqd_ib_control = tmp;
3332
3333         /* activate the queue */
3334         mqd->cp_hqd_active = 1;
3335
3336         return 0;
3337 }
3338
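     /*
      * Push the KIQ's MQD values into the live CP_HQD_* registers. Must
      * be called with the queue selected through soc15_grbm_select() and
      * srbm_mutex held.
      */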
3339 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
3340 {
3341         struct amdgpu_device *adev = ring->adev;
3342         struct v9_mqd *mqd = ring->mqd_ptr;
3343         int j;
3344
3345         /* disable wptr polling */
3346         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3347
3348         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
3349                mqd->cp_hqd_eop_base_addr_lo);
3350         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
3351                mqd->cp_hqd_eop_base_addr_hi);
3352
3353         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3354         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
3355                mqd->cp_hqd_eop_control);
3356
3357         /* enable doorbell? */
3358         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3359                mqd->cp_hqd_pq_doorbell_control);
3360
3361         /* disable the queue if it's active */
3362         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3363                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3364                 for (j = 0; j < adev->usec_timeout; j++) {
3365                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3366                                 break;
3367                         udelay(1);
3368                 }
3369                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
3370                        mqd->cp_hqd_dequeue_request);
3371                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
3372                        mqd->cp_hqd_pq_rptr);
3373                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3374                        mqd->cp_hqd_pq_wptr_lo);
3375                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3376                        mqd->cp_hqd_pq_wptr_hi);
3377         }
3378
3379         /* set the pointer to the MQD */
3380         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
3381                mqd->cp_mqd_base_addr_lo);
3382         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
3383                mqd->cp_mqd_base_addr_hi);
3384
3385         /* set MQD vmid to 0 */
3386         WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
3387                mqd->cp_mqd_control);
3388
3389         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3390         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
3391                mqd->cp_hqd_pq_base_lo);
3392         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
3393                mqd->cp_hqd_pq_base_hi);
3394
3395         /* set up the HQD, this is similar to CP_RB0_CNTL */
3396         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
3397                mqd->cp_hqd_pq_control);
3398
3399         /* set the wb address whether it's enabled or not */
3400         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
3401                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
3402         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3403                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
3404
3405         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3406         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
3407                mqd->cp_hqd_pq_wptr_poll_addr_lo);
3408         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3409                mqd->cp_hqd_pq_wptr_poll_addr_hi);
3410
3411         /* enable the doorbell if requested */
3412         if (ring->use_doorbell) {
3413                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
3414                                         (adev->doorbell_index.kiq * 2) << 2);
3415                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
3416                                         (adev->doorbell_index.userqueue_end * 2) << 2);
3417         }
3418
3419         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
3420                mqd->cp_hqd_pq_doorbell_control);
3421
3422         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3423         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
3424                mqd->cp_hqd_pq_wptr_lo);
3425         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
3426                mqd->cp_hqd_pq_wptr_hi);
3427
3428         /* set the vmid for the queue */
3429         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
3430
3431         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
3432                mqd->cp_hqd_persistent_state);
3433
3434         /* activate the queue */
3435         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
3436                mqd->cp_hqd_active);
3437
3438         if (ring->use_doorbell)
3439                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
3440
3441         return 0;
3442 }
3443
3444 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
3445 {
3446         struct amdgpu_device *adev = ring->adev;
3447         int j;
3448
3449         /* disable the queue if it's active */
3450         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
3451
3452                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
3453
3454                 for (j = 0; j < adev->usec_timeout; j++) {
3455                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
3456                                 break;
3457                         udelay(1);
3458                 }
3459
3460                 if (j == adev->usec_timeout) {
3461                         DRM_DEBUG("KIQ dequeue request failed.\n");
3462
3463                         /* Manual disable if dequeue request times out */
3464                         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
3465                 }
3466
3467                 WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
3469         }
3470
3471         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
3472         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
3473         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
3474         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
3475         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
3476         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
3477         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
3478         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
3479
3480         return 0;
3481 }
3482
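     /*
      * Initialize (or, on GPU reset, restore from backup) the KIQ's MQD
      * and program it into the HQD registers directly; the KIQ is
      * brought up by the driver rather than mapped through itself.
      */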
3483 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
3484 {
3485         struct amdgpu_device *adev = ring->adev;
3486         struct v9_mqd *mqd = ring->mqd_ptr;
3487         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
3488
3489         gfx_v9_0_kiq_setting(ring);
3490
3491         if (adev->in_gpu_reset) { /* for GPU_RESET case */
3492                 /* reset MQD to a clean status */
3493                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3494                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3495
3496                 /* reset ring buffer */
3497                 ring->wptr = 0;
3498                 amdgpu_ring_clear_ring(ring);
3499
3500                 mutex_lock(&adev->srbm_mutex);
3501                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3502                 gfx_v9_0_kiq_init_register(ring);
3503                 soc15_grbm_select(adev, 0, 0, 0, 0);
3504                 mutex_unlock(&adev->srbm_mutex);
3505         } else {
3506                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3507                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3508                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3509                 mutex_lock(&adev->srbm_mutex);
3510                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3511                 gfx_v9_0_mqd_init(ring);
3512                 gfx_v9_0_kiq_init_register(ring);
3513                 soc15_grbm_select(adev, 0, 0, 0, 0);
3514                 mutex_unlock(&adev->srbm_mutex);
3515
3516                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3517                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3518         }
3519
3520         return 0;
3521 }
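
/*
 * The two branches above implement a save/restore scheme for the KIQ
 * MQD: on first init the MQD is built from scratch and a CPU-side copy
 * is stashed in adev->gfx.mec.mqd_backup[]; on GPU reset the stashed
 * copy is written back instead of being re-derived, so the queue
 * restarts from a known-good state.  Roughly:
 *
 *   first init:  mqd_init() -> program registers -> backup = *mqd
 *   gpu reset:   *mqd = backup -> clear ring -> program registers
 */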
3522
3523 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3524 {
3525         struct amdgpu_device *adev = ring->adev;
3526         struct v9_mqd *mqd = ring->mqd_ptr;
3527         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3528
3529         if (!adev->in_gpu_reset && !adev->in_suspend) {
3530                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3531                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3532                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3533                 mutex_lock(&adev->srbm_mutex);
3534                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3535                 gfx_v9_0_mqd_init(ring);
3536                 soc15_grbm_select(adev, 0, 0, 0, 0);
3537                 mutex_unlock(&adev->srbm_mutex);
3538
3539                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3540                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3541         } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3542                 /* reset MQD to a clean status */
3543                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3544                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3545
3546                 /* reset ring buffer */
3547                 ring->wptr = 0;
3548                 amdgpu_ring_clear_ring(ring);
3549         } else {
3550                 amdgpu_ring_clear_ring(ring);
3551         }
3552
3553         return 0;
3554 }
3555
3556 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3557 {
3558         struct amdgpu_ring *ring;
3559         int r;
3560
3561         ring = &adev->gfx.kiq.ring;
3562
3563         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3564         if (unlikely(r != 0))
3565                 return r;
3566
3567         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3568         if (unlikely(r != 0))
3569                 return r;
3570
3571         gfx_v9_0_kiq_init_queue(ring);
3572         amdgpu_bo_kunmap(ring->mqd_obj);
3573         ring->mqd_ptr = NULL;
3574         amdgpu_bo_unreserve(ring->mqd_obj);
3575         ring->sched.ready = true;
3576         return 0;
3577 }
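
/*
 * MQD access above follows the standard amdgpu buffer-object pattern:
 * reserve the BO, kmap it for CPU access, use the mapping, then kunmap
 * and unreserve.  A minimal sketch of the full idiom with error
 * handling (illustrative; the helpers are the same ones used above):
 *
 *   r = amdgpu_bo_reserve(bo, false);
 *   if (r)
 *           return r;
 *   r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *   if (!r) {
 *           ... use cpu_ptr ...
 *           amdgpu_bo_kunmap(bo);
 *   }
 *   amdgpu_bo_unreserve(bo);
 *
 * gfx_v9_0_kcq_resume() below structures its loop this way so the BO
 * is unreserved even when the kmap fails.
 */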
3578
3579 static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
3580 {
3581         struct amdgpu_ring *ring = NULL;
3582         int r = 0, i;
3583
3584         gfx_v9_0_cp_compute_enable(adev, true);
3585
3586         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3587                 ring = &adev->gfx.compute_ring[i];
3588
3589                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3590                 if (unlikely(r != 0))
3591                         goto done;
3592                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3593                 if (!r) {
3594                         r = gfx_v9_0_kcq_init_queue(ring);
3595                         amdgpu_bo_kunmap(ring->mqd_obj);
3596                         ring->mqd_ptr = NULL;
3597                 }
3598                 amdgpu_bo_unreserve(ring->mqd_obj);
3599                 if (r)
3600                         goto done;
3601         }
3602
3603         r = gfx_v9_0_kiq_kcq_enable(adev);
3604 done:
3605         return r;
3606 }
3607
3608 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3609 {
3610         int r, i;
3611         struct amdgpu_ring *ring;
3612
3613         if (!(adev->flags & AMD_IS_APU))
3614                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3615
3616         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3617                 if (adev->asic_type != CHIP_ARCTURUS) {
3618                         /* legacy firmware loading */
3619                         r = gfx_v9_0_cp_gfx_load_microcode(adev);
3620                         if (r)
3621                                 return r;
3622                 }
3623
3624                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3625                 if (r)
3626                         return r;
3627         }
3628
3629         r = gfx_v9_0_kiq_resume(adev);
3630         if (r)
3631                 return r;
3632
3633         if (adev->asic_type != CHIP_ARCTURUS) {
3634                 r = gfx_v9_0_cp_gfx_resume(adev);
3635                 if (r)
3636                         return r;
3637         }
3638
3639         r = gfx_v9_0_kcq_resume(adev);
3640         if (r)
3641                 return r;
3642
3643         if (adev->asic_type != CHIP_ARCTURUS) {
3644                 ring = &adev->gfx.gfx_ring[0];
3645                 r = amdgpu_ring_test_helper(ring);
3646                 if (r)
3647                         return r;
3648         }
3649
3650         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3651                 ring = &adev->gfx.compute_ring[i];
3652                 amdgpu_ring_test_helper(ring);
3653         }
3654
3655         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3656
3657         return 0;
3658 }
3659
3660 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3661 {
3662         if (adev->asic_type != CHIP_ARCTURUS)
3663                 gfx_v9_0_cp_gfx_enable(adev, enable);
3664         gfx_v9_0_cp_compute_enable(adev, enable);
3665 }
3666
3667 static int gfx_v9_0_hw_init(void *handle)
3668 {
3669         int r;
3670         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3671
3672         if (!amdgpu_sriov_vf(adev))
3673                 gfx_v9_0_init_golden_registers(adev);
3674
3675         gfx_v9_0_constants_init(adev);
3676
3677         r = adev->gfx.rlc.funcs->resume(adev);
3678         if (r)
3679                 return r;
3680
3681         r = gfx_v9_0_cp_resume(adev);
3682         if (r)
3683                 return r;
3684
3685         return r;
3686 }
3687
3688 static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
3689 {
3690         int r, i;
3691         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
3692
3693         r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
3694         if (r)
3695                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3696
3697         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3698                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
3699
3700                 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3701                 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3702                                                 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3703                                                 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3704                                                 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3705                                                 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3706                 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3707                 amdgpu_ring_write(kiq_ring, 0);
3708                 amdgpu_ring_write(kiq_ring, 0);
3709                 amdgpu_ring_write(kiq_ring, 0);
3710         }
3711         r = amdgpu_ring_test_helper(kiq_ring);
3712         if (r)
3713                 DRM_ERROR("KCQ disable failed\n");
3714
3715         return r;
3716 }
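
/*
 * Sizing of the KIQ submission above: each UNMAP_QUEUES packet is one
 * header dword plus five payload dwords (action/selector word,
 * doorbell offset, and three trailing dwords left at zero), i.e. six
 * dwords per compute queue, hence the amdgpu_ring_alloc() of
 * 6 * num_compute_rings.  ACTION(1) makes the KIQ reset the queues
 * rather than preempt them, per the RESET_QUEUES comment above.
 */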
3717
3718 static int gfx_v9_0_hw_fini(void *handle)
3719 {
3720         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3721
3722         amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
3723         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3724         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3725
3726         /* DF freeze and KCQ disable would fail after a RAS fatal error */
3727         if (!amdgpu_ras_intr_triggered())
3728                 /* disable KCQ so the CPC stops touching memory that is no longer valid */
3729                 gfx_v9_0_kcq_disable(adev);
3730
3731         if (amdgpu_sriov_vf(adev)) {
3732                 gfx_v9_0_cp_gfx_enable(adev, false);
3733                 /* polling must be disabled for SRIOV once hw teardown is done,
3734                  * otherwise the CPC engine may keep fetching a WB address that
3735                  * is no longer valid and trigger a DMAR read error on the
3736                  * hypervisor side.
3737                  */
3738                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3739                 return 0;
3740         }
3741
3742         /* Use the deinitialize sequence from CAIL when unbinding the device
3743          * from the driver, otherwise KIQ hangs when binding it back.
3744          */
3745         if (!adev->in_gpu_reset && !adev->in_suspend) {
3746                 mutex_lock(&adev->srbm_mutex);
3747                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3748                                 adev->gfx.kiq.ring.pipe,
3749                                 adev->gfx.kiq.ring.queue, 0);
3750                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3751                 soc15_grbm_select(adev, 0, 0, 0, 0);
3752                 mutex_unlock(&adev->srbm_mutex);
3753         }
3754
3755         gfx_v9_0_cp_enable(adev, false);
3756         adev->gfx.rlc.funcs->stop(adev);
3757
3758         return 0;
3759 }
3760
3761 static int gfx_v9_0_suspend(void *handle)
3762 {
3763         return gfx_v9_0_hw_fini(handle);
3764 }
3765
3766 static int gfx_v9_0_resume(void *handle)
3767 {
3768         return gfx_v9_0_hw_init(handle);
3769 }
3770
3771 static bool gfx_v9_0_is_idle(void *handle)
3772 {
3773         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3774
3775         if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3776                                 GRBM_STATUS, GUI_ACTIVE))
3777                 return false;
3778         else
3779                 return true;
3780 }
3781
3782 static int gfx_v9_0_wait_for_idle(void *handle)
3783 {
3784         unsigned i;
3785         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3786
3787         for (i = 0; i < adev->usec_timeout; i++) {
3788                 if (gfx_v9_0_is_idle(handle))
3789                         return 0;
3790                 udelay(1);
3791         }
3792         return -ETIMEDOUT;
3793 }
3794
3795 static int gfx_v9_0_soft_reset(void *handle)
3796 {
3797         u32 grbm_soft_reset = 0;
3798         u32 tmp;
3799         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3800
3801         /* GRBM_STATUS */
3802         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3803         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3804                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3805                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3806                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3807                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3808                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3809                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3810                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3811                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3812                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3813         }
3814
3815         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3816                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3817                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3818         }
3819
3820         /* GRBM_STATUS2 */
3821         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3822         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3823                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3824                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3825
3826
3827         if (grbm_soft_reset) {
3828                 /* stop the rlc */
3829                 adev->gfx.rlc.funcs->stop(adev);
3830
3831                 if (adev->asic_type != CHIP_ARCTURUS)
3832                         /* Disable GFX parsing/prefetching */
3833                         gfx_v9_0_cp_gfx_enable(adev, false);
3834
3835                 /* Disable MEC parsing/prefetching */
3836                 gfx_v9_0_cp_compute_enable(adev, false);
3837
3838                 if (grbm_soft_reset) {
3839                         tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3840                         tmp |= grbm_soft_reset;
3841                         dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3842                         WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3843                         tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3844
3845                         udelay(50);
3846
3847                         tmp &= ~grbm_soft_reset;
3848                         WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3849                         tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3850                 }
3851
3852                 /* Wait a little for things to settle down */
3853                 udelay(50);
3854         }
3855         return 0;
3856 }
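
/*
 * The reset sequence above is the usual GRBM_SOFT_RESET pulse: OR the
 * accumulated SOFT_RESET_* bits in, read the register back to post the
 * write, hold ~50us, clear the bits, read back again, then settle for
 * another ~50us.  In sketch form:
 *
 *   tmp = RREG32(GRBM_SOFT_RESET);
 *   WREG32(GRBM_SOFT_RESET, tmp | bits);    (void)RREG32(...);  udelay(50);
 *   WREG32(GRBM_SOFT_RESET, tmp & ~bits);   (void)RREG32(...);  udelay(50);
 */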
3857
3858 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3859 {
3860         uint64_t clock;
3861
3862         mutex_lock(&adev->gfx.gpu_clock_mutex);
3863         if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
3864                 uint32_t tmp, lsb, msb, i = 0;
3865                 do {
3866                         if (i != 0)
3867                                 udelay(1);
3868                         tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
3869                         lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
3870                         msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
3871                         i++;
3872                 } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
3873                 clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
3874         } else {
3875                 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3876                 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3877                         ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3878         }
3879         mutex_unlock(&adev->gfx.gpu_clock_mutex);
3880         return clock;
3881 }
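
/*
 * The SRIOV branch above is a classic torn-read guard for a 64-bit
 * counter read as two 32-bit halves: sample MSB, then LSB, then MSB
 * again, and only accept the pair when both MSB samples match, i.e.
 * the counter did not carry between the two reads.  The failure it
 * prevents:
 *
 *   counter = 0x00000001_ffffffff
 *   read MSB -> 0x00000001; counter rolls to 0x00000002_00000000
 *   read LSB -> 0x00000000
 *   naive combine = 0x00000001_00000000, off by 2^32 ticks
 *
 * The loop retries until the MSB is stable or usec_timeout expires.
 */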
3882
3883 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3884                                           uint32_t vmid,
3885                                           uint32_t gds_base, uint32_t gds_size,
3886                                           uint32_t gws_base, uint32_t gws_size,
3887                                           uint32_t oa_base, uint32_t oa_size)
3888 {
3889         struct amdgpu_device *adev = ring->adev;
3890
3891         /* GDS Base */
3892         gfx_v9_0_write_data_to_reg(ring, 0, false,
3893                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3894                                    gds_base);
3895
3896         /* GDS Size */
3897         gfx_v9_0_write_data_to_reg(ring, 0, false,
3898                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3899                                    gds_size);
3900
3901         /* GWS */
3902         gfx_v9_0_write_data_to_reg(ring, 0, false,
3903                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3904                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3905
3906         /* OA */
3907         gfx_v9_0_write_data_to_reg(ring, 0, false,
3908                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3909                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
3910 }
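
/*
 * The OA write above builds a contiguous bitmask from (base, size):
 * (1 << (oa_size + oa_base)) - (1 << oa_base) sets exactly oa_size
 * bits starting at bit oa_base.  For example, oa_base = 4 and
 * oa_size = 2 gives (1 << 6) - (1 << 4) = 0x40 - 0x10 = 0x30, i.e.
 * bits 4 and 5 set.
 */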
3911
3912 static const u32 vgpr_init_compute_shader[] =
3913 {
3914         0xb07c0000, 0xbe8000ff,
3915         0x000000f8, 0xbf110800,
3916         0x7e000280, 0x7e020280,
3917         0x7e040280, 0x7e060280,
3918         0x7e080280, 0x7e0a0280,
3919         0x7e0c0280, 0x7e0e0280,
3920         0x80808800, 0xbe803200,
3921         0xbf84fff5, 0xbf9c0000,
3922         0xd28c0001, 0x0001007f,
3923         0xd28d0001, 0x0002027e,
3924         0x10020288, 0xb8810904,
3925         0xb7814000, 0xd1196a01,
3926         0x00000301, 0xbe800087,
3927         0xbefc00c1, 0xd89c4000,
3928         0x00020201, 0xd89cc080,
3929         0x00040401, 0x320202ff,
3930         0x00000800, 0x80808100,
3931         0xbf84fff8, 0x7e020280,
3932         0xbf810000, 0x00000000,
3933 };
3934
3935 static const u32 sgpr_init_compute_shader[] =
3936 {
3937         0xb07c0000, 0xbe8000ff,
3938         0x0000005f, 0xbee50080,
3939         0xbe812c65, 0xbe822c65,
3940         0xbe832c65, 0xbe842c65,
3941         0xbe852c65, 0xb77c0005,
3942         0x80808500, 0xbf84fff8,
3943         0xbe800080, 0xbf810000,
3944 };
3945
3946 static const struct soc15_reg_entry vgpr_init_regs[] = {
3947    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
3948    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
3949    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
3950    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
3951    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
3952    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
3953    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
3954    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
3955    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x100007f }, /* VGPRS=15 (256 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
3956    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
3957 };
3958
3959 static const struct soc15_reg_entry sgpr_init_regs[] = {
3960    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
3961    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
3962    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
3963    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
3964    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x1000000 }, /* CU_GROUP_COUNT=1 */
3965    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 256*2 },
3966    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 1 },
3967    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
3968    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x340 }, /* SGPRS=13 (112 GPRS) */
3969    { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
3970 };
3971
3972 static const struct soc15_reg_entry sec_ded_counter_registers[] = {
3973    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
3974    { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
3975    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
3976    { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
3977    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
3978    { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
3979    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
3980    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
3981    { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
3982    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
3983    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
3984    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
3985    { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
3986    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
3987    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
3988    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
3989    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
3990    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
3991    { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
3992    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
3993    { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
3994    { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
3995    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
3996    { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
3997    { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
3998    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
3999    { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
4000    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
4001    { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
4002    { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
4003    { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
4004    { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
4005    { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
4006 };
4007
4008 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
4009 {
4010         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4011         int i, r;
4012
4013         /* only supported when RAS is enabled */
4014         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4015                 return 0;
4016
4017         r = amdgpu_ring_alloc(ring, 7);
4018         if (r) {
4019                 DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n",
4020                         ring->name, r);
4021                 return r;
4022         }
4023
4024         WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
4025         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);
4026
4027         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
4028         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
4029                                 PACKET3_DMA_DATA_DST_SEL(1) |
4030                                 PACKET3_DMA_DATA_SRC_SEL(2) |
4031                                 PACKET3_DMA_DATA_ENGINE(0)));
4032         amdgpu_ring_write(ring, 0);
4033         amdgpu_ring_write(ring, 0);
4034         amdgpu_ring_write(ring, 0);
4035         amdgpu_ring_write(ring, 0);
4036         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
4037                                 adev->gds.gds_size);
4038
4039         amdgpu_ring_commit(ring);
4040
4041         for (i = 0; i < adev->usec_timeout; i++) {
4042                 if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
4043                         break;
4044                 udelay(1);
4045         }
4046
4047         if (i >= adev->usec_timeout)
4048                 r = -ETIMEDOUT;
4049
4050         WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);
4051
4052         return r;
4053 }
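
/*
 * Decoding the packet above (per the PM4 DMA_DATA encoding, so treat
 * the details as informational): DST_SEL(1) targets GDS and SRC_SEL(2)
 * selects immediate data, so the packet sweeps the whole GDS aperture
 * with zeroes.  The four zero dwords fill the src/dst address slots
 * (with an immediate source the src slot carries the fill value), and
 * the final dword carries RAW_WAIT plus the byte count,
 * adev->gds.gds_size.  The rptr poll afterwards just waits, bounded by
 * usec_timeout, for the CP to consume the packet.
 */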
4054
4055 static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
4056 {
4057         struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
4058         struct amdgpu_ib ib;
4059         struct dma_fence *f = NULL;
4060         int r, i, j, k;
4061         unsigned total_size, vgpr_offset, sgpr_offset;
4062         u64 gpu_addr;
4063
4064         /* only supported when RAS is enabled */
4065         if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
4066                 return 0;
4067
4068         /* bail if the compute ring is not ready */
4069         if (!ring->sched.ready)
4070                 return 0;
4071
4072         total_size =
4073                 ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
4074         total_size +=
4075                 ((ARRAY_SIZE(sgpr_init_regs) * 3) + 4 + 5 + 2) * 4;
4076         total_size = ALIGN(total_size, 256);
4077         vgpr_offset = total_size;
4078         total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
4079         sgpr_offset = total_size;
4080         total_size += sizeof(sgpr_init_compute_shader);
4081
4082         /* allocate an indirect buffer to put the commands in */
4083         memset(&ib, 0, sizeof(ib));
4084         r = amdgpu_ib_get(adev, NULL, total_size, &ib);
4085         if (r) {
4086                 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
4087                 return r;
4088         }
4089
4090         /* load the compute shaders */
4091         for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
4092                 ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
4093
4094         for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
4095                 ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
4096
4097         /* init the ib length to 0 */
4098         ib.length_dw = 0;
4099
4100         /* VGPR */
4101         /* write the register state for the compute dispatch */
4102         for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) {
4103                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4104                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
4105                                                                 - PACKET3_SET_SH_REG_START;
4106                 ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
4107         }
4108         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4109         gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
4110         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4111         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4112                                                         - PACKET3_SET_SH_REG_START;
4113         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4114         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4115
4116         /* write dispatch packet */
4117         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4118         ib.ptr[ib.length_dw++] = 128; /* x */
4119         ib.ptr[ib.length_dw++] = 1; /* y */
4120         ib.ptr[ib.length_dw++] = 1; /* z */
4121         ib.ptr[ib.length_dw++] =
4122                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4123
4124         /* write CS partial flush packet */
4125         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4126         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4127
4128         /* SGPR */
4129         /* write the register state for the compute dispatch */
4130         for (i = 0; i < ARRAY_SIZE(sgpr_init_regs); i++) {
4131                 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
4132                 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr_init_regs[i])
4133                                                                 - PACKET3_SET_SH_REG_START;
4134                 ib.ptr[ib.length_dw++] = sgpr_init_regs[i].reg_value;
4135         }
4136         /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
4137         gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
4138         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
4139         ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
4140                                                         - PACKET3_SET_SH_REG_START;
4141         ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
4142         ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);
4143
4144         /* write dispatch packet */
4145         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
4146         ib.ptr[ib.length_dw++] = 128; /* x */
4147         ib.ptr[ib.length_dw++] = 1; /* y */
4148         ib.ptr[ib.length_dw++] = 1; /* z */
4149         ib.ptr[ib.length_dw++] =
4150                 REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);
4151
4152         /* write CS partial flush packet */
4153         ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
4154         ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
4155
4156         /* schedule the ib on the ring */
4157         r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
4158         if (r) {
4159                 DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
4160                 goto fail;
4161         }
4162
4163         /* wait for the GPU to finish processing the IB */
4164         r = dma_fence_wait(f, false);
4165         if (r) {
4166                 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
4167                 goto fail;
4168         }
4169
4170         /* read back registers to clear the counters */
4171         mutex_lock(&adev->grbm_idx_mutex);
4172         for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
4173                 for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
4174                         for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
4175                                 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
4176                                 RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
4177                         }
4178                 }
4179         }
4180         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
4181         mutex_unlock(&adev->grbm_idx_mutex);
4182
4183 fail:
4184         amdgpu_ib_free(adev, &ib, NULL);
4185         dma_fence_put(f);
4186
4187         return r;
4188 }
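
/*
 * IB sizing in the function above, spelled out: a SET_SH_REG of one
 * register costs 3 dwords (header, offset, value), the PGM_LO/PGM_HI
 * pair costs 4, DISPATCH_DIRECT costs 5 and the EVENT_WRITE flush
 * costs 2.  With the 10 entries in vgpr_init_regs that is
 * (10 * 3 + 4 + 5 + 2) * 4 = 164 bytes for the VGPR half before the
 * 256-byte ALIGN, and likewise for the SGPR half.  The shader
 * binaries are appended at vgpr_offset/sgpr_offset, which is why
 * COMPUTE_PGM_LO/HI are programmed with (ib.gpu_addr + offset) >> 8,
 * a 256-byte-aligned shader base.
 */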
4189
4190 static int gfx_v9_0_early_init(void *handle)
4191 {
4192         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4193
4194         if (adev->asic_type == CHIP_ARCTURUS)
4195                 adev->gfx.num_gfx_rings = 0;
4196         else
4197                 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
4198         adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
4199         gfx_v9_0_set_ring_funcs(adev);
4200         gfx_v9_0_set_irq_funcs(adev);
4201         gfx_v9_0_set_gds_init(adev);
4202         gfx_v9_0_set_rlc_funcs(adev);
4203
4204         return 0;
4205 }
4206
4207 static int gfx_v9_0_ecc_late_init(void *handle)
4208 {
4209         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4210         int r;
4211
4212         r = amdgpu_gfx_ras_late_init(adev);
4213         if (r)
4214                 return r;
4215
4216         r = gfx_v9_0_do_edc_gds_workarounds(adev);
4217         if (r)
4218                 return r;
4219
4220         /* requires IBs so do in late init after IB pool is initialized */
4221         r = gfx_v9_0_do_edc_gpr_workarounds(adev);
4222         if (r)
4223                 return r;
4224
4225         return 0;
4226 }
4227
4228 static int gfx_v9_0_late_init(void *handle)
4229 {
4230         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4231         int r;
4232
4233         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
4234         if (r)
4235                 return r;
4236
4237         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
4238         if (r)
4239                 return r;
4240
4241         r = gfx_v9_0_ecc_late_init(handle);
4242         if (r)
4243                 return r;
4244
4245         return 0;
4246 }
4247
4248 static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
4249 {
4250         uint32_t rlc_setting;
4251
4252         /* if RLC is not enabled, do nothing */
4253         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
4254         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
4255                 return false;
4256
4257         return true;
4258 }
4259
4260 static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
4261 {
4262         uint32_t data;
4263         unsigned i;
4264
4265         data = RLC_SAFE_MODE__CMD_MASK;
4266         data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4267         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4268
4269         /* wait for RLC_SAFE_MODE */
4270         for (i = 0; i < adev->usec_timeout; i++) {
4271                 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
4272                         break;
4273                 udelay(1);
4274         }
4275 }
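
/*
 * Safe-mode entry above is a request/acknowledge handshake with the
 * RLC: the driver writes CMD=1 plus a MESSAGE code into RLC_SAFE_MODE,
 * and the RLC clears the CMD field once it has accepted the request,
 * so the loop spins on CMD returning to zero.  The exit path below
 * writes CMD with MESSAGE=0 and, notably, does not wait.
 */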
4276
4277 static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
4278 {
4279         uint32_t data;
4280
4281         data = RLC_SAFE_MODE__CMD_MASK;
4282         WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
4283 }
4284
4285 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
4286                                                 bool enable)
4287 {
4288         amdgpu_gfx_rlc_enter_safe_mode(adev);
4289
4290         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
4291                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
4292                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4293                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
4294         } else {
4295                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
4296                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
4297                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
4298         }
4299
4300         amdgpu_gfx_rlc_exit_safe_mode(adev);
4301 }
4302
4303 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
4304                                                 bool enable)
4305 {
4306         /* TODO: double check if we need to perform under safe mode */
4307         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
4308
4309         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
4310                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
4311         else
4312                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
4313
4314         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
4315                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
4316         else
4317                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
4318
4319         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
4320 }
4321
4322 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4323                                                       bool enable)
4324 {
4325         uint32_t data, def;
4326
4327         amdgpu_gfx_rlc_enter_safe_mode(adev);
4328
4329         /* It is disabled by HW by default */
4330         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
4331                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4332                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4333
4334                 if (adev->asic_type != CHIP_VEGA12)
4335                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4336
4337                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4338                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4339                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4340
4341                 /* only for Vega10 & Raven1 */
4342                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
4343
4344                 if (def != data)
4345                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4346
4347                 /* MGLS is a global flag to control all MGLS in GFX */
4348                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
4349                         /* 2 - RLC memory Light sleep */
4350                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
4351                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4352                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4353                                 if (def != data)
4354                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4355                         }
4356                         /* 3 - CP memory Light sleep */
4357                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
4358                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4359                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4360                                 if (def != data)
4361                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4362                         }
4363                 }
4364         } else {
4365                 /* 1 - MGCG_OVERRIDE */
4366                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4367
4368                 if (adev->asic_type != CHIP_VEGA12)
4369                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
4370
4371                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4372                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4373                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
4374                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
4375
4376                 if (def != data)
4377                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4378
4379                 /* 2 - disable MGLS in RLC */
4380                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4381                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
4382                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
4383                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
4384                 }
4385
4386                 /* 3 - disable MGLS in CP */
4387                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4388                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
4389                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
4390                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
4391                 }
4392         }
4393
4394         amdgpu_gfx_rlc_exit_safe_mode(adev);
4395 }
4396
4397 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
4398                                            bool enable)
4399 {
4400         uint32_t data, def;
4401
4402         if (adev->asic_type == CHIP_ARCTURUS)
4403                 return;
4404
4405         amdgpu_gfx_rlc_enter_safe_mode(adev);
4406
4407         /* Enable 3D CGCG/CGLS */
4408         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
4409                 /* write cmd to clear cgcg/cgls ov */
4410                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4411                 /* unset CGCG override */
4412                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4413                 /* update CGCG and CGLS override bits */
4414                 if (def != data)
4415                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4416
4417                 /* enable 3Dcgcg FSM(0x0000363f) */
4418                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4419
4420                 data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4421                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4422                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4423                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4424                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
4425                 if (def != data)
4426                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4427
4428                 /* set IDLE_POLL_COUNT(0x00900100) */
4429                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4430                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4431                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4432                 if (def != data)
4433                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4434         } else {
4435                 /* Disable CGCG/CGLS */
4436                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4437                 /* disable cgcg, cgls should be disabled */
4438                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
4439                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
4440                 /* disable cgcg and cgls in FSM */
4441                 if (def != data)
4442                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
4443         }
4444
4445         amdgpu_gfx_rlc_exit_safe_mode(adev);
4446 }
4447
4448 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4449                                                       bool enable)
4450 {
4451         uint32_t def, data;
4452
4453         amdgpu_gfx_rlc_enter_safe_mode(adev);
4454
4455         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
4456                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4457                 /* unset CGCG override */
4458                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4459                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4460                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4461                 else
4462                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4463                 /* update CGCG and CGLS override bits */
4464                 if (def != data)
4465                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
4466
4467                 /* enable cgcg FSM(0x0000363F) */
4468                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4469
4470                 if (adev->asic_type == CHIP_ARCTURUS)
4471                         data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4472                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4473                 else
4474                         data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4475                                 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4476                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4477                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4478                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
4479                 if (def != data)
4480                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4481
4482                 /* set IDLE_POLL_COUNT(0x00900100) */
4483                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
4484                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4485                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
4486                 if (def != data)
4487                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
4488         } else {
4489                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4490                 /* reset CGCG/CGLS bits */
4491                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
4492                 /* disable cgcg and cgls in FSM */
4493                 if (def != data)
4494                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
4495         }
4496
4497         amdgpu_gfx_rlc_exit_safe_mode(adev);
4498 }
4499
4500 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4501                                             bool enable)
4502 {
4503         if (enable) {
4504                 /* CGCG/CGLS should be enabled after MGCG/MGLS
4505                  * ===  MGCG + MGLS ===
4506                  */
4507                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4508                 /* ===  CGCG /CGLS for GFX 3D Only === */
4509                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4510                 /* ===  CGCG + CGLS === */
4511                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4512         } else {
4513                 /* CGCG/CGLS should be disabled before MGCG/MGLS
4514                  * ===  CGCG + CGLS ===
4515                  */
4516                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
4517                 /* ===  CGCG /CGLS for GFX 3D Only === */
4518                 gfx_v9_0_update_3d_clock_gating(adev, enable);
4519                 /* ===  MGCG + MGLS === */
4520                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
4521         }
4522         return 0;
4523 }
4524
4525 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
4526         .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
4527         .set_safe_mode = gfx_v9_0_set_safe_mode,
4528         .unset_safe_mode = gfx_v9_0_unset_safe_mode,
4529         .init = gfx_v9_0_rlc_init,
4530         .get_csb_size = gfx_v9_0_get_csb_size,
4531         .get_csb_buffer = gfx_v9_0_get_csb_buffer,
4532         .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
4533         .resume = gfx_v9_0_rlc_resume,
4534         .stop = gfx_v9_0_rlc_stop,
4535         .reset = gfx_v9_0_rlc_reset,
4536         .start = gfx_v9_0_rlc_start
4537 };
4538
4539 static int gfx_v9_0_set_powergating_state(void *handle,
4540                                           enum amd_powergating_state state)
4541 {
4542         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4543         bool enable = (state == AMD_PG_STATE_GATE);
4544
4545         switch (adev->asic_type) {
4546         case CHIP_RAVEN:
4547         case CHIP_RENOIR:
4548                 if (!enable) {
4549                         amdgpu_gfx_off_ctrl(adev, false);
4550                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4551                 }
4552                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
4553                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
4554                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
4555                 } else {
4556                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
4557                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
4558                 }
4559
4560                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
4561                         gfx_v9_0_enable_cp_power_gating(adev, true);
4562                 else
4563                         gfx_v9_0_enable_cp_power_gating(adev, false);
4564
4565                 /* update gfx cgpg state */
4566                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
4567
4568                 /* update mgcg state */
4569                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
4570
4571                 if (enable)
4572                         amdgpu_gfx_off_ctrl(adev, true);
4573                 break;
4574         case CHIP_VEGA12:
4575                 if (!enable) {
4576                         amdgpu_gfx_off_ctrl(adev, false);
4577                         cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
4578                 } else {
4579                         amdgpu_gfx_off_ctrl(adev, true);
4580                 }
4581                 break;
4582         default:
4583                 break;
4584         }
4585
4586         return 0;
4587 }
4588
4589 static int gfx_v9_0_set_clockgating_state(void *handle,
4590                                           enum amd_clockgating_state state)
4591 {
4592         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4593
4594         if (amdgpu_sriov_vf(adev))
4595                 return 0;
4596
4597         switch (adev->asic_type) {
4598         case CHIP_VEGA10:
4599         case CHIP_VEGA12:
4600         case CHIP_VEGA20:
4601         case CHIP_RAVEN:
4602         case CHIP_ARCTURUS:
4603         case CHIP_RENOIR:
4604                 gfx_v9_0_update_gfx_clock_gating(adev,
4605                                                  state == AMD_CG_STATE_GATE);
4606                 break;
4607         default:
4608                 break;
4609         }
4610         return 0;
4611 }
4612
4613 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
4614 {
4615         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4616         int data;
4617
4618         if (amdgpu_sriov_vf(adev))
4619                 *flags = 0;
4620
4621         /* AMD_CG_SUPPORT_GFX_MGCG */
4622         data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
4623         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
4624                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
4625
4626         /* AMD_CG_SUPPORT_GFX_CGCG */
4627         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
4628         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
4629                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
4630
4631         /* AMD_CG_SUPPORT_GFX_CGLS */
4632         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
4633                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
4634
4635         /* AMD_CG_SUPPORT_GFX_RLC_LS */
4636         data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
4637         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
4638                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
4639
4640         /* AMD_CG_SUPPORT_GFX_CP_LS */
4641         data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
4642         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
4643                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
4644
4645         if (adev->asic_type != CHIP_ARCTURUS) {
4646                 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
4647                 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
4648                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
4649                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
4650
4651                 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
4652                 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
4653                         *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
4654         }
4655 }
4656
4657 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
4658 {
4659         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
4660 }
4661
4662 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
4663 {
4664         struct amdgpu_device *adev = ring->adev;
4665         u64 wptr;
4666
4667         /* XXX check if swapping is necessary on BE */
4668         if (ring->use_doorbell) {
4669                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
4670         } else {
4671                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
4672                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
4673         }
4674
4675         return wptr;
4676 }
4677
4678 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
4679 {
4680         struct amdgpu_device *adev = ring->adev;
4681
4682         if (ring->use_doorbell) {
4683                 /* XXX check if swapping is necessary on BE */
4684                 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4685                 WDOORBELL64(ring->doorbell_index, ring->wptr);
4686         } else {
4687                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
4688                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
4689         }
4690 }
4691
4692 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
4693 {
4694         struct amdgpu_device *adev = ring->adev;
4695         u32 ref_and_mask, reg_mem_engine;
4696         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
4697
4698         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4699                 switch (ring->me) {
4700                 case 1:
4701                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
4702                         break;
4703                 case 2:
4704                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
4705                         break;
4706                 default:
4707                         return;
4708                 }
4709                 reg_mem_engine = 0;
4710         } else {
4711                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
4712                 reg_mem_engine = 1; /* pfp */
4713         }
4714
4715         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
4716                               adev->nbio.funcs->get_hdp_flush_req_offset(adev),
4717                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
4718                               ref_and_mask, ref_and_mask, 0x20);
4719 }
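
/*
 * The flush above rides on WAIT_REG_MEM in its write-then-poll form:
 * the CP writes the per-client bit (cp0 for gfx, cp2/cp6 shifted by
 * pipe for compute) to the HDP flush request register, then polls the
 * done register until the same bit reads back, rechecking every 0x20
 * clocks.  The engine choice matters: gfx issues it from the PFP
 * (reg_mem_engine = 1) so the flush is ordered ahead of command
 * fetch, while compute issues it from the ME.
 */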
4720
4721 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
4722                                         struct amdgpu_job *job,
4723                                         struct amdgpu_ib *ib,
4724                                         uint32_t flags)
4725 {
4726         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4727         u32 header, control = 0;
4728
4729         if (ib->flags & AMDGPU_IB_FLAG_CE)
4730                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
4731         else
4732                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4733
4734         control |= ib->length_dw | (vmid << 24);
4735
4736         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
4737                 control |= INDIRECT_BUFFER_PRE_ENB(1);
4738
4739                 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
4740                         gfx_v9_0_ring_emit_de_meta(ring);
4741         }
4742
4743         amdgpu_ring_write(ring, header);
4744         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4745         amdgpu_ring_write(ring,
4746 #ifdef __BIG_ENDIAN
4747                 (2 << 0) |
4748 #endif
4749                 lower_32_bits(ib->gpu_addr));
4750         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4751         amdgpu_ring_write(ring, control);
4752 }
4753
4754 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
4755                                           struct amdgpu_job *job,
4756                                           struct amdgpu_ib *ib,
4757                                           uint32_t flags)
4758 {
4759         unsigned vmid = AMDGPU_JOB_GET_VMID(job);
4760         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
4761
4762         /* Currently, there is a high possibility to get wave ID mismatch
4763          * between ME and GDS, leading to a hw deadlock, because ME generates
4764          * different wave IDs than the GDS expects. This situation happens
4765          * randomly when at least 5 compute pipes use GDS ordered append.
4766          * The wave IDs generated by ME are also wrong after suspend/resume.
4767          * Those are probably bugs somewhere else in the kernel driver.
4768          *
4769          * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
4770          * GDS to 0 for this ring (me/pipe).
4771          */
4772         if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
4773                 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
4774                 amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
4775                 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
4776         }
4777
4778         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
4779         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
4780         amdgpu_ring_write(ring,
4781 #ifdef __BIG_ENDIAN
4782                                 (2 << 0) |
4783 #endif
4784                                 lower_32_bits(ib->gpu_addr));
4785         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
4786         amdgpu_ring_write(ring, control);
4787 }
4788
4789 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
4790                                      u64 seq, unsigned flags)
4791 {
4792         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
4793         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
4794         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
4795
4796         /* RELEASE_MEM - flush caches, send int */
4797         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
4798         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
4799                                                EOP_TC_NC_ACTION_EN) :
4800                                               (EOP_TCL1_ACTION_EN |
4801                                                EOP_TC_ACTION_EN |
4802                                                EOP_TC_WB_ACTION_EN |
4803                                                EOP_TC_MD_ACTION_EN)) |
4804                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
4805                                  EVENT_INDEX(5)));
4806         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
4807
4808         /*
4809          * the address must be qword-aligned for a 64-bit write, and dword-
4810          * aligned when only the low 32 bits are written (high bits dropped)
4811          */
4812         if (write64bit)
4813                 BUG_ON(addr & 0x7);
4814         else
4815                 BUG_ON(addr & 0x3);
4816         amdgpu_ring_write(ring, lower_32_bits(addr));
4817         amdgpu_ring_write(ring, upper_32_bits(addr));
4818         amdgpu_ring_write(ring, lower_32_bits(seq));
4819         amdgpu_ring_write(ring, upper_32_bits(seq));
4820         amdgpu_ring_write(ring, 0);
4821 }
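/*
 * Worked example (illustrative): with AMDGPU_FENCE_FLAG_64BIT set,
 * DATA_SEL(2) stores the full 64-bit seq, so addr must be 8-byte
 * aligned; a 32-bit fence (DATA_SEL(1)) stores only seq low and needs
 * 4-byte alignment.  Either way the packet is 8 dwords total: header,
 * cache-action/event dword, DATA_SEL/INT_SEL dword, addr lo/hi,
 * seq lo/hi and a trailing zero.
 */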
4822
4823 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
4824 {
4825         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4826         uint32_t seq = ring->fence_drv.sync_seq;
4827         uint64_t addr = ring->fence_drv.gpu_addr;
4828
4829         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
4830                               lower_32_bits(addr), upper_32_bits(addr),
4831                               seq, 0xffffffff, 4);
4832 }
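/*
 * Illustrative note (assuming the helper's usual WAIT_REG_MEM
 * encoding): this polls the 32-bit fence value at addr until it
 * equals seq under the 0xffffffff mask, re-checking at the poll
 * interval given by the last argument.  With usepfp=1 on the gfx
 * ring, the PFP does the polling, so command fetch stalls until all
 * previously scheduled work on this ring has signalled.
 */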
4833
4834 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4835                                         unsigned vmid, uint64_t pd_addr)
4836 {
4837         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4838
4839         /* compute doesn't have PFP */
4840         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4841                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4842                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4843                 amdgpu_ring_write(ring, 0x0);
4844         }
4845 }
4846
4847 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4848 {
4849         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware uses a 32-bit rptr */
4850 }
4851
4852 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4853 {
4854         u64 wptr;
4855
4856         /* XXX check if swapping is necessary on BE */
4857         if (ring->use_doorbell)
4858                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4859         else
4860                 BUG();
4861         return wptr;
4862 }
4863
4864 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
4865                                            bool acquire)
4866 {
4867         struct amdgpu_device *adev = ring->adev;
4868         int pipe_num, tmp, reg;
4869         int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
4870
4871         pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
4872
4873         /* first me only has 2 entries, GFX and HP3D */
4874         if (ring->me > 0)
4875                 pipe_num -= 2;
4876
4877         reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
4878         tmp = RREG32(reg);
4879         tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
4880         WREG32(reg, tmp);
4881 }
4882
4883 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
4884                                             struct amdgpu_ring *ring,
4885                                             bool acquire)
4886 {
4887         int i, pipe;
4888         bool reserve;
4889         struct amdgpu_ring *iring;
4890
4891         mutex_lock(&adev->gfx.pipe_reserve_mutex);
4892         pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
4893         if (acquire)
4894                 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4895         else
4896                 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4897
4898         if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
4899                 /* Clear all reservations - everyone reacquires all resources */
4900                 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
4901                         gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
4902                                                        true);
4903
4904                 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
4905                         gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
4906                                                        true);
4907         } else {
4908                 /* Lower all pipes without a current reservation */
4909                 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
4910                         iring = &adev->gfx.gfx_ring[i];
4911                         pipe = amdgpu_gfx_mec_queue_to_bit(adev,
4912                                                            iring->me,
4913                                                            iring->pipe,
4914                                                            0);
4915                         reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4916                         gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4917                 }
4918
4919                 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
4920                         iring = &adev->gfx.compute_ring[i];
4921                         pipe = amdgpu_gfx_mec_queue_to_bit(adev,
4922                                                            iring->me,
4923                                                            iring->pipe,
4924                                                            0);
4925                         reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4926                         gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4927                 }
4928         }
4929
4930         mutex_unlock(&adev->gfx.pipe_reserve_mutex);
4931 }
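/*
 * Illustrative summary: the reserve bitmap drives SPI_WCL_PIPE_PERCENT
 * as a crude QoS knob.  While any pipe holds a reservation, each ring
 * whose pipe is *not* in the bitmap has its wave-launch quota cut to
 * the minimum (0x1); once the last reservation is dropped, all gfx and
 * compute rings are restored to the full VALUE_MASK quota.
 */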
4932
4933 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
4934                                       struct amdgpu_ring *ring,
4935                                       bool acquire)
4936 {
4937         uint32_t pipe_priority = acquire ? 0x2 : 0x0;
4938         uint32_t queue_priority = acquire ? 0xf : 0x0;
4939
4940         mutex_lock(&adev->srbm_mutex);
4941         soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4942
4943         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
4944         WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
4945
4946         soc15_grbm_select(adev, 0, 0, 0, 0);
4947         mutex_unlock(&adev->srbm_mutex);
4948 }
4949
4950 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
4951                                                enum drm_sched_priority priority)
4952 {
4953         struct amdgpu_device *adev = ring->adev;
4954         bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
4955
4956         if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
4957                 return;
4958
4959         gfx_v9_0_hqd_set_priority(adev, ring, acquire);
4960         gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
4961 }
4962
4963 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4964 {
4965         struct amdgpu_device *adev = ring->adev;
4966
4967         /* XXX check if swapping is necessary on BE */
4968         if (ring->use_doorbell) {
4969                 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4970                 WDOORBELL64(ring->doorbell_index, ring->wptr);
4971         } else {
4972                 BUG(); /* only DOORBELL method supported on gfx9 now */
4973         }
4974 }
4975
4976 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4977                                          u64 seq, unsigned int flags)
4978 {
4979         struct amdgpu_device *adev = ring->adev;
4980
4981         /* we only allocate 32 bits of writeback space for each seq address */
4982         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4983
4984         /* write fence seq to the "addr" */
4985         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4986         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4987                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4988         amdgpu_ring_write(ring, lower_32_bits(addr));
4989         amdgpu_ring_write(ring, upper_32_bits(addr));
4990         amdgpu_ring_write(ring, lower_32_bits(seq));
4991
4992         if (flags & AMDGPU_FENCE_FLAG_INT) {
4993                 /* set register to trigger INT */
4994                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4995                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4996                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4997                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4998                 amdgpu_ring_write(ring, 0);
4999                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5000         }
5001 }
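/*
 * Illustrative note: the first WRITE_DATA (DST_SEL(5), memory)
 * lands the 32-bit seq in the fence writeback slot; the optional
 * second one (DST_SEL(0), register) writes bit 29 (0x20000000) of
 * CPC_INT_STATUS so the CPC raises the interrupt (src_id 178) that
 * completes the KIQ fence.
 */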
5002
5003 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
5004 {
5005         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
5006         amdgpu_ring_write(ring, 0);
5007 }
5008
5009 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
5010 {
5011         struct v9_ce_ib_state ce_payload = {0};
5012         uint64_t csa_addr;
5013         int cnt;
5014
5015         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
5016         csa_addr = amdgpu_csa_vaddr(ring->adev);
5017
5018         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5019         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
5020                                  WRITE_DATA_DST_SEL(8) |
5021                                  WR_CONFIRM) |
5022                                  WRITE_DATA_CACHE_POLICY(0));
5023         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5024         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
5025         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
5026 }
5027
5028 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
5029 {
5030         struct v9_de_ib_state de_payload = {0};
5031         uint64_t csa_addr, gds_addr;
5032         int cnt;
5033
5034         csa_addr = amdgpu_csa_vaddr(ring->adev);
5035         gds_addr = csa_addr + 4096;
5036         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5037         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5038
5039         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5040         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5041         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5042                                  WRITE_DATA_DST_SEL(8) |
5043                                  WR_CONFIRM) |
5044                                  WRITE_DATA_CACHE_POLICY(0));
5045         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5046         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
5047         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
5048 }
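/*
 * Sizing note (illustrative): a PACKET3 count field encodes "body
 * dwords minus one".  The WRITE_DATA body above is 1 control dword +
 * 2 address dwords + the payload, hence
 *
 *     cnt = ((sizeof(payload) >> 2) + 3) - 1
 *         = (sizeof(payload) >> 2) + 4 - 2
 *
 * which is the expression used by both the CE and DE meta emitters.
 */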
5049
5050 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
5051 {
5052         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5053         amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_start, 1: frame_end */
5054 }
5055
5056 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
5057 {
5058         uint32_t dw2 = 0;
5059
5060         if (amdgpu_sriov_vf(ring->adev))
5061                 gfx_v9_0_ring_emit_ce_meta(ring);
5062
5063         gfx_v9_0_ring_emit_tmz(ring, true);
5064
5065         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
5066         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
5067                 /* set load_global_config & load_global_uconfig */
5068                 dw2 |= 0x8001;
5069                 /* set load_cs_sh_regs */
5070                 dw2 |= 0x01000000;
5071                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
5072                 dw2 |= 0x10002;
5073
5074                 /* set load_ce_ram if preamble presented */
5075                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
5076                         dw2 |= 0x10000000;
5077         } else {
5078                 /* still load_ce_ram if the preamble is presented for the
5079                  * first time, even though no context switch happens.
5080                  */
5081                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
5082                         dw2 |= 0x10000000;
5083         }
5084
5085         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5086         amdgpu_ring_write(ring, dw2);
5087         amdgpu_ring_write(ring, 0);
5088 }
5089
5090 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5091 {
5092         unsigned ret;
5093         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5094         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5095         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5096         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5097         ret = ring->wptr & ring->buf_mask;
5098         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5099         return ret;
5100 }
5101
5102 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5103 {
5104         unsigned cur;
5105         BUG_ON(offset > ring->buf_mask);
5106         BUG_ON(ring->ring[offset] != 0x55aa55aa);
5107
5108         cur = (ring->wptr & ring->buf_mask) - 1;
5109         if (likely(cur > offset))
5110                 ring->ring[offset] = cur - offset;
5111         else
5112                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
5113 }
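/*
 * Worked example (illustrative): with a 4096-dword ring (buf_mask
 * 0xfff), suppose the COND_EXEC placeholder sits at offset 0xffe and
 * wptr has since wrapped around to 0x002.  Then cur = 0x001, the else
 * branch runs, and the patched value is 4096 - 0xffe + 0x001 = 3: the
 * distance in dwords from the placeholder to the write pointer,
 * measured across the wrap, i.e. how much the CP may skip.
 */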
5114
5115 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
5116 {
5117         struct amdgpu_device *adev = ring->adev;
5118
5119         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5120         amdgpu_ring_write(ring, 0 |     /* src: register */
5121                                 (5 << 8) |      /* dst: memory */
5122                                 (1 << 20));     /* write confirm */
5123         amdgpu_ring_write(ring, reg);
5124         amdgpu_ring_write(ring, 0);
5125         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5126                                 adev->virt.reg_val_offs * 4));
5127         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5128                                 adev->virt.reg_val_offs * 4));
5129 }
5130
5131 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5132                                     uint32_t val)
5133 {
5134         uint32_t cmd = 0;
5135
5136         switch (ring->funcs->type) {
5137         case AMDGPU_RING_TYPE_GFX:
5138                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5139                 break;
5140         case AMDGPU_RING_TYPE_KIQ:
5141                 cmd = (1 << 16); /* no inc addr */
5142                 break;
5143         default:
5144                 cmd = WR_CONFIRM;
5145                 break;
5146         }
5147         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5148         amdgpu_ring_write(ring, cmd);
5149         amdgpu_ring_write(ring, reg);
5150         amdgpu_ring_write(ring, 0);
5151         amdgpu_ring_write(ring, val);
5152 }
5153
5154 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5155                                         uint32_t val, uint32_t mask)
5156 {
5157         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5158 }
5159
5160 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5161                                                   uint32_t reg0, uint32_t reg1,
5162                                                   uint32_t ref, uint32_t mask)
5163 {
5164         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5165         struct amdgpu_device *adev = ring->adev;
5166         bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
5167                 adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;
5168
5169         if (fw_version_ok)
5170                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5171                                       ref, mask, 0x20);
5172         else
5173                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
5174                                                            ref, mask);
5175 }
5176
5177 static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
5178 {
5179         struct amdgpu_device *adev = ring->adev;
5180         uint32_t value = 0;
5181
5182         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5183         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5184         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5185         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5186         WREG32_SOC15(GC, 0, mmSQ_CMD, value);
5187 }
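/*
 * Illustrative reading (field encodings assumed, they are not defined
 * in this file hunk): CMD 0x03 requests a wave kill, MODE 0x01
 * broadcasts it, and CHECK_VMID/VM_ID restrict the kill to waves of
 * the hung VMID, letting a stuck job be removed without a full GPU
 * reset.
 */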
5188
5189 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5190                                                  enum amdgpu_interrupt_state state)
5191 {
5192         switch (state) {
5193         case AMDGPU_IRQ_STATE_DISABLE:
5194         case AMDGPU_IRQ_STATE_ENABLE:
5195                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5196                                TIME_STAMP_INT_ENABLE,
5197                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5198                 break;
5199         default:
5200                 break;
5201         }
5202 }
5203
5204 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5205                                                      int me, int pipe,
5206                                                      enum amdgpu_interrupt_state state)
5207 {
5208         u32 mec_int_cntl, mec_int_cntl_reg;
5209
5210         /*
5211          * amdgpu controls only the first MEC. That's why this function only
5212          * handles the setting of interrupts for this specific MEC. All other
5213          * pipes' interrupts are set by amdkfd.
5214          */
5215
5216         if (me == 1) {
5217                 switch (pipe) {
5218                 case 0:
5219                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
5220                         break;
5221                 case 1:
5222                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
5223                         break;
5224                 case 2:
5225                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
5226                         break;
5227                 case 3:
5228                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
5229                         break;
5230                 default:
5231                         DRM_DEBUG("invalid pipe %d\n", pipe);
5232                         return;
5233                 }
5234         } else {
5235                 DRM_DEBUG("invalid me %d\n", me);
5236                 return;
5237         }
5238
5239         switch (state) {
5240         case AMDGPU_IRQ_STATE_DISABLE:
5241                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5242                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5243                                              TIME_STAMP_INT_ENABLE, 0);
5244                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5245                 break;
5246         case AMDGPU_IRQ_STATE_ENABLE:
5247                 mec_int_cntl = RREG32(mec_int_cntl_reg);
5248                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5249                                              TIME_STAMP_INT_ENABLE, 1);
5250                 WREG32(mec_int_cntl_reg, mec_int_cntl);
5251                 break;
5252         default:
5253                 break;
5254         }
5255 }
5256
5257 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5258                                              struct amdgpu_irq_src *source,
5259                                              unsigned type,
5260                                              enum amdgpu_interrupt_state state)
5261 {
5262         switch (state) {
5263         case AMDGPU_IRQ_STATE_DISABLE:
5264         case AMDGPU_IRQ_STATE_ENABLE:
5265                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5266                                PRIV_REG_INT_ENABLE,
5267                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5268                 break;
5269         default:
5270                 break;
5271         }
5272
5273         return 0;
5274 }
5275
5276 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5277                                               struct amdgpu_irq_src *source,
5278                                               unsigned type,
5279                                               enum amdgpu_interrupt_state state)
5280 {
5281         switch (state) {
5282         case AMDGPU_IRQ_STATE_DISABLE:
5283         case AMDGPU_IRQ_STATE_ENABLE:
5284                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5285                                PRIV_INSTR_INT_ENABLE,
5286                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                     break;
5287         default:
5288                 break;
5289         }
5290
5291         return 0;
5292 }
5293
5294 #define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
5295         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5296                         CP_ECC_ERROR_INT_ENABLE, 1)
5297
5298 #define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
5299         WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
5300                         CP_ECC_ERROR_INT_ENABLE, 0)
5301
5302 static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
5303                                               struct amdgpu_irq_src *source,
5304                                               unsigned type,
5305                                               enum amdgpu_interrupt_state state)
5306 {
5307         switch (state) {
5308         case AMDGPU_IRQ_STATE_DISABLE:
5309                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5310                                 CP_ECC_ERROR_INT_ENABLE, 0);
5311                 DISABLE_ECC_ON_ME_PIPE(1, 0);
5312                 DISABLE_ECC_ON_ME_PIPE(1, 1);
5313                 DISABLE_ECC_ON_ME_PIPE(1, 2);
5314                 DISABLE_ECC_ON_ME_PIPE(1, 3);
5315                 break;
5316
5317         case AMDGPU_IRQ_STATE_ENABLE:
5318                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
5319                                 CP_ECC_ERROR_INT_ENABLE, 1);
5320                 ENABLE_ECC_ON_ME_PIPE(1, 0);
5321                 ENABLE_ECC_ON_ME_PIPE(1, 1);
5322                 ENABLE_ECC_ON_ME_PIPE(1, 2);
5323                 ENABLE_ECC_ON_ME_PIPE(1, 3);
5324                 break;
5325         default:
5326                 break;
5327         }
5328
5329         return 0;
5330 }
5331
5332
5333 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5334                                             struct amdgpu_irq_src *src,
5335                                             unsigned type,
5336                                             enum amdgpu_interrupt_state state)
5337 {
5338         switch (type) {
5339         case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5340                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
5341                 break;
5342         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5343                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5344                 break;
5345         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5346                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5347                 break;
5348         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5349                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5350                 break;
5351         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5352                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5353                 break;
5354         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
5355                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
5356                 break;
5357         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
5358                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
5359                 break;
5360         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
5361                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
5362                 break;
5363         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
5364                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
5365                 break;
5366         default:
5367                 break;
5368         }
5369         return 0;
5370 }
5371
5372 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
5373                             struct amdgpu_irq_src *source,
5374                             struct amdgpu_iv_entry *entry)
5375 {
5376         int i;
5377         u8 me_id, pipe_id, queue_id;
5378         struct amdgpu_ring *ring;
5379
5380         DRM_DEBUG("IH: CP EOP\n");
5381         me_id = (entry->ring_id & 0x0c) >> 2;
5382         pipe_id = (entry->ring_id & 0x03) >> 0;
5383         queue_id = (entry->ring_id & 0x70) >> 4;
5384
5385         switch (me_id) {
5386         case 0:
5387                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5388                 break;
5389         case 1:
5390         case 2:
5391                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5392                         ring = &adev->gfx.compute_ring[i];
5393                         /* Per-queue interrupt is supported for MEC starting from VI.
5394                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
5395                          */
5396                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
5397                                 amdgpu_fence_process(ring);
5398                 }
5399                 break;
5400         }
5401         return 0;
5402 }
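/*
 * Worked decode (illustrative): ring_id 0x35 (0b0110101) gives
 * queue_id = (0x35 & 0x70) >> 4 = 3, me_id = (0x35 & 0x0c) >> 2 = 1
 * and pipe_id = 0x35 & 0x03 = 1, so the EOP came from MEC1 pipe 1
 * queue 3 and only that compute ring has its fences processed.
 */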
5403
5404 static void gfx_v9_0_fault(struct amdgpu_device *adev,
5405                            struct amdgpu_iv_entry *entry)
5406 {
5407         u8 me_id, pipe_id, queue_id;
5408         struct amdgpu_ring *ring;
5409         int i;
5410
5411         me_id = (entry->ring_id & 0x0c) >> 2;
5412         pipe_id = (entry->ring_id & 0x03) >> 0;
5413         queue_id = (entry->ring_id & 0x70) >> 4;
5414
5415         switch (me_id) {
5416         case 0:
5417                 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
5418                 break;
5419         case 1:
5420         case 2:
5421                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5422                         ring = &adev->gfx.compute_ring[i];
5423                         if (ring->me == me_id && ring->pipe == pipe_id &&
5424                             ring->queue == queue_id)
5425                                 drm_sched_fault(&ring->sched);
5426                 }
5427                 break;
5428         }
5429 }
5430
5431 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
5432                                  struct amdgpu_irq_src *source,
5433                                  struct amdgpu_iv_entry *entry)
5434 {
5435         DRM_ERROR("Illegal register access in command stream\n");
5436         gfx_v9_0_fault(adev, entry);
5437         return 0;
5438 }
5439
5440 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
5441                                   struct amdgpu_irq_src *source,
5442                                   struct amdgpu_iv_entry *entry)
5443 {
5444         DRM_ERROR("Illegal instruction in command stream\n");
5445         gfx_v9_0_fault(adev, entry);
5446         return 0;
5447 }
5448
5449
5450 static const struct ras_gfx_subblock_reg ras_subblock_regs[] = {
5451         { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
5452           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
5453           SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
5454         },
5455         { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
5456           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
5457           SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
5458         },
5459         { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5460           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
5461           0, 0
5462         },
5463         { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
5464           SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
5465           0, 0
5466         },
5467         { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
5468           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
5469           SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
5470         },
5471         { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5472           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
5473           0, 0
5474         },
5475         { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
5476           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
5477           SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
5478         },
5479         { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
5480           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
5481           SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
5482         },
5483         { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
5484           SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
5485           0, 0
5486         },
5487         { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
5488           SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
5489           0, 0
5490         },
5491         { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
5492           SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
5493           0, 0
5494         },
5495         { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5496           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
5497           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
5498         },
5499         { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
5500           SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
5501           0, 0
5502         },
5503         { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5504           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
5505           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
5506         },
5507         { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
5508           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5509           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
5510           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
5511         },
5512         { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
5513           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
5514           SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
5515           0, 0
5516         },
5517         { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
5518           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5519           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
5520           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
5521         },
5522         { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
5523           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5524           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
5525           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
5526         },
5527         { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
5528           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5529           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
5530           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
5531         },
5532         { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
5533           SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
5534           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
5535           SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
5536         },
5537         { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
5538           SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
5539           0, 0
5540         },
5541         { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5542           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
5543           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
5544         },
5545         { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5546           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
5547           0, 0
5548         },
5549         { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5550           SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
5551           0, 0
5552         },
5553         { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5554           SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
5555           0, 0
5556         },
5557         { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
5558           SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
5559           0, 0
5560         },
5561         { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5562           SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
5563           0, 0
5564         },
5565         { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
5566           SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
5567           0, 0
5568         },
5569         { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5570           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
5571           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
5572         },
5573         { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5574           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
5575           SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
5576         },
5577         { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5578           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
5579           SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
5580         },
5581         { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5582           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
5583           SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
5584         },
5585         { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5586           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
5587           SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
5588         },
5589         { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5590           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
5591           0, 0
5592         },
5593         { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5594           SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
5595           0, 0
5596         },
5597         { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5598           SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
5599           0, 0
5600         },
5601         { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5602           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
5603           0, 0
5604         },
5605         { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5606           SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
5607           0, 0
5608         },
5609         { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
5610           SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
5611           0, 0
5612         },
5613         { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5614           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
5615           0, 0
5616         },
5617         { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5618           SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
5619           0, 0
5620         },
5621         { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5622           SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
5623           0, 0
5624         },
5625         { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5626           SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
5627           0, 0
5628         },
5629         { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5630           SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
5631           0, 0
5632         },
5633         { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5634           SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
5635           0, 0
5636         },
5637         { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
5638           SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
5639           0, 0
5640         },
5641         { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
5642           SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
5643           0, 0
5644         },
5645         { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5646           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
5647           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
5648         },
5649         { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5650           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
5651           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
5652         },
5653         { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5654           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
5655           0, 0
5656         },
5657         { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5658           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
5659           0, 0
5660         },
5661         { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5662           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
5663           0, 0
5664         },
5665         { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5666           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
5667           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
5668         },
5669         { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
5670           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
5671           SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
5672         },
5673         { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
5674           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
5675           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
5676         },
5677         { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
5678           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
5679           SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
5680         },
5681         { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
5682           SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
5683           0, 0
5684         },
5685         { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5686           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
5687           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
5688         },
5689         { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5690           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
5691           SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
5692         },
5693         { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5694           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
5695           SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
5696         },
5697         { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5698           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
5699           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
5700         },
5701         { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5702           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
5703           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
5704         },
5705         { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5706           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
5707           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
5708         },
5709         { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
5710           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
5711           SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
5712         },
5713         { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
5714           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
5715           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
5716         },
5717         { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
5718           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
5719           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
5720         },
5721         { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
5722           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
5723           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
5724         },
5725         { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
5726           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
5727           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
5728         },
5729         { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
5730           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
5731           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
5732         },
5733         { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
5734           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
5735           SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
5736         },
5737         { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5738           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
5739           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
5740         },
5741         { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5742           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
5743           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
5744         },
5745         { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5746           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
5747           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
5748         },
5749         { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5750           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
5751           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
5752         },
5753         { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5754           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
5755           0, 0
5756         },
5757         { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5758           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
5759           0, 0
5760         },
5761         { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5762           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
5763           0, 0
5764         },
5765         { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5766           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
5767           0, 0
5768         },
5769         { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5770           SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
5771           0, 0
5772         },
5773         { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
5774           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
5775           SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
5776         },
5777         { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5778           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
5779           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
5780         },
5781         { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5782           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
5783           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
5784         },
5785         { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5786           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
5787           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
5788         },
5789         { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5790           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
5791           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
5792         },
5793         { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5794           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
5795           0, 0
5796         },
5797         { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5798           SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
5799           0, 0
5800         },
5801         { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5802           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
5803           0, 0
5804         },
5805         { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5806           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
5807           0, 0
5808         },
5809         { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
5810           SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
5811           0, 0
5812         },
5813         { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5814           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
5815           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
5816         },
5817         { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5818           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
5819           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
5820         },
5821         { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5822           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
5823           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
5824         },
5825         { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5826           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
5827           SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
5828         },
5829         { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5830           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
5831           SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
5832         },
5833         { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5834           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
5835           0, 0
5836         },
5837         { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5838           SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
5839           0, 0
5840         },
5841         { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5842           SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
5843           0, 0
5844         },
5845         { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5846           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
5847           0, 0
5848         },
5849         { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
5850           SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
5851           0, 0
5852         },
5853         { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5854           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
5855           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
5856         },
5857         { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5858           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
5859           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
5860         },
5861         { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5862           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
5863           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
5864         },
5865         { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5866           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
5867           0, 0
5868         },
5869         { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5870           SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
5871           0, 0
5872         },
5873         { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5874           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
5875           0, 0
5876         },
5877         { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5878           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
5879           0, 0
5880         },
5881         { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5882           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
5883           0, 0
5884         },
5885         { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
5886           SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
5887           0, 0
5888         }
5889 };
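/*
 * Illustrative consumer sketch (member names are assumptions; the
 * actual query loop lives outside this hunk): each entry pairs an EDC
 * counter register with the shift/mask of its SEC and DED fields, so
 * per-block counts can be pulled out roughly as
 *
 *     data = RREG32(reg_offset(&ras_subblock_regs[i]));
 *     sec  = (data & e->sec_count_mask) >> e->sec_count_shift;
 *     ded  = (data & e->ded_count_mask) >> e->ded_count_shift;
 *
 * Entries listing "0, 0" simply have no DED counter to report.
 */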
5890
5891 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
5892                                      void *inject_if)
5893 {
5894         struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
5895         int ret;
5896         struct ta_ras_trigger_error_input block_info = { 0 };
5897
5898         if (adev->asic_type != CHIP_VEGA20)
5899                 return -EINVAL;
5900
5901         if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
5902                 return -EINVAL;
5903
5904         if (!ras_gfx_subblocks[info->head.sub_block_index].name)
5905                 return -EPERM;
5906
5907         if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
5908               info->head.type)) {
5909                 DRM_ERROR("GFX Subblock %s, hardware does not support type 0x%x\n",
5910                         ras_gfx_subblocks[info->head.sub_block_index].name,
5911                         info->head.type);
5912                 return -EPERM;
5913         }
5914
5915         if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
5916               info->head.type)) {
5917                 DRM_ERROR("GFX Subblock %s, driver does not support type 0x%x\n",
5918                         ras_gfx_subblocks[info->head.sub_block_index].name,
5919                         info->head.type);
5920                 return -EPERM;
5921         }
5922
5923         block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
5924         block_info.sub_block_index =
5925                 ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
5926         block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
5927         block_info.address = info->address;
5928         block_info.value = info->value;
5929
5930         mutex_lock(&adev->grbm_idx_mutex);
5931         ret = psp_ras_trigger_error(&adev->psp, &block_info);
5932         mutex_unlock(&adev->grbm_idx_mutex);
5933
5934         return ret;
5935 }
5936
5937 static const char *vml2_mems[] = {
5938         "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
5939         "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
5940         "UTC_VML2_BANK_CACHE_0_4K_MEM0",
5941         "UTC_VML2_BANK_CACHE_0_4K_MEM1",
5942         "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
5943         "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
5944         "UTC_VML2_BANK_CACHE_1_4K_MEM0",
5945         "UTC_VML2_BANK_CACHE_1_4K_MEM1",
5946         "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
5947         "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
5948         "UTC_VML2_BANK_CACHE_2_4K_MEM0",
5949         "UTC_VML2_BANK_CACHE_2_4K_MEM1",
5950         "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
5951         "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
5952         "UTC_VML2_BANK_CACHE_3_4K_MEM0",
5953         "UTC_VML2_BANK_CACHE_3_4K_MEM1",
5954 };
5955
5956 static const char *vml2_walker_mems[] = {
5957         "UTC_VML2_CACHE_PDE0_MEM0",
5958         "UTC_VML2_CACHE_PDE0_MEM1",
5959         "UTC_VML2_CACHE_PDE1_MEM0",
5960         "UTC_VML2_CACHE_PDE1_MEM1",
5961         "UTC_VML2_CACHE_PDE2_MEM0",
5962         "UTC_VML2_CACHE_PDE2_MEM1",
5963         "UTC_VML2_RDIF_LOG_FIFO",
5964 };
5965
5966 static const char *atc_l2_cache_2m_mems[] = {
5967         "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
5968         "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
5969         "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
5970         "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
5971 };
5972
5973 static const char *atc_l2_cache_4k_mems[] = {
5974         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
5975         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
5976         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
5977         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
5978         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
5979         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
5980         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
5981         "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
5982         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
5983         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
5984         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
5985         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
5986         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
5987         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
5988         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
5989         "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
5990         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
5991         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
5992         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
5993         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
5994         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
5995         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
5996         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
5997         "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
5998         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
5999         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
6000         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
6001         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
6002         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
6003         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
6004         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
6005         "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
6006 };
6007
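/*
 * Read back the EDC (error detect and correct) counters for the UTC
 * memories named above: the VML2 bank caches, the VML2 walker memories
 * and the ATC L2 2M/4K caches.  Non-zero SEC (single error corrected)
 * counts are accumulated into err_data as correctable errors, DED
 * (double error detected) counts as uncorrectable errors.
 */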
6008 static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
6009                                          struct ras_err_data *err_data)
6010 {
6011         uint32_t i, data;
6012         uint32_t sec_count, ded_count;
6013
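        /* clear the EDC counters before reading them back */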
6014         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6015         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
6016         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6017         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
6018         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6019         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
6020         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6021         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);
6022
6023         for (i = 0; i < 16; i++) {
6024                 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
6025                 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
6026
6027                 sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
6028                 if (sec_count) {
6029                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6030                                  vml2_mems[i], sec_count);
6031                         err_data->ce_count += sec_count;
6032                 }
6033
6034                 ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
6035                 if (ded_count) {
6036                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6037                                  vml2_mems[i], ded_count);
6038                         err_data->ue_count += ded_count;
6039                 }
6040         }
6041
6042         for (i = 0; i < 7; i++) {
6043                 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
6044                 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
6045
6046                 sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6047                                                 SEC_COUNT);
6048                 if (sec_count) {
6049                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6050                                  vml2_walker_mems[i], sec_count);
6051                         err_data->ce_count += sec_count;
6052                 }
6053
6054                 ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
6055                                                 DED_COUNT);
6056                 if (ded_count) {
6057                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6058                                  vml2_walker_mems[i], ded_count);
6059                         err_data->ue_count += ded_count;
6060                 }
6061         }
6062
6063         for (i = 0; i < 4; i++) {
6064                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
6065                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
6066
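                /*
                 * Hand-decoded field: going by the mask, bits [14:13] of
                 * the 2M EDC_CNT register hold the SEC count.
                 */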
6067                 sec_count = (data & 0x00006000L) >> 0xd;
6068                 if (sec_count) {
6069                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6070                                  atc_l2_cache_2m_mems[i], sec_count);
6071                         err_data->ce_count += sec_count;
6072                 }
6073         }
6074
6075         for (i = 0; i < 32; i++) {
6076                 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
6077                 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
6078
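                /* hand-decoded fields: bits [14:13] SEC, bits [16:15] DED */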
6079                 sec_count = (data & 0x00006000L) >> 0xd;
6080                 if (sec_count) {
6081                         DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
6082                                  atc_l2_cache_4k_mems[i], sec_count);
6083                         err_data->ce_count += sec_count;
6084                 }
6085
6086                 ded_count = (data & 0x00018000L) >> 0xf;
6087                 if (ded_count) {
6088                         DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
6089                                  atc_l2_cache_4k_mems[i], ded_count);
6090                         err_data->ue_count += ded_count;
6091                 }
6092         }
6093
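        /* park the index registers back at 255, matching the state set on entry */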
6094         WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
6095         WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
6096         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
6097         WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
6098
6099         return 0;
6100 }
6101
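/*
 * Match one freshly read counter value against the ras_subblock_regs
 * table and extract the per-sub-block SEC/DED bit-fields, logging any
 * non-zero counts and adding them to *sec_count / *ded_count.
 */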
6102 static int __get_ras_error_count(const struct soc15_reg_entry *reg,
6103         uint32_t se_id, uint32_t inst_id, uint32_t value,
6104         uint32_t *sec_count, uint32_t *ded_count)
6105 {
6106         uint32_t i;
6107         uint32_t sec_cnt, ded_cnt;
6108
6109         for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) {
6110                 if (ras_subblock_regs[i].reg_offset != reg->reg_offset ||
6111                         ras_subblock_regs[i].seg != reg->seg ||
6112                         ras_subblock_regs[i].inst != reg->inst)
6113                         continue;
6114
6115                 sec_cnt = (value &
6116                                 ras_subblock_regs[i].sec_count_mask) >>
6117                                 ras_subblock_regs[i].sec_count_shift;
6118                 if (sec_cnt) {
6119                         DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
6120                                 ras_subblock_regs[i].name,
6121                                 se_id, inst_id,
6122                                 sec_cnt);
6123                         *sec_count += sec_cnt;
6124                 }
6125
6126                 ded_cnt = (value &
6127                                 ras_subblock_regs[i].ded_count_mask) >>
6128                                 ras_subblock_regs[i].ded_count_shift;
6129                 if (ded_cnt) {
6130                         DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
6131                                 ras_subblock_regs[i].name,
6132                                 se_id, inst_id,
6133                                 ded_cnt);
6134                         *ded_count += ded_cnt;
6135                 }
6136         }
6137
6138         return 0;
6139 }
6140
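/*
 * Walk every SEC/DED counter register across all shader engines and
 * instances, folding the results into ras_error_status.  grbm_idx_mutex
 * is held because gfx_v9_0_select_se_sh() reprograms the shared
 * GRBM_GFX_INDEX selector.  Only Vega20 exposes these counters, hence
 * the early -EINVAL for every other ASIC.
 */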
6141 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
6142                                           void *ras_error_status)
6143 {
6144         struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
6145         uint32_t sec_count = 0, ded_count = 0;
6146         uint32_t i, j, k;
6147         uint32_t reg_value;
6148
6149         if (adev->asic_type != CHIP_VEGA20)
6150                 return -EINVAL;
6151
6152         err_data->ue_count = 0;
6153         err_data->ce_count = 0;
6154
6155         mutex_lock(&adev->grbm_idx_mutex);
6156
6157         for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) {
6158                 for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) {
6159                         for (k = 0; k < sec_ded_counter_registers[i].instance; k++) {
6160                                 gfx_v9_0_select_se_sh(adev, j, 0, k);
6161                                 reg_value =
6162                                         RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i]));
6163                                 if (reg_value)
6164                                         __get_ras_error_count(&sec_ded_counter_registers[i],
6165                                                         j, k, reg_value,
6166                                                         &sec_count, &ded_count);
6167                         }
6168                 }
6169         }
6170
6171         err_data->ce_count += sec_count;
6172         err_data->ue_count += ded_count;
6173
6174         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6175         mutex_unlock(&adev->grbm_idx_mutex);
6176
6177         gfx_v9_0_query_utc_edc_status(adev, err_data);
6178
6179         return 0;
6180 }
6181
6182 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
6183         .name = "gfx_v9_0",
6184         .early_init = gfx_v9_0_early_init,
6185         .late_init = gfx_v9_0_late_init,
6186         .sw_init = gfx_v9_0_sw_init,
6187         .sw_fini = gfx_v9_0_sw_fini,
6188         .hw_init = gfx_v9_0_hw_init,
6189         .hw_fini = gfx_v9_0_hw_fini,
6190         .suspend = gfx_v9_0_suspend,
6191         .resume = gfx_v9_0_resume,
6192         .is_idle = gfx_v9_0_is_idle,
6193         .wait_for_idle = gfx_v9_0_wait_for_idle,
6194         .soft_reset = gfx_v9_0_soft_reset,
6195         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
6196         .set_powergating_state = gfx_v9_0_set_powergating_state,
6197         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
6198 };
6199
6200 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
6201         .type = AMDGPU_RING_TYPE_GFX,
6202         .align_mask = 0xff,
6203         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6204         .support_64bit_ptrs = true,
6205         .vmhub = AMDGPU_GFXHUB_0,
6206         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
6207         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
6208         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
6209         .emit_frame_size = /* 242 dwords maximum if 16 IBs */
6210                 5 +  /* COND_EXEC */
6211                 7 +  /* PIPELINE_SYNC */
6212                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6213                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6214                 2 +  /* VM_FLUSH */
6215                 8 +  /* FENCE for VM_FLUSH */
6216                 20 + /* GDS switch */
6217                 4 +  /* double SWITCH_BUFFER,
6218                         the first COND_EXEC jumps to the place just
6219                         prior to this double SWITCH_BUFFER */
6220                 5 +  /* COND_EXEC */
6221                 7 +  /* HDP_flush */
6222                 4 +  /* VGT_flush */
6223                 14 + /* CE_META */
6224                 31 + /* DE_META */
6225                 3 +  /* CNTX_CTRL */
6226                 5 +  /* HDP_INVL */
6227                 8 + 8 + /* FENCE x2 */
6228                 2, /* SWITCH_BUFFER */
6229         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
6230         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
6231         .emit_fence = gfx_v9_0_ring_emit_fence,
6232         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6233         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6234         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6235         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6236         .test_ring = gfx_v9_0_ring_test_ring,
6237         .test_ib = gfx_v9_0_ring_test_ib,
6238         .insert_nop = amdgpu_ring_insert_nop,
6239         .pad_ib = amdgpu_ring_generic_pad_ib,
6240         .emit_switch_buffer = gfx_v9_ring_emit_sb,
6241         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
6242         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
6243         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
6244         .emit_tmz = gfx_v9_0_ring_emit_tmz,
6245         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6246         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6247         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6248         .soft_recovery = gfx_v9_0_ring_soft_recovery,
6249 };
6250
6251 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
6252         .type = AMDGPU_RING_TYPE_COMPUTE,
6253         .align_mask = 0xff,
6254         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6255         .support_64bit_ptrs = true,
6256         .vmhub = AMDGPU_GFXHUB_0,
6257         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6258         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6259         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6260         .emit_frame_size =
6261                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6262                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6263                 5 + /* hdp invalidate */
6264                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6265                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6266                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6267                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6268                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
6269         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6270         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
6271         .emit_fence = gfx_v9_0_ring_emit_fence,
6272         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
6273         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
6274         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
6275         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
6276         .test_ring = gfx_v9_0_ring_test_ring,
6277         .test_ib = gfx_v9_0_ring_test_ib,
6278         .insert_nop = amdgpu_ring_insert_nop,
6279         .pad_ib = amdgpu_ring_generic_pad_ib,
6280         .set_priority = gfx_v9_0_ring_set_priority_compute,
6281         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6282         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6283         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6284 };
6285
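/*
 * The KIQ carries only driver-generated packets (queue mapping, register
 * reads/writes) and never executes user IBs, which is why no emit_ib or
 * test_ib hooks are wired up below.
 */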
6286 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
6287         .type = AMDGPU_RING_TYPE_KIQ,
6288         .align_mask = 0xff,
6289         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6290         .support_64bit_ptrs = true,
6291         .vmhub = AMDGPU_GFXHUB_0,
6292         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
6293         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
6294         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
6295         .emit_frame_size =
6296                 20 + /* gfx_v9_0_ring_emit_gds_switch */
6297                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
6298                 5 + /* hdp invalidate */
6299                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
6300                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6301                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6302                 2 + /* gfx_v9_0_ring_emit_vm_flush */
6303                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6304         .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
6305         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
6306         .test_ring = gfx_v9_0_ring_test_ring,
6307         .insert_nop = amdgpu_ring_insert_nop,
6308         .pad_ib = amdgpu_ring_generic_pad_ib,
6309         .emit_rreg = gfx_v9_0_ring_emit_rreg,
6310         .emit_wreg = gfx_v9_0_ring_emit_wreg,
6311         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
6312         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
6313 };
6314
6315 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
6316 {
6317         int i;
6318
6319         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
6320
6321         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6322                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
6323
6324         for (i = 0; i < adev->gfx.num_compute_rings; i++)
6325                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
6326 }
6327
6328 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
6329         .set = gfx_v9_0_set_eop_interrupt_state,
6330         .process = gfx_v9_0_eop_irq,
6331 };
6332
6333 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
6334         .set = gfx_v9_0_set_priv_reg_fault_state,
6335         .process = gfx_v9_0_priv_reg_irq,
6336 };
6337
6338 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
6339         .set = gfx_v9_0_set_priv_inst_fault_state,
6340         .process = gfx_v9_0_priv_inst_irq,
6341 };
6342
6343 static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
6344         .set = gfx_v9_0_set_cp_ecc_error_state,
6345         .process = amdgpu_gfx_cp_ecc_error_irq,
6346 };
6347
6349 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
6350 {
6351         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6352         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
6353
6354         adev->gfx.priv_reg_irq.num_types = 1;
6355         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
6356
6357         adev->gfx.priv_inst_irq.num_types = 1;
6358         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
6359
6360         adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
6361         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
6362 }
6363
6364 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
6365 {
6366         switch (adev->asic_type) {
6367         case CHIP_VEGA10:
6368         case CHIP_VEGA12:
6369         case CHIP_VEGA20:
6370         case CHIP_RAVEN:
6371         case CHIP_ARCTURUS:
6372         case CHIP_RENOIR:
6373                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
6374                 break;
6375         default:
6376                 break;
6377         }
6378 }
6379
6380 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
6381 {
6382         /* init asic gds info */
6383         switch (adev->asic_type) {
6384         case CHIP_VEGA10:
6385         case CHIP_VEGA12:
6386         case CHIP_VEGA20:
6387                 adev->gds.gds_size = 0x10000;
6388                 break;
6389         case CHIP_RAVEN:
6390         case CHIP_ARCTURUS:
6391                 adev->gds.gds_size = 0x1000;
6392                 break;
6393         default:
6394                 adev->gds.gds_size = 0x10000;
6395                 break;
6396         }
6397
6398         switch (adev->asic_type) {
6399         case CHIP_VEGA10:
6400         case CHIP_VEGA20:
6401                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6402                 break;
6403         case CHIP_VEGA12:
6404                 adev->gds.gds_compute_max_wave_id = 0x27f;
6405                 break;
6406         case CHIP_RAVEN:
6407                 if (adev->rev_id >= 0x8)
6408                         adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
6409                 else
6410                         adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
6411                 break;
6412         case CHIP_ARCTURUS:
6413                 adev->gds.gds_compute_max_wave_id = 0xfff;
6414                 break;
6415         default:
6416                 /* this really depends on the chip */
6417                 adev->gds.gds_compute_max_wave_id = 0x7ff;
6418                 break;
6419         }
6420
6421         adev->gds.gws_size = 64;
6422         adev->gds.oa_size = 16;
6423 }
6424
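/* mark the CUs set in @bitmap as inactive on the currently selected SE/SH */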
6425 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6426                                                  u32 bitmap)
6427 {
6428         u32 data;
6429
6430         if (!bitmap)
6431                 return;
6432
6433         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6434         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6435
6436         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
6437 }
6438
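/*
 * A CU is active when it is neither fused off (CC_GC_SHADER_ARRAY_CONFIG)
 * nor user-disabled (GC_USER_SHADER_ARRAY_CONFIG): OR the two inactive
 * masks together and invert, limited to max_cu_per_sh bits.
 */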
6439 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6440 {
6441         u32 data, mask;
6442
6443         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
6444         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
6445
6446         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6447         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6448
6449         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
6450
6451         return (~data) & mask;
6452 }
6453
6454 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
6455                                  struct amdgpu_cu_info *cu_info)
6456 {
6457         int i, j, k, counter, active_cu_number = 0;
6458         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6459         unsigned disable_masks[4 * 4];
6460
6461         if (!adev || !cu_info)
6462                 return -EINVAL;
6463
6464         /*
6465          * 16 comes from the 4*4 bitmap array size, large enough to cover all gfx9 ASICs
6466          */
6467         if (adev->gfx.config.max_shader_engines *
6468                 adev->gfx.config.max_sh_per_se > 16)
6469                 return -EINVAL;
6470
6471         amdgpu_gfx_parse_disable_cu(disable_masks,
6472                                     adev->gfx.config.max_shader_engines,
6473                                     adev->gfx.config.max_sh_per_se);
6474
6475         mutex_lock(&adev->grbm_idx_mutex);
6476         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6477                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6478                         mask = 1;
6479                         ao_bitmap = 0;
6480                         counter = 0;
6481                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
6482                         gfx_v9_0_set_user_cu_inactive_bitmap(
6483                                 adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
6484                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
6485
6486                         /*
6487                          * The bitmap (and ao_cu_bitmap) in the cu_info
6488                          * structure is a 4x4 array, which fits Vega
6489                          * ASICs with their 4*2 SE/SH layout.
6490                          * Arcturus, however, uses an 8*1 SE/SH layout.
6491                          * To minimize the impact, we remap its extra SEs
6492                          * onto the existing bitmap array as below:
6493                          *    SE4,SH0 --> bitmap[0][1]
6494                          *    SE5,SH0 --> bitmap[1][1]
6495                          *    SE6,SH0 --> bitmap[2][1]
6496                          *    SE7,SH0 --> bitmap[3][1]
6497                          */
6498                         cu_info->bitmap[i % 4][j + i / 4] = bitmap;
6499
6500                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6501                                 if (bitmap & mask) {
6502                                         if (counter < adev->gfx.config.max_cu_per_sh)
6503                                                 ao_bitmap |= mask;
6504                                         counter++;
6505                                 }
6506                                 mask <<= 1;
6507                         }
6508                         active_cu_number += counter;
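                        /*
                         * ao_cu_mask is only 32 bits wide; at 8 bits per
                         * SH, only the first 2 SEs x 2 SHs fit, so larger
                         * configurations are not reflected in the mask.
                         */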
6509                         if (i < 2 && j < 2)
6510                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
6511                         cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
6512                 }
6513         }
6514         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6515         mutex_unlock(&adev->grbm_idx_mutex);
6516
6517         cu_info->number = active_cu_number;
6518         cu_info->ao_cu_mask = ao_cu_mask;
6519         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
6520
6521         return 0;
6522 }
6523
6524 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
6525 {
6526         .type = AMD_IP_BLOCK_TYPE_GFX,
6527         .major = 9,
6528         .minor = 0,
6529         .rev = 0,
6530         .funcs = &gfx_v9_0_ip_funcs,
6531 };