/* drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c (from linux.git) */
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/firmware.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_gfx.h"
32 #include "amdgpu_ring.h"
33 #include "vi.h"
34 #include "vi_structs.h"
35 #include "vid.h"
36 #include "amdgpu_ucode.h"
37 #include "amdgpu_atombios.h"
38 #include "atombios_i2c.h"
39 #include "clearstate_vi.h"
40
41 #include "gmc/gmc_8_2_d.h"
42 #include "gmc/gmc_8_2_sh_mask.h"
43
44 #include "oss/oss_3_0_d.h"
45 #include "oss/oss_3_0_sh_mask.h"
46
47 #include "bif/bif_5_0_d.h"
48 #include "bif/bif_5_0_sh_mask.h"
49 #include "gca/gfx_8_0_d.h"
50 #include "gca/gfx_8_0_enum.h"
51 #include "gca/gfx_8_0_sh_mask.h"
52
53 #include "dce/dce_10_0_d.h"
54 #include "dce/dce_10_0_sh_mask.h"
55
56 #include "smu/smu_7_1_3_d.h"
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
/* GFX v8 uses a single GFX ring; each MEC pipe gets a 4 KiB HPD region. */
#define GFX8_NUM_GFX_RINGS     1
#define GFX8_MEC_HPD_SIZE 4096

/* Golden GB_ADDR_CONFIG values, per ASIC family. */
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003

/* Helpers for composing tile-mode and macrotile-mode register fields. */
#define ARRAY_MODE(x)                                   ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)                                  ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)                                   ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x)                          ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x)                                 ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)                                   ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)                                  ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)                            ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)                                    ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

/* RLC_CGTT_MGCG_OVERRIDE bit masks (clockgating override controls). */
#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L

/* BPM SERDES CMD */
#define SET_BPM_SERDES_CMD    1
#define CLE_BPM_SERDES_CMD    0

/* BPM Register Address*/
enum {
	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX
};

/* Number of entries in the RLC direct-register list format. */
#define RLC_FormatDirectRegListLength        14
100
101 MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
102 MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
103 MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
104 MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
105 MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
106 MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
107
108 MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
109 MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
110 MODULE_FIRMWARE("amdgpu/stoney_me.bin");
111 MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
112 MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
113
114 MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
115 MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
116 MODULE_FIRMWARE("amdgpu/tonga_me.bin");
117 MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
118 MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
119 MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");
120
121 MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
122 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
123 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
124 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
125 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
126
127 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
128 MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
129 MODULE_FIRMWARE("amdgpu/fiji_me.bin");
130 MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
131 MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
132 MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
133
134 MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
135 MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
136 MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
137 MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
138 MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
139 MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
140 MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
141 MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
142 MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
143 MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
144 MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
145
146 MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
147 MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
148 MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
149 MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
150 MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
151 MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
152 MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
153 MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
154 MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
155 MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
156 MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
157
158 MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
159 MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
160 MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
161 MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
162 MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
163 MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
164 MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
165 MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
166 MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
167 MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
168 MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
169
170 MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
171 MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
172 MODULE_FIRMWARE("amdgpu/vegam_me.bin");
173 MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
174 MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
175 MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
176
177 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
178 {
179         {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
180         {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
181         {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
182         {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
183         {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
184         {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
185         {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
186         {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
187         {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
188         {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
189         {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
190         {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
191         {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
192         {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
193         {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
194         {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
195 };
196
197 static const u32 golden_settings_tonga_a11[] =
198 {
199         mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
200         mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
201         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
202         mmGB_GPU_ID, 0x0000000f, 0x00000000,
203         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
204         mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
205         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
206         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
207         mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
208         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
209         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
210         mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
211         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
212         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
213         mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
214         mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
215 };
216
217 static const u32 tonga_golden_common_all[] =
218 {
219         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
220         mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
221         mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
222         mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
223         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
224         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
225         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
226         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
227 };
228
229 static const u32 tonga_mgcg_cgcg_init[] =
230 {
231         mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
232         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
233         mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
234         mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
235         mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
236         mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
237         mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
238         mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
239         mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
240         mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
241         mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
242         mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
243         mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
244         mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
245         mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
246         mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
247         mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
248         mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
249         mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
250         mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
251         mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
252         mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
253         mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
254         mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
255         mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
256         mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
257         mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
258         mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
259         mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
260         mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
261         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
262         mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
263         mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
264         mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
265         mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
266         mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
267         mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
268         mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
269         mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
270         mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
271         mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
272         mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
273         mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
274         mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
275         mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
276         mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
277         mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
278         mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
279         mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
280         mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
281         mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
282         mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
283         mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
284         mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
285         mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
286         mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
287         mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
288         mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
289         mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
290         mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
291         mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
292         mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
293         mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
294         mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
295         mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
296         mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
297         mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
298         mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
299         mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
300         mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
301         mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
302         mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
303         mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
304         mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
305         mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
306 };
307
308 static const u32 golden_settings_vegam_a11[] =
309 {
310         mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
311         mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
312         mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
313         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
314         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
315         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
316         mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
317         mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
318         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
319         mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
320         mmSQ_CONFIG, 0x07f80000, 0x01180000,
321         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
322         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
323         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
324         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
325         mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
326         mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
327 };
328
329 static const u32 vegam_golden_common_all[] =
330 {
331         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
332         mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
333         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
334         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
335         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
336         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
337 };
338
339 static const u32 golden_settings_polaris11_a11[] =
340 {
341         mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
342         mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
343         mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
344         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
345         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
346         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
347         mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
348         mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
349         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
350         mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
351         mmSQ_CONFIG, 0x07f80000, 0x01180000,
352         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
353         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
354         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
355         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
356         mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
357         mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
358 };
359
360 static const u32 polaris11_golden_common_all[] =
361 {
362         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
363         mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
364         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
365         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
366         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
367         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
368 };
369
370 static const u32 golden_settings_polaris10_a11[] =
371 {
372         mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
373         mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
374         mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
375         mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
376         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
377         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
378         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
379         mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
380         mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
381         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
382         mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
383         mmSQ_CONFIG, 0x07f80000, 0x07180000,
384         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
385         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
386         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
387         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
388         mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
389 };
390
391 static const u32 polaris10_golden_common_all[] =
392 {
393         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
394         mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
395         mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
396         mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
397         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
398         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
399         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
400         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
401 };
402
403 static const u32 fiji_golden_common_all[] =
404 {
405         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
406         mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
407         mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
408         mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
409         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
410         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
411         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
412         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
413         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
414         mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
415 };
416
417 static const u32 golden_settings_fiji_a10[] =
418 {
419         mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
420         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
421         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
422         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
423         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
424         mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
425         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
426         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
427         mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
428         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
429         mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
430 };
431
432 static const u32 fiji_mgcg_cgcg_init[] =
433 {
434         mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
435         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
436         mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
437         mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
438         mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
439         mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
440         mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
441         mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
442         mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
443         mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
444         mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
445         mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
446         mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
447         mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
448         mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
449         mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
450         mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
451         mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
452         mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
453         mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
454         mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
455         mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
456         mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
457         mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
458         mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
459         mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
460         mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
461         mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
462         mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
463         mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
464         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
465         mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
466         mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
467         mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
468         mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
469 };
470
471 static const u32 golden_settings_iceland_a11[] =
472 {
473         mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
474         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
475         mmDB_DEBUG3, 0xc0000000, 0xc0000000,
476         mmGB_GPU_ID, 0x0000000f, 0x00000000,
477         mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
478         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
479         mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
480         mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
481         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
482         mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
483         mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
484         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
485         mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
486         mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
487         mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
488         mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
489 };
490
491 static const u32 iceland_golden_common_all[] =
492 {
493         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
494         mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
495         mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
496         mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
497         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
498         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
499         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
500         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
501 };
502
503 static const u32 iceland_mgcg_cgcg_init[] =
504 {
505         mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
506         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
507         mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
508         mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
509         mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
510         mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
511         mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
512         mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
513         mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
514         mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
515         mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
516         mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
517         mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
518         mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
519         mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
520         mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
521         mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
522         mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
523         mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
524         mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
525         mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
526         mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
527         mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
528         mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
529         mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
530         mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
531         mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
532         mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
533         mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
534         mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
535         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
536         mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
537         mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
538         mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
539         mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
540         mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
541         mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
542         mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
543         mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
544         mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
545         mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
546         mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
547         mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
548         mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
549         mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
550         mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
551         mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
552         mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
553         mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
554         mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
555         mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
556         mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
557         mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
558         mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
559         mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
560         mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
561         mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
562         mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
563         mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
564         mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
565         mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
566         mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
567         mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
568         mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
569 };
570
571 static const u32 cz_golden_settings_a11[] =
572 {
573         mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
574         mmDB_DEBUG2, 0xf00fffff, 0x00000400,
575         mmGB_GPU_ID, 0x0000000f, 0x00000000,
576         mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
577         mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
578         mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
579         mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
580         mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
581         mmTCC_CTRL, 0x00100000, 0xf31fff7f,
582         mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
583         mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
584         mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
585 };
586
587 static const u32 cz_golden_common_all[] =
588 {
589         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
590         mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
591         mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
592         mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
593         mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
594         mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
595         mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
596         mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
597 };
598
599 static const u32 cz_mgcg_cgcg_init[] =
600 {
601         mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
602         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
603         mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
604         mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
605         mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
606         mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
607         mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
608         mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
609         mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
610         mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
611         mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
612         mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
613         mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
614         mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
615         mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
616         mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
617         mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
618         mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
619         mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
620         mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
621         mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
622         mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
623         mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
624         mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
625         mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
626         mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
627         mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
628         mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
629         mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
630         mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
631         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
632         mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
633         mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
634         mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
635         mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
636         mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
637         mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
638         mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
639         mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
640         mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
641         mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
642         mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
643         mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
644         mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
645         mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
646         mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
647         mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
648         mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
649         mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
650         mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
651         mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
652         mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
653         mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
654         mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
655         mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
656         mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
657         mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
658         mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
659         mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
660         mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
661         mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
662         mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
663         mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
664         mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
665         mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
666         mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
667         mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
668         mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
669         mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
670         mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
671         mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
672         mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
673         mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
674         mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
675         mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
676 };
677
/*
 * Stoney "golden" register overrides, stored as {register, mask, value}
 * triplets and applied by amdgpu_device_program_register_sequence() from
 * gfx_v8_0_init_golden_registers().  Values come from AMD hardware teams;
 * do not tune by hand.
 */
static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};
691
/*
 * Stoney common golden settings ({register, mask, value} triplets):
 * broadcast GRBM index, raster config, address config and per-CU SPI
 * resource reservations.  Applied last in the Stoney branch of
 * gfx_v8_0_init_golden_registers().
 */
static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
703
/*
 * Stoney medium-grain / coarse-grain clock-gating init sequence
 * ({register, mask, value} triplets), programmed first in the Stoney
 * branch of gfx_v8_0_init_golden_registers().
 */
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};
712
713
/*
 * Human-readable descriptions of the SQ EDC error source field, used when
 * reporting ECC/EDC errors.  NOTE(review): entry order appears to mirror
 * the hardware SQ_EDC_INFO SOURCE encoding (INVALID, INST, SGPR, VGPR,
 * LDS, GDS, TA) so the raw field value can index this table directly --
 * confirm against the register spec before reordering.
 */
static const char * const sq_edc_source_names[] = {
	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};
723
724 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
725 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
726 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
727 static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
728 static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
729 static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
730 static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
731 static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);
732
733 #define CG_ACLK_CNTL__ACLK_DIVIDER_MASK                    0x0000007fL
734 #define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT                  0x00000000L
735
/*
 * gfx_v8_0_init_golden_registers - program per-ASIC "golden" register values
 * @adev: amdgpu device
 *
 * Applies the AMD-provided golden register sequences (clock gating init,
 * a11/a10 settings, common settings) for the detected ASIC.  Each sequence
 * is a table of {register, mask, value} triplets consumed by
 * amdgpu_device_program_register_sequence().  Unknown ASICs are silently
 * left at hardware defaults.
 */
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
							iceland_golden_common_all,
							ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
							fiji_golden_common_all,
							ARRAY_SIZE(fiji_golden_common_all));
		break;

	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
							tonga_golden_common_all,
							ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_vegam_a11,
							ARRAY_SIZE(golden_settings_vegam_a11));
		amdgpu_device_program_register_sequence(adev,
							vegam_golden_common_all,
							ARRAY_SIZE(vegam_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris11_golden_common_all,
							ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris10_golden_common_all,
							ARRAY_SIZE(polaris10_golden_common_all));
		/* Set the ACLK divider via read-modify-write of CG_ACLK_CNTL
		 * through the SMC register space. */
		data = RREG32_SMC(ixCG_ACLK_CNTL);
		data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK;
		data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT;
		WREG32_SMC(ixCG_ACLK_CNTL, data);
		/* Board-specific quirk for three known Polaris10 (0x67DF rev
		 * 0xc7) SKUs: issue two I2C transactions through the
		 * atombios I2C channel.  NOTE(review): the payload bytes are
		 * opaque magic from AMD -- presumably reprogramming an
		 * on-board component; do not alter without vendor input. */
		if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_common_all,
							ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_common_all,
							ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}
837
838 static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
839 {
840         struct amdgpu_device *adev = ring->adev;
841         uint32_t tmp = 0;
842         unsigned i;
843         int r;
844
845         WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
846         r = amdgpu_ring_alloc(ring, 3);
847         if (r)
848                 return r;
849
850         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
851         amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
852         amdgpu_ring_write(ring, 0xDEADBEEF);
853         amdgpu_ring_commit(ring);
854
855         for (i = 0; i < adev->usec_timeout; i++) {
856                 tmp = RREG32(mmSCRATCH_REG0);
857                 if (tmp == 0xDEADBEEF)
858                         break;
859                 udelay(1);
860         }
861
862         if (i >= adev->usec_timeout)
863                 r = -ETIMEDOUT;
864
865         return r;
866 }
867
/*
 * gfx_v8_0_ring_test_ib - test indirect buffer submission on a ring
 * @ring: ring to test
 * @timeout: fence wait timeout in jiffies
 *
 * Allocates a writeback (WB) slot seeded with 0xCAFEDEAD, submits a small
 * IB containing a WRITE_DATA packet that stores 0xDEADBEEF to that slot,
 * waits for the fence, then verifies the value landed.
 *
 * Returns 0 on success, -ETIMEDOUT if the fence never signaled, -EINVAL
 * if the write did not land, or a negative error from the WB/IB helpers.
 */
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	/* WB slots are 32-bit; compute the GPU address of ours and seed it. */
	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	/* WRITE_DATA: dst_sel 5 (memory) with write confirmation, 64-bit
	 * destination address, single payload dword. */
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	/* dma_fence_wait_timeout(): 0 means timeout, <0 is an error,
	 * >0 is remaining jiffies (success). */
	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	/* goto-based cleanup: release in reverse order of acquisition. */
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
923
924
925 static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
926 {
927         amdgpu_ucode_release(&adev->gfx.pfp_fw);
928         amdgpu_ucode_release(&adev->gfx.me_fw);
929         amdgpu_ucode_release(&adev->gfx.ce_fw);
930         amdgpu_ucode_release(&adev->gfx.rlc_fw);
931         amdgpu_ucode_release(&adev->gfx.mec_fw);
932         if ((adev->asic_type != CHIP_STONEY) &&
933             (adev->asic_type != CHIP_TOPAZ))
934                 amdgpu_ucode_release(&adev->gfx.mec2_fw);
935
936         kfree(adev->gfx.rlc.register_list_format);
937 }
938
939 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
940 {
941         const char *chip_name;
942         int err;
943         struct amdgpu_firmware_info *info = NULL;
944         const struct common_firmware_header *header = NULL;
945         const struct gfx_firmware_header_v1_0 *cp_hdr;
946         const struct rlc_firmware_header_v2_0 *rlc_hdr;
947         unsigned int *tmp = NULL, i;
948
949         DRM_DEBUG("\n");
950
951         switch (adev->asic_type) {
952         case CHIP_TOPAZ:
953                 chip_name = "topaz";
954                 break;
955         case CHIP_TONGA:
956                 chip_name = "tonga";
957                 break;
958         case CHIP_CARRIZO:
959                 chip_name = "carrizo";
960                 break;
961         case CHIP_FIJI:
962                 chip_name = "fiji";
963                 break;
964         case CHIP_STONEY:
965                 chip_name = "stoney";
966                 break;
967         case CHIP_POLARIS10:
968                 chip_name = "polaris10";
969                 break;
970         case CHIP_POLARIS11:
971                 chip_name = "polaris11";
972                 break;
973         case CHIP_POLARIS12:
974                 chip_name = "polaris12";
975                 break;
976         case CHIP_VEGAM:
977                 chip_name = "vegam";
978                 break;
979         default:
980                 BUG();
981         }
982
983         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
984                 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
985                                            AMDGPU_UCODE_OPTIONAL,
986                                            "amdgpu/%s_pfp_2.bin", chip_name);
987                 if (err == -ENODEV) {
988                         err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
989                                                    AMDGPU_UCODE_REQUIRED,
990                                                    "amdgpu/%s_pfp.bin", chip_name);
991                 }
992         } else {
993                 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
994                                            AMDGPU_UCODE_REQUIRED,
995                                            "amdgpu/%s_pfp.bin", chip_name);
996         }
997         if (err)
998                 goto out;
999         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1000         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1001         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1002
1003         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1004                 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1005                                            AMDGPU_UCODE_OPTIONAL,
1006                                            "amdgpu/%s_me_2.bin", chip_name);
1007                 if (err == -ENODEV) {
1008                         err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1009                                                    AMDGPU_UCODE_REQUIRED,
1010                                                    "amdgpu/%s_me.bin", chip_name);
1011                 }
1012         } else {
1013                 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
1014                                            AMDGPU_UCODE_REQUIRED,
1015                                            "amdgpu/%s_me.bin", chip_name);
1016         }
1017         if (err)
1018                 goto out;
1019         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1020         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1021
1022         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1023
1024         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1025                 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1026                                            AMDGPU_UCODE_OPTIONAL,
1027                                            "amdgpu/%s_ce_2.bin", chip_name);
1028                 if (err == -ENODEV) {
1029                         err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1030                                                    AMDGPU_UCODE_REQUIRED,
1031                                                    "amdgpu/%s_ce.bin", chip_name);
1032                 }
1033         } else {
1034                 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
1035                                            AMDGPU_UCODE_REQUIRED,
1036                                            "amdgpu/%s_ce.bin", chip_name);
1037         }
1038         if (err)
1039                 goto out;
1040         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1041         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1042         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1043
1044         /*
1045          * Support for MCBP/Virtualization in combination with chained IBs is
1046          * formal released on feature version #46
1047          */
1048         if (adev->gfx.ce_feature_version >= 46 &&
1049             adev->gfx.pfp_feature_version >= 46) {
1050                 adev->virt.chained_ib_support = true;
1051                 DRM_INFO("Chained IB support enabled!\n");
1052         } else
1053                 adev->virt.chained_ib_support = false;
1054
1055         err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
1056                                    AMDGPU_UCODE_REQUIRED,
1057                                    "amdgpu/%s_rlc.bin", chip_name);
1058         if (err)
1059                 goto out;
1060         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1061         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1062         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1063
1064         adev->gfx.rlc.save_and_restore_offset =
1065                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1066         adev->gfx.rlc.clear_state_descriptor_offset =
1067                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1068         adev->gfx.rlc.avail_scratch_ram_locations =
1069                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1070         adev->gfx.rlc.reg_restore_list_size =
1071                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1072         adev->gfx.rlc.reg_list_format_start =
1073                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1074         adev->gfx.rlc.reg_list_format_separate_start =
1075                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1076         adev->gfx.rlc.starting_offsets_start =
1077                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1078         adev->gfx.rlc.reg_list_format_size_bytes =
1079                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1080         adev->gfx.rlc.reg_list_size_bytes =
1081                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1082
1083         adev->gfx.rlc.register_list_format =
1084                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1085                                         adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1086
1087         if (!adev->gfx.rlc.register_list_format) {
1088                 err = -ENOMEM;
1089                 goto out;
1090         }
1091
1092         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1093                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1094         for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1095                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1096
1097         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1098
1099         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1100                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1101         for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1102                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1103
1104         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1105                 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1106                                            AMDGPU_UCODE_OPTIONAL,
1107                                            "amdgpu/%s_mec_2.bin", chip_name);
1108                 if (err == -ENODEV) {
1109                         err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1110                                                    AMDGPU_UCODE_REQUIRED,
1111                                                    "amdgpu/%s_mec.bin", chip_name);
1112                 }
1113         } else {
1114                 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
1115                                            AMDGPU_UCODE_REQUIRED,
1116                                            "amdgpu/%s_mec.bin", chip_name);
1117         }
1118         if (err)
1119                 goto out;
1120         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1121         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1122         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1123
1124         if ((adev->asic_type != CHIP_STONEY) &&
1125             (adev->asic_type != CHIP_TOPAZ)) {
1126                 if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1127                         err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1128                                                    AMDGPU_UCODE_OPTIONAL,
1129                                                    "amdgpu/%s_mec2_2.bin", chip_name);
1130                         if (err == -ENODEV) {
1131                                 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1132                                                            AMDGPU_UCODE_REQUIRED,
1133                                                            "amdgpu/%s_mec2.bin", chip_name);
1134                         }
1135                 } else {
1136                         err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
1137                                                    AMDGPU_UCODE_REQUIRED,
1138                                                    "amdgpu/%s_mec2.bin", chip_name);
1139                 }
1140                 if (!err) {
1141                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1142                                 adev->gfx.mec2_fw->data;
1143                         adev->gfx.mec2_fw_version =
1144                                 le32_to_cpu(cp_hdr->header.ucode_version);
1145                         adev->gfx.mec2_feature_version =
1146                                 le32_to_cpu(cp_hdr->ucode_feature_version);
1147                 } else {
1148                         err = 0;
1149                         adev->gfx.mec2_fw = NULL;
1150                 }
1151         }
1152
1153         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1154         info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1155         info->fw = adev->gfx.pfp_fw;
1156         header = (const struct common_firmware_header *)info->fw->data;
1157         adev->firmware.fw_size +=
1158                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1159
1160         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1161         info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1162         info->fw = adev->gfx.me_fw;
1163         header = (const struct common_firmware_header *)info->fw->data;
1164         adev->firmware.fw_size +=
1165                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1166
1167         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1168         info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1169         info->fw = adev->gfx.ce_fw;
1170         header = (const struct common_firmware_header *)info->fw->data;
1171         adev->firmware.fw_size +=
1172                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1173
1174         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1175         info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1176         info->fw = adev->gfx.rlc_fw;
1177         header = (const struct common_firmware_header *)info->fw->data;
1178         adev->firmware.fw_size +=
1179                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1180
1181         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1182         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1183         info->fw = adev->gfx.mec_fw;
1184         header = (const struct common_firmware_header *)info->fw->data;
1185         adev->firmware.fw_size +=
1186                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1187
1188         /* we need account JT in */
1189         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1190         adev->firmware.fw_size +=
1191                 ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
1192
1193         if (amdgpu_sriov_vf(adev)) {
1194                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
1195                 info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
1196                 info->fw = adev->gfx.mec_fw;
1197                 adev->firmware.fw_size +=
1198                         ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
1199         }
1200
1201         if (adev->gfx.mec2_fw) {
1202                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1203                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1204                 info->fw = adev->gfx.mec2_fw;
1205                 header = (const struct common_firmware_header *)info->fw->data;
1206                 adev->firmware.fw_size +=
1207                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1208         }
1209
1210 out:
1211         if (err) {
1212                 dev_err(adev->dev, "gfx8: Failed to load firmware %s gfx firmware\n", chip_name);
1213                 amdgpu_ucode_release(&adev->gfx.pfp_fw);
1214                 amdgpu_ucode_release(&adev->gfx.me_fw);
1215                 amdgpu_ucode_release(&adev->gfx.ce_fw);
1216                 amdgpu_ucode_release(&adev->gfx.rlc_fw);
1217                 amdgpu_ucode_release(&adev->gfx.mec_fw);
1218                 amdgpu_ucode_release(&adev->gfx.mec2_fw);
1219         }
1220         return err;
1221 }
1222
1223 static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
1224                                     volatile u32 *buffer)
1225 {
1226         u32 count = 0, i;
1227         const struct cs_section_def *sect = NULL;
1228         const struct cs_extent_def *ext = NULL;
1229
1230         if (adev->gfx.rlc.cs_data == NULL)
1231                 return;
1232         if (buffer == NULL)
1233                 return;
1234
1235         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1236         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1237
1238         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
1239         buffer[count++] = cpu_to_le32(0x80000000);
1240         buffer[count++] = cpu_to_le32(0x80000000);
1241
1242         for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1243                 for (ext = sect->section; ext->extent != NULL; ++ext) {
1244                         if (sect->id == SECT_CONTEXT) {
1245                                 buffer[count++] =
1246                                         cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
1247                                 buffer[count++] = cpu_to_le32(ext->reg_index -
1248                                                 PACKET3_SET_CONTEXT_REG_START);
1249                                 for (i = 0; i < ext->reg_count; i++)
1250                                         buffer[count++] = cpu_to_le32(ext->extent[i]);
1251                         } else {
1252                                 return;
1253                         }
1254                 }
1255         }
1256
1257         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
1258         buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
1259                         PACKET3_SET_CONTEXT_REG_START);
1260         buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
1261         buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
1262
1263         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1264         buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
1265
1266         buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
1267         buffer[count++] = cpu_to_le32(0);
1268 }
1269
1270 static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
1271 {
1272         if (adev->asic_type == CHIP_CARRIZO)
1273                 return 5;
1274         else
1275                 return 4;
1276 }
1277
1278 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1279 {
1280         const struct cs_section_def *cs_data;
1281         int r;
1282
1283         adev->gfx.rlc.cs_data = vi_cs_data;
1284
1285         cs_data = adev->gfx.rlc.cs_data;
1286
1287         if (cs_data) {
1288                 /* init clear state block */
1289                 r = amdgpu_gfx_rlc_init_csb(adev);
1290                 if (r)
1291                         return r;
1292         }
1293
1294         if ((adev->asic_type == CHIP_CARRIZO) ||
1295             (adev->asic_type == CHIP_STONEY)) {
1296                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1297                 r = amdgpu_gfx_rlc_init_cpt(adev);
1298                 if (r)
1299                         return r;
1300         }
1301
1302         /* init spm vmid with 0xf */
1303         if (adev->gfx.rlc.funcs->update_spm_vmid)
1304                 adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
1305
1306         return 0;
1307 }
1308
/* Free the MEC HPD EOP buffer object allocated by gfx_v8_0_mec_init(). */
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
1313
1314 static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
1315 {
1316         int r;
1317         u32 *hpd;
1318         size_t mec_hpd_size;
1319
1320         bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1321
1322         /* take ownership of the relevant compute queues */
1323         amdgpu_gfx_compute_queue_acquire(adev);
1324
1325         mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
1326         if (mec_hpd_size) {
1327                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1328                                               AMDGPU_GEM_DOMAIN_VRAM |
1329                                               AMDGPU_GEM_DOMAIN_GTT,
1330                                               &adev->gfx.mec.hpd_eop_obj,
1331                                               &adev->gfx.mec.hpd_eop_gpu_addr,
1332                                               (void **)&hpd);
1333                 if (r) {
1334                         dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1335                         return r;
1336                 }
1337
1338                 memset(hpd, 0, mec_hpd_size);
1339
1340                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1341                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1342         }
1343
1344         return 0;
1345 }
1346
/*
 * Hand-assembled GCN shader words used by gfx_v8_0_do_edc_gpr_workarounds()
 * to initialize VGPRs before enabling EDC (see the mmCOMPUTE_PGM_LO setup
 * there, which points the dispatch at this code).
 */
static const u32 vgpr_init_compute_shader[] =
{
	0x7e000209, 0x7e020208,
	0x7e040207, 0x7e060206,
	0x7e080205, 0x7e0a0204,
	0x7e0c0203, 0x7e0e0202,
	0x7e100201, 0x7e120200,
	0x7e140209, 0x7e160208,
	0x7e180207, 0x7e1a0206,
	0x7e1c0205, 0x7e1e0204,
	0x7e200203, 0x7e220202,
	0x7e240201, 0x7e260200,
	0x7e280209, 0x7e2a0208,
	0x7e2c0207, 0x7e2e0206,
	0x7e300205, 0x7e320204,
	0x7e340203, 0x7e360202,
	0x7e380201, 0x7e3a0200,
	0x7e3c0209, 0x7e3e0208,
	0x7e400207, 0x7e420206,
	0x7e440205, 0x7e460204,
	0x7e480203, 0x7e4a0202,
	0x7e4c0201, 0x7e4e0200,
	0x7e500209, 0x7e520208,
	0x7e540207, 0x7e560206,
	0x7e580205, 0x7e5a0204,
	0x7e5c0203, 0x7e5e0202,
	0x7e600201, 0x7e620200,
	0x7e640209, 0x7e660208,
	0x7e680207, 0x7e6a0206,
	0x7e6c0205, 0x7e6e0204,
	0x7e700203, 0x7e720202,
	0x7e740201, 0x7e760200,
	0x7e780209, 0x7e7a0208,
	0x7e7c0207, 0x7e7e0206,
	0xbf8a0000, 0xbf810000,
};
1383
/*
 * Hand-assembled GCN shader words used by gfx_v8_0_do_edc_gpr_workarounds()
 * to initialize SGPRs; dispatched twice (SGPR1/SGPR2 passes) with different
 * mmCOMPUTE_STATIC_THREAD_MGMT_SE0 masks.
 */
static const u32 sgpr_init_compute_shader[] =
{
	0xbe8a0100, 0xbe8c0102,
	0xbe8e0104, 0xbe900106,
	0xbe920108, 0xbe940100,
	0xbe960102, 0xbe980104,
	0xbe9a0106, 0xbe9c0108,
	0xbe9e0100, 0xbea00102,
	0xbea20104, 0xbea40106,
	0xbea60108, 0xbea80100,
	0xbeaa0102, 0xbeac0104,
	0xbeae0106, 0xbeb00108,
	0xbeb20100, 0xbeb40102,
	0xbeb60104, 0xbeb80106,
	0xbeba0108, 0xbebc0100,
	0xbebe0102, 0xbec00104,
	0xbec20106, 0xbec40108,
	0xbec60100, 0xbec80102,
	0xbee60004, 0xbee70005,
	0xbeea0006, 0xbeeb0007,
	0xbee80008, 0xbee90009,
	0xbefc0000, 0xbf8a0000,
	0xbf810000, 0x00000000,
};
1408
/*
 * Register/value pairs programmed via SET_SH_REG before the VGPR-init
 * dispatch in gfx_v8_0_do_edc_gpr_workarounds().
 */
static const u32 vgpr_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*4,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
1429
/*
 * Register/value pairs for the first SGPR-init dispatch in
 * gfx_v8_0_do_edc_gpr_workarounds() (SE0 thread-mgmt mask 0x0f).
 */
static const u32 sgpr1_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
1450
/*
 * Register/value pairs for the second SGPR-init dispatch in
 * gfx_v8_0_do_edc_gpr_workarounds() (SE0 thread-mgmt mask 0xf0,
 * complementing sgpr1_init_regs' 0x0f).
 */
static const u32 sgpr2_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1, same as sgpr1_init_regs */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
1471
/*
 * SEC/DED error-counter registers read back (to clear them) at the end of
 * gfx_v8_0_do_edc_gpr_workarounds().
 */
static const u32 sec_ded_counter_registers[] =
{
	mmCPC_EDC_ATC_CNT,
	mmCPC_EDC_SCRATCH_CNT,
	mmCPC_EDC_UCODE_CNT,
	mmCPF_EDC_ATC_CNT,
	mmCPF_EDC_ROQ_CNT,
	mmCPF_EDC_TAG_CNT,
	mmCPG_EDC_ATC_CNT,
	mmCPG_EDC_DMA_CNT,
	mmCPG_EDC_TAG_CNT,
	mmDC_EDC_CSINVOC_CNT,
	mmDC_EDC_RESTORE_CNT,
	mmDC_EDC_STATE_CNT,
	mmGDS_EDC_CNT,
	mmGDS_EDC_GRBM_CNT,
	mmGDS_EDC_OA_DED,
	mmSPI_EDC_CNT,
	mmSQC_ATC_EDC_GATCL1_CNT,
	mmSQC_EDC_CNT,
	mmSQ_EDC_DED_CNT,
	mmSQ_EDC_INFO,
	mmSQ_EDC_SEC_CNT,
	mmTCC_EDC_CNT,
	mmTCP_ATC_EDC_GATCL1_CNT,
	mmTCP_EDC_CNT,
	mmTD_EDC_CNT
};
1500
/*
 * Carrizo-only EDC GPR workaround: build one IB that runs three compute
 * dispatches (one VGPR-init pass, two complementary SGPR-init passes),
 * wait for it synchronously, then enable the EDC/DED modes and read back
 * all SEC/DED counters to clear them.  Returns 0 on success (or when
 * skipped), negative error code otherwise.
 */
static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r, i;
	u32 tmp;
	unsigned total_size, vgpr_offset, sgpr_offset;
	u64 gpu_addr;

	/* only supported on CZ */
	if (adev->asic_type != CHIP_CARRIZO)
		return 0;

	/* bail if the compute ring is not ready */
	if (!ring->sched.ready)
		return 0;

	/* save GB_EDC_MODE and disable EDC while the shaders run */
	tmp = RREG32(mmGB_EDC_MODE);
	WREG32(mmGB_EDC_MODE, 0);

	/* per dispatch: 3 dwords per reg pair + PGM_LO/HI write (4) +
	 * dispatch packet (5) + CS partial flush (2), in dwords * 4 */
	total_size =
		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size = ALIGN(total_size, 256);
	vgpr_offset = total_size;
	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
	sgpr_offset = total_size;
	total_size += sizeof(sgpr_init_compute_shader);

	/* allocate an indirect buffer to put the commands in */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, total_size,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	/* load the compute shaders */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];

	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

	/* init the ib length to 0 */
	ib.length_dw = 0;

	/* VGPR */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR1 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR2: same shader at sgpr_offset as SGPR1, only the register
	 * state (thread-mgmt mask) differs */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* schedule the ib on the ring */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r) {
		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
		goto fail;
	}

	/* wait for the GPU to finish processing the IB */
	r = dma_fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto fail;
	}

	/* re-enable EDC on top of the value saved at entry */
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
	WREG32(mmGB_EDC_MODE, tmp);

	tmp = RREG32(mmCC_GC_EDC_CONFIG);
	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
	WREG32(mmCC_GC_EDC_CONFIG, tmp);


	/* read back registers to clear the counters */
	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
		RREG32(sec_ded_counter_registers[i]);

fail:
	/* f may still be NULL here if submit failed; dma_fence_put(NULL) is a no-op */
	amdgpu_ib_free(&ib, NULL);
	dma_fence_put(f);

	return r;
}
1664
/*
 * Populate adev->gfx.config for the detected ASIC: shader-engine / CU / RB
 * topology, scan-converter FIFO sizes and the golden GB_ADDR_CONFIG value,
 * then derive memory bank/rank counts and row size from the MC arbiter RAM
 * config (DIMM fuse registers on APUs) and fold the row size back into
 * GB_ADDR_CONFIG.  Returns 0 on success or the atombios query error.
 */
static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;
	int ret;

	/* per-ASIC fixed topology; Polaris parts query it from atombios */
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		/* topology comes from the vbios on Polaris */
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		/* topology comes from the vbios on Polaris */
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_STONEY:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_cu_per_sh = 3;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		/* conservative fallback for unknown VI parts */
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		/* discrete: row size from column count, capped at 4KB */
		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}

	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
		break;
	case 2:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
		break;
	case 4:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;

	return 0;
}
1872
1873 static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1874                                         int mec, int pipe, int queue)
1875 {
1876         int r;
1877         unsigned irq_type;
1878         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1879         unsigned int hw_prio;
1880
1881         ring = &adev->gfx.compute_ring[ring_id];
1882
1883         /* mec0 is me1 */
1884         ring->me = mec + 1;
1885         ring->pipe = pipe;
1886         ring->queue = queue;
1887
1888         ring->ring_obj = NULL;
1889         ring->use_doorbell = true;
1890         ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
1891         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1892                                 + (ring_id * GFX8_MEC_HPD_SIZE);
1893         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1894
1895         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1896                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1897                 + ring->pipe;
1898
1899         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1900                         AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
1901         /* type-2 packets are deprecated on MEC, use type-3 instead */
1902         r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1903                              hw_prio, NULL);
1904         if (r)
1905                 return r;
1906
1907
1908         return 0;
1909 }
1910
1911 static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
1912
1913 static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
1914 {
1915         int i, j, k, r, ring_id;
1916         int xcc_id = 0;
1917         struct amdgpu_ring *ring;
1918         struct amdgpu_device *adev = ip_block->adev;
1919
1920         switch (adev->asic_type) {
1921         case CHIP_TONGA:
1922         case CHIP_CARRIZO:
1923         case CHIP_FIJI:
1924         case CHIP_POLARIS10:
1925         case CHIP_POLARIS11:
1926         case CHIP_POLARIS12:
1927         case CHIP_VEGAM:
1928                 adev->gfx.mec.num_mec = 2;
1929                 break;
1930         case CHIP_TOPAZ:
1931         case CHIP_STONEY:
1932         default:
1933                 adev->gfx.mec.num_mec = 1;
1934                 break;
1935         }
1936
1937         adev->gfx.mec.num_pipe_per_mec = 4;
1938         adev->gfx.mec.num_queue_per_pipe = 8;
1939
1940         /* EOP Event */
1941         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
1942         if (r)
1943                 return r;
1944
1945         /* Privileged reg */
1946         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
1947                               &adev->gfx.priv_reg_irq);
1948         if (r)
1949                 return r;
1950
1951         /* Privileged inst */
1952         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
1953                               &adev->gfx.priv_inst_irq);
1954         if (r)
1955                 return r;
1956
1957         /* Add CP EDC/ECC irq  */
1958         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
1959                               &adev->gfx.cp_ecc_error_irq);
1960         if (r)
1961                 return r;
1962
1963         /* SQ interrupts. */
1964         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
1965                               &adev->gfx.sq_irq);
1966         if (r) {
1967                 DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
1968                 return r;
1969         }
1970
1971         INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
1972
1973         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1974
1975         r = gfx_v8_0_init_microcode(adev);
1976         if (r) {
1977                 DRM_ERROR("Failed to load gfx firmware!\n");
1978                 return r;
1979         }
1980
1981         r = adev->gfx.rlc.funcs->init(adev);
1982         if (r) {
1983                 DRM_ERROR("Failed to init rlc BOs!\n");
1984                 return r;
1985         }
1986
1987         r = gfx_v8_0_mec_init(adev);
1988         if (r) {
1989                 DRM_ERROR("Failed to init MEC BOs!\n");
1990                 return r;
1991         }
1992
1993         /* set up the gfx ring */
1994         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1995                 ring = &adev->gfx.gfx_ring[i];
1996                 ring->ring_obj = NULL;
1997                 sprintf(ring->name, "gfx");
1998                 /* no gfx doorbells on iceland */
1999                 if (adev->asic_type != CHIP_TOPAZ) {
2000                         ring->use_doorbell = true;
2001                         ring->doorbell_index = adev->doorbell_index.gfx_ring0;
2002                 }
2003
2004                 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2005                                      AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
2006                                      AMDGPU_RING_PRIO_DEFAULT, NULL);
2007                 if (r)
2008                         return r;
2009         }
2010
2011
2012         /* set up the compute queues - allocate horizontally across pipes */
2013         ring_id = 0;
2014         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2015                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2016                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2017                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
2018                                                                      k, j))
2019                                         continue;
2020
2021                                 r = gfx_v8_0_compute_ring_init(adev,
2022                                                                 ring_id,
2023                                                                 i, k, j);
2024                                 if (r)
2025                                         return r;
2026
2027                                 ring_id++;
2028                         }
2029                 }
2030         }
2031
2032         r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
2033         if (r) {
2034                 DRM_ERROR("Failed to init KIQ BOs!\n");
2035                 return r;
2036         }
2037
2038         r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
2039         if (r)
2040                 return r;
2041
2042         /* create MQD for all compute queues as well as KIQ for SRIOV case */
2043         r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
2044         if (r)
2045                 return r;
2046
2047         adev->gfx.ce_ram_size = 0x8000;
2048
2049         r = gfx_v8_0_gpu_early_init(adev);
2050         if (r)
2051                 return r;
2052
2053         return 0;
2054 }
2055
2056 static int gfx_v8_0_sw_fini(struct amdgpu_ip_block *ip_block)
2057 {
2058         struct amdgpu_device *adev = ip_block->adev;
2059         int i;
2060
2061         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2062                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2063         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2064                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2065
2066         amdgpu_gfx_mqd_sw_fini(adev, 0);
2067         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2068         amdgpu_gfx_kiq_fini(adev, 0);
2069
2070         gfx_v8_0_mec_fini(adev);
2071         amdgpu_gfx_rlc_fini(adev);
2072         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2073                                 &adev->gfx.rlc.clear_state_gpu_addr,
2074                                 (void **)&adev->gfx.rlc.cs_ptr);
2075         if ((adev->asic_type == CHIP_CARRIZO) ||
2076             (adev->asic_type == CHIP_STONEY)) {
2077                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2078                                 &adev->gfx.rlc.cp_table_gpu_addr,
2079                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2080         }
2081         gfx_v8_0_free_microcode(adev);
2082
2083         return 0;
2084 }
2085
2086 static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2087 {
2088         uint32_t *modearray, *mod2array;
2089         const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2090         const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2091         u32 reg_offset;
2092
2093         modearray = adev->gfx.config.tile_mode_array;
2094         mod2array = adev->gfx.config.macrotile_mode_array;
2095
2096         for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2097                 modearray[reg_offset] = 0;
2098
2099         for (reg_offset = 0; reg_offset <  num_secondary_tile_mode_states; reg_offset++)
2100                 mod2array[reg_offset] = 0;
2101
2102         switch (adev->asic_type) {
2103         case CHIP_TOPAZ:
2104                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2105                                 PIPE_CONFIG(ADDR_SURF_P2) |
2106                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2107                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2108                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2109                                 PIPE_CONFIG(ADDR_SURF_P2) |
2110                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2111                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2112                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2113                                 PIPE_CONFIG(ADDR_SURF_P2) |
2114                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2115                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2116                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2117                                 PIPE_CONFIG(ADDR_SURF_P2) |
2118                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2119                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2120                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2121                                 PIPE_CONFIG(ADDR_SURF_P2) |
2122                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2123                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2124                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2125                                 PIPE_CONFIG(ADDR_SURF_P2) |
2126                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2127                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2128                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2129                                 PIPE_CONFIG(ADDR_SURF_P2) |
2130                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2131                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2132                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2133                                 PIPE_CONFIG(ADDR_SURF_P2));
2134                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2135                                 PIPE_CONFIG(ADDR_SURF_P2) |
2136                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2137                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2138                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2139                                  PIPE_CONFIG(ADDR_SURF_P2) |
2140                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2141                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2142                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2143                                  PIPE_CONFIG(ADDR_SURF_P2) |
2144                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2145                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2146                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2147                                  PIPE_CONFIG(ADDR_SURF_P2) |
2148                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2149                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2150                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2151                                  PIPE_CONFIG(ADDR_SURF_P2) |
2152                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2153                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2154                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2155                                  PIPE_CONFIG(ADDR_SURF_P2) |
2156                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2157                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2158                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2159                                  PIPE_CONFIG(ADDR_SURF_P2) |
2160                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2161                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2162                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2163                                  PIPE_CONFIG(ADDR_SURF_P2) |
2164                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2165                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2166                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2167                                  PIPE_CONFIG(ADDR_SURF_P2) |
2168                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2169                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2170                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2171                                  PIPE_CONFIG(ADDR_SURF_P2) |
2172                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2173                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2174                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2175                                  PIPE_CONFIG(ADDR_SURF_P2) |
2176                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2177                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2178                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2179                                  PIPE_CONFIG(ADDR_SURF_P2) |
2180                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2181                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2182                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2183                                  PIPE_CONFIG(ADDR_SURF_P2) |
2184                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2185                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2186                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2187                                  PIPE_CONFIG(ADDR_SURF_P2) |
2188                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2189                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2190                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2191                                  PIPE_CONFIG(ADDR_SURF_P2) |
2192                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2193                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2194                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2195                                  PIPE_CONFIG(ADDR_SURF_P2) |
2196                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2197                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2198                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2199                                  PIPE_CONFIG(ADDR_SURF_P2) |
2200                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2201                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2202                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2203                                  PIPE_CONFIG(ADDR_SURF_P2) |
2204                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2205                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2206
2207                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2208                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2209                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2210                                 NUM_BANKS(ADDR_SURF_8_BANK));
2211                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2212                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2213                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2214                                 NUM_BANKS(ADDR_SURF_8_BANK));
2215                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2216                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2217                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2218                                 NUM_BANKS(ADDR_SURF_8_BANK));
2219                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2220                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2221                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2222                                 NUM_BANKS(ADDR_SURF_8_BANK));
2223                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2224                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2225                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2226                                 NUM_BANKS(ADDR_SURF_8_BANK));
2227                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2228                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2229                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2230                                 NUM_BANKS(ADDR_SURF_8_BANK));
2231                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2232                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2233                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2234                                 NUM_BANKS(ADDR_SURF_8_BANK));
2235                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2236                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2237                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2238                                 NUM_BANKS(ADDR_SURF_16_BANK));
2239                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2240                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2241                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2242                                 NUM_BANKS(ADDR_SURF_16_BANK));
2243                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2244                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2245                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2246                                  NUM_BANKS(ADDR_SURF_16_BANK));
2247                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2248                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2249                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2250                                  NUM_BANKS(ADDR_SURF_16_BANK));
2251                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2252                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2253                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2254                                  NUM_BANKS(ADDR_SURF_16_BANK));
2255                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2256                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2257                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2258                                  NUM_BANKS(ADDR_SURF_16_BANK));
2259                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2260                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2261                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2262                                  NUM_BANKS(ADDR_SURF_8_BANK));
2263
2264                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2265                         if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2266                             reg_offset != 23)
2267                                 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2268
2269                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2270                         if (reg_offset != 7)
2271                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2272
2273                 break;
2274         case CHIP_FIJI:
2275         case CHIP_VEGAM:
2276                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2277                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2278                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2279                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2280                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2281                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2282                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2283                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2284                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2285                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2286                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2287                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2288                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2289                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2290                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2291                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2292                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2293                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2294                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2295                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2296                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2297                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2298                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2299                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2300                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2301                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2302                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2303                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2304                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2305                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2306                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2307                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2308                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2309                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2310                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2311                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2312                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2313                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2314                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2315                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2316                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2317                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2318                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2319                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2320                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2321                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2322                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2323                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2324                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2325                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2326                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2327                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2328                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2329                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2330                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2331                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2332                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2333                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2334                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2335                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2336                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2337                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2338                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2339                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2340                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2341                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2342                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2343                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2344                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2345                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2346                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2347                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2348                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2349                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2350                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2351                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2352                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2353                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2354                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2355                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2356                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2357                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2358                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2359                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2360                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2361                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2362                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2363                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2364                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2365                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2366                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2367                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2368                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2369                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2370                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2371                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2372                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2373                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2374                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2375                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2376                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2377                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2378                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2379                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2380                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2381                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2382                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2383                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2384                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2385                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2386                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2387                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2388                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2389                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2390                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2391                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2392                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2393                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2394                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2395                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2396                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2397                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2398
2399                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2400                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2401                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2402                                 NUM_BANKS(ADDR_SURF_8_BANK));
2403                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2404                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2405                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2406                                 NUM_BANKS(ADDR_SURF_8_BANK));
2407                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2408                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2409                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2410                                 NUM_BANKS(ADDR_SURF_8_BANK));
2411                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2412                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2413                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2414                                 NUM_BANKS(ADDR_SURF_8_BANK));
2415                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2416                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2417                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2418                                 NUM_BANKS(ADDR_SURF_8_BANK));
2419                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2420                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2421                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2422                                 NUM_BANKS(ADDR_SURF_8_BANK));
2423                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2424                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2425                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2426                                 NUM_BANKS(ADDR_SURF_8_BANK));
2427                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2428                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2429                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2430                                 NUM_BANKS(ADDR_SURF_8_BANK));
2431                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2432                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2433                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2434                                 NUM_BANKS(ADDR_SURF_8_BANK));
2435                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2436                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2437                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2438                                  NUM_BANKS(ADDR_SURF_8_BANK));
2439                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2440                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2441                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2442                                  NUM_BANKS(ADDR_SURF_8_BANK));
2443                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2444                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2445                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2446                                  NUM_BANKS(ADDR_SURF_8_BANK));
2447                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2448                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2449                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2450                                  NUM_BANKS(ADDR_SURF_8_BANK));
2451                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2452                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2453                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2454                                  NUM_BANKS(ADDR_SURF_4_BANK));
2455
2456                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2457                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2458
2459                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2460                         if (reg_offset != 7)
2461                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2462
2463                 break;
2464         case CHIP_TONGA:
                /*
                 * Tonga GB_TILE_MODE table: modearray[i] is programmed into
                 * mmGB_TILE_MODE0 + i by the loop at the end of this case.
                 * Most entries use the 8-pipe ADDR_SURF_P8_32x32_16x16 pipe
                 * config; the PRT entries 7, 12, 17, 23 and 30 fall back to
                 * ADDR_SURF_P4_16x16.
                 */
2465                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2466                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2467                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2468                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2469                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2470                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2471                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2472                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2473                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2474                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2475                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2476                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2477                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2478                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2479                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2480                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2481                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2482                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2483                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2484                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2485                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2486                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2487                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2488                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2489                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2490                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2491                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2492                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2493                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2494                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2495                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2496                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2497                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2498                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2499                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2500                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2501                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2502                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2503                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2504                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2505                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2506                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2507                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2508                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2509                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2510                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2511                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2512                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2513                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2514                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2515                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2516                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2517                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2518                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2519                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2520                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2521                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2522                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2523                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2524                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2525                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2526                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2527                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2528                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2529                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2530                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2531                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2532                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2533                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2534                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2535                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2536                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2537                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2538                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2539                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2540                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2541                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2542                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2543                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2544                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2545                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2546                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2547                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2548                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2549                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2550                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2551                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2552                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2553                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2554                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2555                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2556                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2557                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2558                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2559                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2560                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2561                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2562                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2563                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2564                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2565                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2566                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2567                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2568                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2569                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2570                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2571                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2572                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2573                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2574                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2575                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2576                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2577                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2578                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2579                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2580                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2581                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2582                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2583                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2584                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2585                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2586                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2587
                /*
                 * Tonga GB_MACROTILE_MODE table (bank width/height, macro
                 * tile aspect ratio, bank count).  mod2array[7] is
                 * intentionally left unset: the write loop below never
                 * programs register index 7.
                 */
2588                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2589                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2590                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2591                                 NUM_BANKS(ADDR_SURF_16_BANK));
2592                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2593                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2594                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2595                                 NUM_BANKS(ADDR_SURF_16_BANK));
2596                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2597                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2598                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2599                                 NUM_BANKS(ADDR_SURF_16_BANK));
2600                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2601                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2602                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2603                                 NUM_BANKS(ADDR_SURF_16_BANK));
2604                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2605                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2606                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2607                                 NUM_BANKS(ADDR_SURF_16_BANK));
2608                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2609                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2610                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2611                                 NUM_BANKS(ADDR_SURF_16_BANK));
2612                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2613                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2614                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2615                                 NUM_BANKS(ADDR_SURF_16_BANK));
2616                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2617                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2618                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2619                                 NUM_BANKS(ADDR_SURF_16_BANK));
2620                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2621                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2622                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2623                                 NUM_BANKS(ADDR_SURF_16_BANK));
2624                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2625                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2626                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2627                                  NUM_BANKS(ADDR_SURF_16_BANK));
2628                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2629                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2630                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2631                                  NUM_BANKS(ADDR_SURF_16_BANK));
2632                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2633                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2634                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2635                                  NUM_BANKS(ADDR_SURF_8_BANK));
2636                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2637                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2638                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2639                                  NUM_BANKS(ADDR_SURF_4_BANK));
2640                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2641                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2642                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2643                                  NUM_BANKS(ADDR_SURF_4_BANK));
2644
                /* Flush the primary tile mode table into the hardware. */
2645                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2646                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2647
                /*
                 * Flush the macrotile table; index 7 is skipped here, which
                 * matches mod2array[7] never being written above (presumably
                 * a reserved entry on this ASIC — consistent with the other
                 * chip cases in this function).
                 */
2648                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2649                         if (reg_offset != 7)
2650                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2651
2652                 break;
2653         case CHIP_POLARIS11:
2654         case CHIP_POLARIS12:
                /*
                 * Polaris11/Polaris12 GB_TILE_MODE table: modearray[i] is
                 * programmed into mmGB_TILE_MODE0 + i by the loop at the end
                 * of this case.  Every entry here uses the 4-pipe
                 * ADDR_SURF_P4_16x16 pipe config (unlike the Tonga and
                 * Polaris10 cases in this function, which mostly use the
                 * 8-pipe P8_32x32_16x16 config).
                 */
2655                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2656                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2657                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2658                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2659                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2660                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2661                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2662                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2663                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2664                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2665                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2666                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2667                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2668                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2669                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2670                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2671                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2672                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2673                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2674                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2675                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2676                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2677                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2678                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2679                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2680                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2681                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2682                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2683                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2684                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2685                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2686                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2687                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2688                                 PIPE_CONFIG(ADDR_SURF_P4_16x16));
2689                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2690                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2691                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2692                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2693                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2694                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2695                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2696                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2697                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2698                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2699                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2700                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2701                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2702                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2703                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2704                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2705                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2706                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2707                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2708                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2709                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2710                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2711                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2712                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2713                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2714                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2715                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2716                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2717                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2718                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2719                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2720                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2721                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2722                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2723                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2724                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2725                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2726                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2727                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2728                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2729                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2730                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2731                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2732                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2733                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2734                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2735                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2736                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2737                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2738                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2739                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2740                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2741                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2742                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2743                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2744                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2745                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2746                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2747                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2748                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2749                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2750                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2751                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2752                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2753                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2754                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2755                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2756                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2757                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2758                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2759                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2760                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2761                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2762                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2763                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2764                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2765                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2766                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2767                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2768                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2769                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2770                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2771                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2772                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2773                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2774                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2775                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2776                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2777
                /*
                 * Polaris11/Polaris12 GB_MACROTILE_MODE table.  As in the
                 * other chip cases, mod2array[7] is never set and register
                 * index 7 is skipped by the write loop below.
                 */
2778                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2779                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2780                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2781                                 NUM_BANKS(ADDR_SURF_16_BANK));
2782
2783                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2784                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2785                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2786                                 NUM_BANKS(ADDR_SURF_16_BANK));
2787
2788                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2789                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2790                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2791                                 NUM_BANKS(ADDR_SURF_16_BANK));
2792
2793                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2794                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2795                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2796                                 NUM_BANKS(ADDR_SURF_16_BANK));
2797
2798                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2799                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2800                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2801                                 NUM_BANKS(ADDR_SURF_16_BANK));
2802
2803                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2804                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2805                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2806                                 NUM_BANKS(ADDR_SURF_16_BANK));
2807
2808                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2809                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2810                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2811                                 NUM_BANKS(ADDR_SURF_16_BANK));
2812
2813                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2814                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2815                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2816                                 NUM_BANKS(ADDR_SURF_16_BANK));
2817
2818                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2819                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2820                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2821                                 NUM_BANKS(ADDR_SURF_16_BANK));
2822
2823                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2824                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2825                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2826                                 NUM_BANKS(ADDR_SURF_16_BANK));
2827
2828                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2829                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2830                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2831                                 NUM_BANKS(ADDR_SURF_16_BANK));
2832
2833                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2834                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2835                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2836                                 NUM_BANKS(ADDR_SURF_16_BANK));
2837
2838                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2839                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2840                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2841                                 NUM_BANKS(ADDR_SURF_8_BANK));
2842
2843                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2844                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2845                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2846                                 NUM_BANKS(ADDR_SURF_4_BANK));
2847
                /* Flush the primary tile mode table into the hardware. */
2848                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2849                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2850
                /*
                 * Flush the macrotile table, skipping register index 7 —
                 * consistent with mod2array[7] never being initialized above.
                 */
2851                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2852                         if (reg_offset != 7)
2853                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2854
2855                 break;
2856         case CHIP_POLARIS10:
2857                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2858                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2859                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2860                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2861                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2862                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2863                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2864                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2865                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2866                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2867                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2868                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2869                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2870                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2871                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2872                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2873                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2874                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2875                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2876                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2877                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2878                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2879                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2880                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2881                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2882                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2883                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2884                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2885                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2886                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2887                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2888                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2889                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2890                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2891                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2892                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2893                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2894                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2895                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2896                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2897                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2898                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2899                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2900                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2901                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2902                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2903                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2904                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2905                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2906                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2907                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2908                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2909                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2910                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2911                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2912                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2913                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2914                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2915                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2916                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2917                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2918                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2919                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2920                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2921                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2922                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2923                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2924                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2925                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2926                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2927                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2928                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2929                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2930                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2931                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2932                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2933                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2934                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2935                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2936                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2937                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2938                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2939                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2940                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2941                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2942                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2943                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2944                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2945                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2946                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2947                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2948                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2949                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2950                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2951                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2952                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2953                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2954                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2955                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2956                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2957                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2958                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2959                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2960                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2961                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2962                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2963                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2964                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2965                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2966                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2967                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2968                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2969                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2970                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2971                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2972                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2973                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2974                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2975                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2976                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2977                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2978                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2979
2980                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2981                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2982                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2983                                 NUM_BANKS(ADDR_SURF_16_BANK));
2984
2985                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2986                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2987                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2988                                 NUM_BANKS(ADDR_SURF_16_BANK));
2989
2990                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2991                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2992                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2993                                 NUM_BANKS(ADDR_SURF_16_BANK));
2994
2995                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2996                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2997                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2998                                 NUM_BANKS(ADDR_SURF_16_BANK));
2999
3000                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3001                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3002                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3003                                 NUM_BANKS(ADDR_SURF_16_BANK));
3004
3005                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3006                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3007                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3008                                 NUM_BANKS(ADDR_SURF_16_BANK));
3009
3010                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3011                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3012                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3013                                 NUM_BANKS(ADDR_SURF_16_BANK));
3014
3015                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3016                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3017                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3018                                 NUM_BANKS(ADDR_SURF_16_BANK));
3019
3020                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3021                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3022                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3023                                 NUM_BANKS(ADDR_SURF_16_BANK));
3024
3025                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3026                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3027                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3028                                 NUM_BANKS(ADDR_SURF_16_BANK));
3029
3030                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3031                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3032                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3033                                 NUM_BANKS(ADDR_SURF_16_BANK));
3034
3035                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3036                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3037                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3038                                 NUM_BANKS(ADDR_SURF_8_BANK));
3039
3040                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3041                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3042                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3043                                 NUM_BANKS(ADDR_SURF_4_BANK));
3044
3045                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3046                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3047                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3048                                 NUM_BANKS(ADDR_SURF_4_BANK));
3049
3050                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3051                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3052
3053                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3054                         if (reg_offset != 7)
3055                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3056
3057                 break;
3058         case CHIP_STONEY:
3059                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3060                                 PIPE_CONFIG(ADDR_SURF_P2) |
3061                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3062                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3063                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3064                                 PIPE_CONFIG(ADDR_SURF_P2) |
3065                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3066                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3067                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3068                                 PIPE_CONFIG(ADDR_SURF_P2) |
3069                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3070                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3071                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3072                                 PIPE_CONFIG(ADDR_SURF_P2) |
3073                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3074                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3075                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3076                                 PIPE_CONFIG(ADDR_SURF_P2) |
3077                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3078                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3079                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3080                                 PIPE_CONFIG(ADDR_SURF_P2) |
3081                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3082                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3083                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3084                                 PIPE_CONFIG(ADDR_SURF_P2) |
3085                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3086                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3087                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3088                                 PIPE_CONFIG(ADDR_SURF_P2));
3089                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3090                                 PIPE_CONFIG(ADDR_SURF_P2) |
3091                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3092                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3093                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3094                                  PIPE_CONFIG(ADDR_SURF_P2) |
3095                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3096                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3097                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3098                                  PIPE_CONFIG(ADDR_SURF_P2) |
3099                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3100                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3101                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3102                                  PIPE_CONFIG(ADDR_SURF_P2) |
3103                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3104                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3105                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3106                                  PIPE_CONFIG(ADDR_SURF_P2) |
3107                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3108                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3109                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3110                                  PIPE_CONFIG(ADDR_SURF_P2) |
3111                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3112                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3113                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3114                                  PIPE_CONFIG(ADDR_SURF_P2) |
3115                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3116                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3117                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3118                                  PIPE_CONFIG(ADDR_SURF_P2) |
3119                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3120                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3121                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3122                                  PIPE_CONFIG(ADDR_SURF_P2) |
3123                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3124                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3125                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3126                                  PIPE_CONFIG(ADDR_SURF_P2) |
3127                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3128                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3129                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3130                                  PIPE_CONFIG(ADDR_SURF_P2) |
3131                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3132                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3133                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3134                                  PIPE_CONFIG(ADDR_SURF_P2) |
3135                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3136                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3137                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3138                                  PIPE_CONFIG(ADDR_SURF_P2) |
3139                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3140                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3141                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3142                                  PIPE_CONFIG(ADDR_SURF_P2) |
3143                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3144                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3145                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3146                                  PIPE_CONFIG(ADDR_SURF_P2) |
3147                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3148                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3149                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3150                                  PIPE_CONFIG(ADDR_SURF_P2) |
3151                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3152                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3153                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3154                                  PIPE_CONFIG(ADDR_SURF_P2) |
3155                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3156                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3157                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3158                                  PIPE_CONFIG(ADDR_SURF_P2) |
3159                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3160                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3161
3162                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3163                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3164                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3165                                 NUM_BANKS(ADDR_SURF_8_BANK));
3166                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3167                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3168                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3169                                 NUM_BANKS(ADDR_SURF_8_BANK));
3170                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3171                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3172                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3173                                 NUM_BANKS(ADDR_SURF_8_BANK));
3174                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3175                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3176                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3177                                 NUM_BANKS(ADDR_SURF_8_BANK));
3178                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3179                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3180                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3181                                 NUM_BANKS(ADDR_SURF_8_BANK));
3182                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3183                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3184                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3185                                 NUM_BANKS(ADDR_SURF_8_BANK));
3186                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3187                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3188                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3189                                 NUM_BANKS(ADDR_SURF_8_BANK));
3190                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3191                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3192                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3193                                 NUM_BANKS(ADDR_SURF_16_BANK));
3194                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3195                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3196                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3197                                 NUM_BANKS(ADDR_SURF_16_BANK));
3198                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3199                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3200                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3201                                  NUM_BANKS(ADDR_SURF_16_BANK));
3202                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3203                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3204                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3205                                  NUM_BANKS(ADDR_SURF_16_BANK));
3206                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3207                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3208                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3209                                  NUM_BANKS(ADDR_SURF_16_BANK));
3210                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3211                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3212                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3213                                  NUM_BANKS(ADDR_SURF_16_BANK));
3214                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3215                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3216                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3217                                  NUM_BANKS(ADDR_SURF_8_BANK));
3218
3219                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3220                         if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3221                             reg_offset != 23)
3222                                 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3223
3224                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3225                         if (reg_offset != 7)
3226                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3227
3228                 break;
3229         default:
3230                 dev_warn(adev->dev,
3231                          "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
3232                          adev->asic_type);
3233                 fallthrough;
3234
3235         case CHIP_CARRIZO:
3236                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3237                                 PIPE_CONFIG(ADDR_SURF_P2) |
3238                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3239                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3240                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3241                                 PIPE_CONFIG(ADDR_SURF_P2) |
3242                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3243                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3244                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3245                                 PIPE_CONFIG(ADDR_SURF_P2) |
3246                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3247                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3248                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3249                                 PIPE_CONFIG(ADDR_SURF_P2) |
3250                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3251                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3252                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3253                                 PIPE_CONFIG(ADDR_SURF_P2) |
3254                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3255                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3256                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3257                                 PIPE_CONFIG(ADDR_SURF_P2) |
3258                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3259                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3260                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3261                                 PIPE_CONFIG(ADDR_SURF_P2) |
3262                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3263                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3264                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3265                                 PIPE_CONFIG(ADDR_SURF_P2));
3266                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3267                                 PIPE_CONFIG(ADDR_SURF_P2) |
3268                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3269                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3270                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3271                                  PIPE_CONFIG(ADDR_SURF_P2) |
3272                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3273                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3274                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3275                                  PIPE_CONFIG(ADDR_SURF_P2) |
3276                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3277                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3278                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3279                                  PIPE_CONFIG(ADDR_SURF_P2) |
3280                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3281                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3282                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3283                                  PIPE_CONFIG(ADDR_SURF_P2) |
3284                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3285                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3286                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3287                                  PIPE_CONFIG(ADDR_SURF_P2) |
3288                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3289                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3290                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3291                                  PIPE_CONFIG(ADDR_SURF_P2) |
3292                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3293                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3294                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3295                                  PIPE_CONFIG(ADDR_SURF_P2) |
3296                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3297                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3298                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3299                                  PIPE_CONFIG(ADDR_SURF_P2) |
3300                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3301                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3302                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3303                                  PIPE_CONFIG(ADDR_SURF_P2) |
3304                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3305                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3306                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3307                                  PIPE_CONFIG(ADDR_SURF_P2) |
3308                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3309                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3310                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3311                                  PIPE_CONFIG(ADDR_SURF_P2) |
3312                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3313                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3314                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3315                                  PIPE_CONFIG(ADDR_SURF_P2) |
3316                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3317                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3318                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3319                                  PIPE_CONFIG(ADDR_SURF_P2) |
3320                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3321                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3322                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3323                                  PIPE_CONFIG(ADDR_SURF_P2) |
3324                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3325                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3326                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3327                                  PIPE_CONFIG(ADDR_SURF_P2) |
3328                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3329                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3330                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3331                                  PIPE_CONFIG(ADDR_SURF_P2) |
3332                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3333                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3334                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3335                                  PIPE_CONFIG(ADDR_SURF_P2) |
3336                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3337                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3338
3339                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3340                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3341                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3342                                 NUM_BANKS(ADDR_SURF_8_BANK));
3343                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3344                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3345                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3346                                 NUM_BANKS(ADDR_SURF_8_BANK));
3347                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3348                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3349                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3350                                 NUM_BANKS(ADDR_SURF_8_BANK));
3351                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3352                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3353                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3354                                 NUM_BANKS(ADDR_SURF_8_BANK));
3355                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3356                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3357                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3358                                 NUM_BANKS(ADDR_SURF_8_BANK));
3359                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3360                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3361                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3362                                 NUM_BANKS(ADDR_SURF_8_BANK));
3363                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3364                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3365                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3366                                 NUM_BANKS(ADDR_SURF_8_BANK));
3367                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3368                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3369                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3370                                 NUM_BANKS(ADDR_SURF_16_BANK));
3371                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3372                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3373                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3374                                 NUM_BANKS(ADDR_SURF_16_BANK));
3375                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3376                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3377                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3378                                  NUM_BANKS(ADDR_SURF_16_BANK));
3379                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3380                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3381                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3382                                  NUM_BANKS(ADDR_SURF_16_BANK));
3383                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3384                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3385                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3386                                  NUM_BANKS(ADDR_SURF_16_BANK));
3387                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3388                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3389                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3390                                  NUM_BANKS(ADDR_SURF_16_BANK));
3391                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3392                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3393                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3394                                  NUM_BANKS(ADDR_SURF_8_BANK));
3395
3396                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3397                         if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3398                             reg_offset != 23)
3399                                 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3400
3401                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3402                         if (reg_offset != 7)
3403                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3404
3405                 break;
3406         }
3407 }
3408
/*
 * gfx_v8_0_select_se_sh - steer indexed GRBM register accesses
 *
 * Programs GRBM_GFX_INDEX so that subsequent reads/writes of per-SE/SH
 * registers target a specific shader engine (se_num), shader array
 * (sh_num) and instance, or broadcast to all of them when the
 * corresponding argument is 0xffffffff.
 *
 * @xcc_id is unused here; it exists to match the common gfx callback
 * signature shared with multi-XCC ASICs.
 *
 * NOTE(review): callers in this file take adev->grbm_idx_mutex around
 * select/restore pairs — new callers should do the same.
 */
static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance,
				  int xcc_id)
{
	u32 data;

	/* 0xffffffff selects broadcast; anything else is a specific index */
	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32(mmGRBM_GFX_INDEX, data);
}
3432
/*
 * gfx_v8_0_select_me_pipe_q - select the ME/pipe/queue/VMID for
 * SRBM-indexed register accesses.  Thin wrapper around vi_srbm_select();
 * @xcc_id is unused and only present to match the common gfx interface.
 */
static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
				  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	vi_srbm_select(adev, me, pipe, q, vm);
}
3438
3439 static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3440 {
3441         u32 data, mask;
3442
3443         data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
3444                 RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3445
3446         data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
3447
3448         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3449                                          adev->gfx.config.max_sh_per_se);
3450
3451         return (~data) & mask;
3452 }
3453
/*
 * gfx_v8_0_raster_config - per-ASIC default rasterizer configuration
 *
 * ORs the golden PA_SC_RASTER_CONFIG / PA_SC_RASTER_CONFIG_1 field values
 * for the current ASIC into *rconf / *rconf1.  The values encode how
 * packers, RBs and shader engines are mapped for each chip's physical
 * layout; single-SE parts (Topaz/Carrizo/Stoney) need little or nothing.
 */
static void
gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_VEGAM:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) |
			  PKR_XSEL(1) | PKR_YSEL(1) |
			  SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_TOPAZ:
	case CHIP_CARRIZO:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_STONEY:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}
3494
/*
 * gfx_v8_0_write_harvested_raster_configs - adjust raster config for
 * harvested (partially disabled) render backends
 *
 * When some RBs are fused off, the default raster_config values would
 * route work to dead RBs/packers/SEs.  This walks each shader engine and
 * patches the SE_MAP / PKR_MAP / RB_MAP_PKR* fields so that mapping only
 * references units present in @rb_mask, then writes the per-SE value via
 * GRBM_GFX_INDEX steering.  Restores broadcast selection on exit.
 *
 * @raster_config/@raster_config_1: golden values from
 *	gfx_v8_0_raster_config()
 * @rb_mask:  bitmap of physically enabled RBs
 * @num_rb:   total RB count used for per-SE partitioning
 *
 * Must be called with adev->grbm_idx_mutex held (caller gfx_v8_0_setup_rb
 * does so).
 */
static void
gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	/* Per-SE slices of the global RB mask. */
	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	/* If a whole SE pair is harvested, repoint SE_PAIR_MAP at the live pair. */
	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		/* If one SE of a pair is dead, repoint SE_MAP at the live one. */
		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		/* Same for packers within this SE. */
		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		/* And for individual RBs within each packer. */
		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on VI */
		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on VI */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
3603
/*
 * gfx_v8_0_setup_rb - discover and program the render backend configuration
 *
 * Walks every SE/SH to build the active-RB bitmap, caches it in
 * adev->gfx.config (backend_enable_mask / num_rbs), then programs
 * PA_SC_RASTER_CONFIG(_1) — either the plain golden values when nothing
 * is harvested, or the adjusted per-SE values otherwise.  Finally caches
 * the per-SE/SH RB registers for userspace queries.  Takes
 * adev->grbm_idx_mutex for the whole GRBM_GFX_INDEX steering sequence.
 */
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	/* Gather the active-RB bits of every SH into one global bitmap. */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			data = gfx_v8_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);

	/* Only the harvested path needs per-SE adjusted values. */
	if (!adev->gfx.config.backend_enable_mask ||
			adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
}
3660
#define DEFAULT_SH_MEM_BASES	(0x6000)
/**
 * gfx_v8_0_init_compute_vmid - initialize the SH_MEM registers for compute VMIDs
 *
 * @adev: amdgpu_device pointer
 *
 * Programs SH_MEM_CONFIG/SH_MEM_BASES apertures for every KFD (compute)
 * VMID and clears their GDS/GWS/OA allocations.
 */
static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			SH_MEM_CONFIG__PRIVATE_ATC_MASK;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
	}
}
3712
3713 static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
3714 {
3715         int vmid;
3716
3717         /*
3718          * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
3719          * access. Compute VMIDs should be enabled by FW for target VMIDs,
3720          * the driver can enable them for graphics. VMID0 should maintain
3721          * access so that HWS firmware can save/restore entries.
3722          */
3723         for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
3724                 WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
3725                 WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
3726                 WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
3727                 WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
3728         }
3729 }
3730
3731 static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3732 {
3733         switch (adev->asic_type) {
3734         default:
3735                 adev->gfx.config.double_offchip_lds_buf = 1;
3736                 break;
3737         case CHIP_CARRIZO:
3738         case CHIP_STONEY:
3739                 adev->gfx.config.double_offchip_lds_buf = 0;
3740                 break;
3741         }
3742 }
3743
/*
 * gfx_v8_0_constants_init - one-time GFX golden/constant register setup
 *
 * Programs the address config registers, initializes tiling tables, RB
 * configuration and CU info, then sets up the SH_MEM aperture registers
 * for every graphics VMID (UC for VMID0, NC + shared aperture base for
 * the rest), clears compute/GDS VMID state, and finally programs the
 * broadcast PA_SC FIFO sizes and SPI arbitration priorities.
 */
static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp, sh_static_mem_cfg;
	int i;

	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

	gfx_v8_0_tiling_mode_table_init(adev);
	gfx_v8_0_setup_rb(adev);
	gfx_v8_0_get_cu_info(adev);
	gfx_v8_0_config_init(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
				   SWIZZLE_ENABLE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
				   ELEMENT_SIZE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
				   INDEX_STRIDE, 3);
	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);

	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
			/* VMID0 (kernel): uncached default mtype, zero bases */
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			WREG32(mmSH_MEM_BASES, 0);
		} else {
			/* user VMIDs: non-coherent default, shared aperture base */
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			tmp = adev->gmc.shared_aperture_start >> 48;
			WREG32(mmSH_MEM_BASES, tmp);
		}

		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gfx_v8_0_init_compute_vmid(adev);
	gfx_v8_0_init_gds_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	WREG32(mmPA_SC_FIFO_SIZE,
		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));

	tmp = RREG32(mmSPI_ARB_PRIORITY);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
	WREG32(mmSPI_ARB_PRIORITY, tmp);

	mutex_unlock(&adev->grbm_idx_mutex);

}
3826
/*
 * gfx_v8_0_wait_for_rlc_serdes - wait for RLC serdes masters to go idle
 *
 * Polls RLC_SERDES_CU_MASTER_BUSY per SE/SH (under grbm_idx_mutex) and
 * then the non-CU masters (SE/GC/TC0/TC1), each up to adev->usec_timeout
 * microseconds.  On a per-SH timeout, restores broadcast selection, drops
 * the lock and logs which SE/SH timed out before returning.
 */
static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				/* restore broadcast before bailing out */
				gfx_v8_0_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff, 0);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
3864
3865 static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3866                                                bool enable)
3867 {
3868         u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3869
3870         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3871         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3872         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3873         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3874
3875         WREG32(mmCP_INT_CNTL_RING0, tmp);
3876 }
3877
3878 static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
3879 {
3880         adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
3881         /* csib */
3882         WREG32(mmRLC_CSIB_ADDR_HI,
3883                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
3884         WREG32(mmRLC_CSIB_ADDR_LO,
3885                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3886         WREG32(mmRLC_CSIB_LENGTH,
3887                         adev->gfx.rlc.clear_state_size);
3888 }
3889
/*
 * gfx_v8_0_parse_ind_reg_list - index-compress the RLC indirect reg list
 *
 * Scans register_list_format[] from @ind_offset to @list_size.  Entries
 * are separated by 0xFFFFFFFF markers; the start offset of each entry is
 * recorded in @ind_start_offsets (up to @max_offset entries, counted via
 * *offset_count).  Within an entry the value two words past the current
 * position is replaced in place by its index in @unique_indices (the
 * value is appended first if not yet present, bounded by @max_indices
 * and counted via *indices_count).  Exceeding either bound is a BUG.
 */
static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
				int ind_offset,
				int list_size,
				int *unique_indices,
				int *indices_count,
				int max_indices,
				int *ind_start_offsets,
				int *offset_count,
				int max_offset)
{
	int indices;
	bool new_entry = true;

	for (; ind_offset < list_size; ind_offset++) {

		if (new_entry) {
			/* remember where this entry begins */
			new_entry = false;
			ind_start_offsets[*offset_count] = ind_offset;
			*offset_count = *offset_count + 1;
			BUG_ON(*offset_count >= max_offset);
		}

		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
			/* end-of-entry marker: next word starts a new entry */
			new_entry = true;
			continue;
		}

		/* skip to the index word of this (reg, count, index) group;
		 * the loop's ind_offset++ then advances past it */
		ind_offset += 2;

		/* look for a matching index */
		for (indices = 0;
			indices < *indices_count;
			indices++) {
			if (unique_indices[indices] ==
				register_list_format[ind_offset])
				break;
		}

		if (indices >= *indices_count) {
			/* first occurrence: append to the unique table */
			unique_indices[*indices_count] =
				register_list_format[ind_offset];
			indices = *indices_count;
			*indices_count = *indices_count + 1;
			BUG_ON(*indices_count >= max_indices);
		}

		/* replace the raw value by its table index */
		register_list_format[ind_offset] = indices;
	}
}
3939
/*
 * gfx_v8_0_init_save_restore_list - upload the RLC save/restore lists
 *
 * Copies the firmware-provided register list format, compresses its
 * indirect indices via gfx_v8_0_parse_ind_reg_list(), then uploads:
 * the direct save/restore list to ARAM, the (rewritten) indirect list
 * and its size to GPM scratch, the indirect entry starting offsets, and
 * finally the unique index registers/values into the RLC_SRM_INDEX_CNTL
 * register pairs (low 18 bits look like an address, upper bits the data
 * — NOTE(review): exact split inferred from the 0x3FFFF mask / >> 20,
 * confirm against RLC docs).
 *
 * Returns 0 on success or -ENOMEM if the list copy fails.
 */
static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
{
	int i, temp, data;
	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
	int indices_count = 0;
	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	int offset_count = 0;

	int list_size;
	unsigned int *register_list_format =
		kmemdup(adev->gfx.rlc.register_list_format,
			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;

	gfx_v8_0_parse_ind_reg_list(register_list_format,
				RLC_FormatDirectRegListLength,
				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				unique_indices,
				&indices_count,
				ARRAY_SIZE(unique_indices),
				indirect_start_offsets,
				&offset_count,
				ARRAY_SIZE(indirect_start_offsets));

	/* save and restore list */
	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);

	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);

	/* indirect list */
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);

	/* list size is stored in dword pairs */
	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);

	/* starting offsets starts */
	WREG32(mmRLC_GPM_SCRATCH_ADDR,
		adev->gfx.rlc.starting_offsets_start);
	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA,
				indirect_start_offsets[i]);

	/* unique indices */
	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
		if (unique_indices[i] != 0) {
			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
			WREG32(data + i, unique_indices[i] >> 20);
		}
	}
	kfree(register_list_format);

	return 0;
}
4002
/* Turn on the RLC save/restore machine (SRM). */
static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}
4007
/*
 * gfx_v8_0_init_power_gating - program static power-gating timing.
 *
 * Sets the CP write-pointer idle poll count, the four RLC power-gating
 * sequencing delays, the SERDES command delay and the GFX-idle threshold
 * used by the RLC auto power-gating / GRBM register save logic.
 */
static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);

	/* all four PG sequencing delays use the same 0x10 count */
	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
	WREG32(mmRLC_PG_DELAY, data);

	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);

}
4024
/* Enable/disable SMU clock (SCK) slowdown while the GFX block powers up. */
static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
}
4030
/* Enable/disable SMU clock (SCK) slowdown while the GFX block powers down. */
static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
						  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
}
4036
/*
 * Enable/disable CP power gating.  The hardware field is a *disable*
 * bit (CP_PG_DISABLE), hence the inverted polarity below.
 */
static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
}
4041
4042 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4043 {
4044         if ((adev->asic_type == CHIP_CARRIZO) ||
4045             (adev->asic_type == CHIP_STONEY)) {
4046                 gfx_v8_0_init_csb(adev);
4047                 gfx_v8_0_init_save_restore_list(adev);
4048                 gfx_v8_0_enable_save_restore_machine(adev);
4049                 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4050                 gfx_v8_0_init_power_gating(adev);
4051                 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4052         } else if ((adev->asic_type == CHIP_POLARIS11) ||
4053                    (adev->asic_type == CHIP_POLARIS12) ||
4054                    (adev->asic_type == CHIP_VEGAM)) {
4055                 gfx_v8_0_init_csb(adev);
4056                 gfx_v8_0_init_save_restore_list(adev);
4057                 gfx_v8_0_enable_save_restore_machine(adev);
4058                 gfx_v8_0_init_power_gating(adev);
4059         }
4060
4061 }
4062
/* Halt the RLC F32 core, then quiesce its interrupt and SERDES traffic. */
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);

	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
	gfx_v8_0_wait_for_rlc_serdes(adev);
}
4070
/* Pulse the RLC soft reset; the 50us delays let each edge settle. */
static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);

	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}
4079
/* Release the RLC F32 core; on dGPUs also re-enable the GUI idle interrupt. */
static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);

	/* carrizo do enable cp interrupt after cp inited */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}
4090
/*
 * gfx_v8_0_rlc_resume - restart the RLC: stop, soft-reset, reprogram
 * power gating, start.  Under SR-IOV the host owns the RLC, so the guest
 * only refreshes the clear-state buffer.  Always returns 0.
 */
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		gfx_v8_0_init_csb(adev);
		return 0;
	}

	adev->gfx.rlc.funcs->stop(adev);
	adev->gfx.rlc.funcs->reset(adev);
	gfx_v8_0_init_pg(adev);
	adev->gfx.rlc.funcs->start(adev);

	return 0;
}
4105
4106 static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4107 {
4108         u32 tmp = RREG32(mmCP_ME_CNTL);
4109
4110         if (enable) {
4111                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4112                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4113                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4114         } else {
4115                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4116                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4117                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
4118         }
4119         WREG32(mmCP_ME_CNTL, tmp);
4120         udelay(50);
4121 }
4122
4123 static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4124 {
4125         u32 count = 0;
4126         const struct cs_section_def *sect = NULL;
4127         const struct cs_extent_def *ext = NULL;
4128
4129         /* begin clear state */
4130         count += 2;
4131         /* context control state */
4132         count += 3;
4133
4134         for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4135                 for (ext = sect->section; ext->extent != NULL; ++ext) {
4136                         if (sect->id == SECT_CONTEXT)
4137                                 count += 2 + ext->reg_count;
4138                         else
4139                                 return 0;
4140                 }
4141         }
4142         /* pa_sc_raster_config/pa_sc_raster_config1 */
4143         count += 4;
4144         /* end clear state */
4145         count += 2;
4146         /* clear state */
4147         count += 2;
4148
4149         return count;
4150 }
4151
/*
 * gfx_v8_0_cp_gfx_start - initialize the gfx CP and emit the clear state.
 *
 * Programs the basic CP config registers, un-halts the gfx micro engines
 * and pushes the golden clear-state sequence (from vi_cs_data), the
 * raster config and the CE partition setup through gfx ring 0.
 *
 * Returns 0 on success or the amdgpu_ring_alloc() error.
 */
static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v8_0_cp_gfx_enable(adev, true);

	/* csb size + 4 dwords for the CE partition SET_BASE packet below */
	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* clear state buffer */
	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	/* emit every SECT_CONTEXT extent of the golden clear state */
	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
				       PACKET3(PACKET3_SET_CONTEXT_REG,
					       ext->reg_count));
				amdgpu_ring_write(ring,
				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	/* init the CE partitions */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	return 0;
}
/*
 * gfx_v8_0_set_cpg_door_bell - configure the gfx ring doorbell.
 *
 * Enables or disables the CP_RB doorbell according to ring->use_doorbell
 * and, on dGPUs, programs the doorbell range aperture around the gfx
 * ring 0 doorbell.  Iceland (Topaz) has no gfx doorbells at all.
 */
static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	u32 tmp;
	/* no gfx doorbells on iceland */
	if (adev->asic_type == CHIP_TOPAZ)
		return;

	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
						DOORBELL_HIT, 0);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
					    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}

	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);

	/* the range registers below are only programmed on dGPUs */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
					DOORBELL_RANGE_LOWER,
					adev->doorbell_index.gfx_ring0);
	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
		CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}
4248
/*
 * gfx_v8_0_cp_gfx_resume - program and start gfx ring 0.
 *
 * Sets up CP_RB0 (buffer size, read/write pointers, rptr/wptr report
 * addresses, ring base), configures the gfx doorbell and then kicks the
 * ring with the clear-state sequence via gfx_v8_0_cp_gfx_start().
 * Always returns 0.
 */
static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
	mdelay(1);
	/* drop RB_RPTR_WR_ENA again now that the pointers are initialized */
	WREG32(mmCP_RB0_CNTL, tmp);

	/* ring base is programmed in 256-byte units */
	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	gfx_v8_0_set_cpg_door_bell(adev, ring);
	/* start the ring */
	amdgpu_ring_clear_ring(ring);
	gfx_v8_0_cp_gfx_start(adev);

	return 0;
}
4301
4302 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4303 {
4304         if (enable) {
4305                 WREG32(mmCP_MEC_CNTL, 0);
4306         } else {
4307                 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4308                 adev->gfx.kiq[0].ring.sched.ready = false;
4309         }
4310         udelay(50);
4311 }
4312
4313 /* KIQ functions */
4314 static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
4315 {
4316         uint32_t tmp;
4317         struct amdgpu_device *adev = ring->adev;
4318
4319         /* tell RLC which is KIQ queue */
4320         tmp = RREG32(mmRLC_CP_SCHEDULERS);
4321         tmp &= 0xffffff00;
4322         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
4323         WREG32(mmRLC_CP_SCHEDULERS, tmp | 0x80);
4324 }
4325
/*
 * gfx_v8_0_kiq_kcq_enable - have the KIQ map all compute queues.
 *
 * Builds the queue mask from the MEC queue bitmap, then submits one
 * SET_RESOURCES packet (8 dwords) followed by a MAP_QUEUES packet per
 * compute ring on the KIQ ring.
 *
 * Returns 0 on success or the amdgpu_ring_alloc() error.
 */
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	uint64_t queue_mask = 0;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}
	/* set resources */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, 0);	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
		uint64_t wptr_addr = ring->wptr_gpu_addr;

		/* map queues */
		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
		amdgpu_ring_write(kiq_ring,
				  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
		amdgpu_ring_write(kiq_ring,
				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
	}

	amdgpu_ring_commit(kiq_ring);

	return 0;
}
4386
/*
 * gfx_v8_0_deactivate_hqd - dequeue the currently selected HQD.
 * @adev: amdgpu device
 * @req: dequeue request type written to CP_HQD_DEQUEUE_REQUEST
 *
 * If the HQD is active, issues the dequeue request and busy-waits up to
 * adev->usec_timeout microseconds for it to go inactive, then clears the
 * request and the PQ read/write pointers.  NOTE(review): presumably the
 * target queue must already be selected via vi_srbm_select() - confirm
 * at the call sites.
 *
 * Returns 0, or -ETIMEDOUT if the HQD stayed active.
 */
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
{
	int i, r = 0;

	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
				break;
			udelay(1);
		}
		if (i == adev->usec_timeout)
			r = -ETIMEDOUT;
	}
	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
	WREG32(mmCP_HQD_PQ_RPTR, 0);
	WREG32(mmCP_HQD_PQ_WPTR, 0);

	return r;
}
4407
4408 static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4409 {
4410         struct amdgpu_device *adev = ring->adev;
4411
4412         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4413                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
4414                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4415                         mqd->cp_hqd_queue_priority =
4416                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4417                 }
4418         }
4419 }
4420
/*
 * gfx_v8_0_mqd_init - fill in the memory queue descriptor for @ring.
 *
 * Populates ring->mqd_ptr with the CP_HQD_* values describing the queue:
 * EOP buffer, MQD and ring base addresses, doorbell setup, rptr/wptr
 * report addresses, cache MTYPEs and priority.  Several fields start
 * from the current hardware state read via RREG32, so the caller must
 * have the target queue selected through vi_srbm_select() (see the call
 * sites in gfx_v8_0_kiq_init_queue()/gfx_v8_0_kcq_init_queue()).
 *
 * Only the KIQ is marked active here; regular compute queues are
 * activated later by the KIQ through map_queues.  Always returns 0.
 */
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;
	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
	/* EOP base is programmed in 256-byte units */
	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
			    CP_HQD_PQ_DOORBELL_CONTROL,
			    DOORBELL_EN,
			    ring->use_doorbell ? 1 : 0);

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32(mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = ring->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = ring->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_wptr = ring->wptr;
	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MTYPE */
	tmp = RREG32(mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
	mqd->cp_hqd_ib_control = tmp;

	tmp = RREG32(mmCP_HQD_IQ_TIMER);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
	mqd->cp_hqd_iq_timer = tmp;

	tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
	mqd->cp_hqd_ctx_save_control = tmp;

	/* defaults */
	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);

	/* set static priority for a queue/ring */
	gfx_v8_0_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);

	/* map_queues packet doesn't need activate the queue,
	 * so only kiq need set this field.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd->cp_hqd_active = 1;

	return 0;
}
4567
/*
 * gfx_v8_0_mqd_commit - write a prepared MQD image into the HQD registers.
 *
 * Copies the MQD into the contiguous CP_HQD_* register block in three
 * passes, activating the queue last (CP_HQD_ACTIVE is the final register
 * written).  The caller must have selected the target queue via
 * vi_srbm_select() (see gfx_v8_0_kiq_init_queue()).  Always returns 0.
 */
static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
			struct vi_mqd *mqd)
{
	uint32_t mqd_reg;
	uint32_t *mqd_data;

	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
	mqd_data = &mqd->cp_mqd_base_addr_lo;

	/* disable wptr polling */
	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* program all HQD registers */
	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (adev->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
	}

	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* activate the HQD */
	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	return 0;
}
4604
/*
 * gfx_v8_0_kiq_init_queue - initialize (or restore) the KIQ's MQD.
 *
 * On GPU reset the saved MQD backup is replayed and committed directly;
 * on a fresh start the MQD is built from scratch with gfx_v8_0_mqd_init()
 * and then backed up.  All HQD programming is done with the KIQ selected
 * through vi_srbm_select() under the SRBM mutex.  Always returns 0.
 */
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;

	gfx_v8_0_kiq_setting(ring);

	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.kiq[0].mqd_backup)
			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		/* keep a backup so a later GPU reset can restore this state */
		if (adev->gfx.kiq[0].mqd_backup)
			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
	}

	return 0;
}
4644
4645 static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4646 {
4647         struct amdgpu_device *adev = ring->adev;
4648         struct vi_mqd *mqd = ring->mqd_ptr;
4649         int mqd_idx = ring - &adev->gfx.compute_ring[0];
4650
4651         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4652                 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4653                 ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4654                 ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4655                 mutex_lock(&adev->srbm_mutex);
4656                 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4657                 gfx_v8_0_mqd_init(ring);
4658                 vi_srbm_select(adev, 0, 0, 0, 0);
4659                 mutex_unlock(&adev->srbm_mutex);
4660
4661                 if (adev->gfx.mec.mqd_backup[mqd_idx])
4662                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4663         } else {
4664                 /* restore MQD to a clean status */
4665                 if (adev->gfx.mec.mqd_backup[mqd_idx])
4666                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4667                 /* reset ring buffer */
4668                 ring->wptr = 0;
4669                 amdgpu_ring_clear_ring(ring);
4670         }
4671         return 0;
4672 }
4673
/*
 * Program the MEC doorbell aperture (KIQ through MEC ring 7) and enable
 * compute doorbells.  The range registers are only written on ASICs
 * after CHIP_TONGA in the asic_type enumeration.
 */
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
	if (adev->asic_type > CHIP_TONGA) {
		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
	}
	/* enable doorbells */
	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
}
4683
4684 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4685 {
4686         struct amdgpu_ring *ring;
4687         int r;
4688
4689         ring = &adev->gfx.kiq[0].ring;
4690
4691         r = amdgpu_bo_reserve(ring->mqd_obj, false);
4692         if (unlikely(r != 0))
4693                 return r;
4694
4695         r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4696         if (unlikely(r != 0)) {
4697                 amdgpu_bo_unreserve(ring->mqd_obj);
4698                 return r;
4699         }
4700
4701         gfx_v8_0_kiq_init_queue(ring);
4702         amdgpu_bo_kunmap(ring->mqd_obj);
4703         ring->mqd_ptr = NULL;
4704         amdgpu_bo_unreserve(ring->mqd_obj);
4705         return 0;
4706 }
4707
4708 static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4709 {
4710         struct amdgpu_ring *ring = NULL;
4711         int r = 0, i;
4712
4713         gfx_v8_0_cp_compute_enable(adev, true);
4714
4715         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4716                 ring = &adev->gfx.compute_ring[i];
4717
4718                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4719                 if (unlikely(r != 0))
4720                         goto done;
4721                 r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4722                 if (!r) {
4723                         r = gfx_v8_0_kcq_init_queue(ring);
4724                         amdgpu_bo_kunmap(ring->mqd_obj);
4725                         ring->mqd_ptr = NULL;
4726                 }
4727                 amdgpu_bo_unreserve(ring->mqd_obj);
4728                 if (r)
4729                         goto done;
4730         }
4731
4732         gfx_v8_0_set_mec_doorbell_range(adev);
4733
4734         r = gfx_v8_0_kiq_kcq_enable(adev);
4735         if (r)
4736                 goto done;
4737
4738 done:
4739         return r;
4740 }
4741
/* Run ring tests on every CP ring: gfx, KIQ, then all compute rings. */
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	/* collect all the ring_tests here, gfx, kiq, compute */
	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	ring = &adev->gfx.kiq[0].ring;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	/*
	 * Compute-ring results are deliberately not propagated: a single
	 * failing KCQ does not fail overall bring-up. NOTE(review):
	 * presumably the helper marks the failing ring unusable itself —
	 * confirm in amdgpu_ring_test_helper().
	 */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		amdgpu_ring_test_helper(ring);
	}

	return 0;
}
4765
/*
 * Resume the whole command processor: KIQ first, then the gfx ring,
 * then the compute queues, and finally ring-test everything.
 * Ordering matters — the KIQ must be alive before KCQs can be mapped.
 */
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	/* Quiesce GUI idle interrupts during bring-up on dGPUs. */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v8_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_gfx_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_kcq_resume(adev);
	if (r)
		return r;

	r = gfx_v8_0_cp_test_all_rings(adev);
	if (r)
		return r;

	gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
4793
/* Enable or disable both CP engines (gfx and compute) together. */
static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v8_0_cp_gfx_enable(adev, enable);
	gfx_v8_0_cp_compute_enable(adev, enable);
}
4799
4800 static int gfx_v8_0_hw_init(struct amdgpu_ip_block *ip_block)
4801 {
4802         int r;
4803         struct amdgpu_device *adev = ip_block->adev;
4804
4805         gfx_v8_0_init_golden_registers(adev);
4806         gfx_v8_0_constants_init(adev);
4807
4808         r = adev->gfx.rlc.funcs->resume(adev);
4809         if (r)
4810                 return r;
4811
4812         r = gfx_v8_0_cp_resume(adev);
4813
4814         return r;
4815 }
4816
/*
 * Ask the KIQ to unmap (reset) every compute queue, then ring-test the
 * KIQ to ensure the unmap packets were actually consumed.
 */
static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;

	/* 6 dwords of UNMAP_QUEUES per compute ring. */
	r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
	if (r)
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
	/*
	 * NOTE(review): on alloc failure the code still writes/commits the
	 * ring below — looks like a deliberate best-effort teardown, but
	 * worth confirming.
	 */

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
		amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
						PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
						PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
						PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
						PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
		/* Queue is addressed by its doorbell offset. */
		amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
	/* Submit unmap queue packet */
	amdgpu_ring_commit(kiq_ring);
	/*
	 * Ring test will do a basic scratch register change check. Just run
	 * this to ensure that unmap queues that is submitted before got
	 * processed successfully before returning.
	 */
	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ disable failed\n");

	return r;
}
4853
4854 static bool gfx_v8_0_is_idle(void *handle)
4855 {
4856         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4857
4858         if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4859                 || RREG32(mmGRBM_STATUS2) != 0x8)
4860                 return false;
4861         else
4862                 return true;
4863 }
4864
4865 static bool gfx_v8_0_rlc_is_idle(void *handle)
4866 {
4867         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4868
4869         if (RREG32(mmGRBM_STATUS2) != 0x8)
4870                 return false;
4871         else
4872                 return true;
4873 }
4874
4875 static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4876 {
4877         unsigned int i;
4878         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4879
4880         for (i = 0; i < adev->usec_timeout; i++) {
4881                 if (gfx_v8_0_rlc_is_idle(handle))
4882                         return 0;
4883
4884                 udelay(1);
4885         }
4886         return -ETIMEDOUT;
4887 }
4888
4889 static int gfx_v8_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
4890 {
4891         unsigned int i;
4892         struct amdgpu_device *adev = ip_block->adev;
4893
4894         for (i = 0; i < adev->usec_timeout; i++) {
4895                 if (gfx_v8_0_is_idle(adev))
4896                         return 0;
4897
4898                 udelay(1);
4899         }
4900         return -ETIMEDOUT;
4901 }
4902
/*
 * hw_fini IP callback: drop GFX interrupts, unmap the compute queues,
 * then halt the CP and RLC (skipped under SR-IOV, where the host owns
 * the hardware). Teardown order matters: KCQs must be unmapped before
 * the compute CP is stopped.
 */
static int gfx_v8_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);

	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);

	/* disable KCQ to avoid CPC touch memory not valid anymore */
	gfx_v8_0_kcq_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/* Only halt CP/RLC once they report idle; otherwise just warn. */
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	if (!gfx_v8_0_wait_for_idle(ip_block))
		gfx_v8_0_cp_enable(adev, false);
	else
		pr_err("cp is busy, skip halt cp\n");
	if (!gfx_v8_0_wait_for_rlc_idle(adev))
		adev->gfx.rlc.funcs->stop(adev);
	else
		pr_err("rlc is busy, skip halt rlc\n");
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return 0;
}
4935
/* Suspend is a full hw_fini for GFX v8. */
static int gfx_v8_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return gfx_v8_0_hw_fini(ip_block);
}
4940
/* Resume is a full hw_init for GFX v8. */
static int gfx_v8_0_resume(struct amdgpu_ip_block *ip_block)
{
	return gfx_v8_0_hw_init(ip_block);
}
4945
/*
 * Inspect GRBM/SRBM status registers and compute which soft-reset bits
 * would be needed to recover. The masks are cached in adev->gfx for the
 * pre/soft/post reset callbacks. Returns true when a reset is needed.
 */
static bool gfx_v8_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	/* Any busy pipeline unit implies CP/GFX (and GRBM) need resetting. */
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	}

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	/* A stuck CP frontend/compute/gfx engine needs all three CP resets. */
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPF, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPC, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPG, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
						SOFT_RESET_GRBM, 1);
	}

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	/* Stash (or clear) the computed masks for the reset callbacks. */
	if (grbm_soft_reset || srbm_soft_reset) {
		adev->gfx.grbm_soft_reset = grbm_soft_reset;
		adev->gfx.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gfx.grbm_soft_reset = 0;
		adev->gfx.srbm_soft_reset = 0;
		return false;
	}
}
5007
/*
 * Prepare for a GFX soft reset: stop the RLC and quiesce whichever CP
 * engines the cached reset masks say are affected. No-op when
 * check_soft_reset recorded nothing to do.
 */
static int gfx_v8_0_pre_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;

	/* stop the rlc */
	adev->gfx.rlc.funcs->stop(adev);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		/* Disable GFX parsing/prefetching */
		gfx_v8_0_cp_gfx_enable(adev, false);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		/* Deactivate every compute HQD under the SRBM bank select. */
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		/* Disable MEC parsing/prefetching */
		gfx_v8_0_cp_compute_enable(adev, false);
	}

	return 0;
}
5048
/*
 * Execute the GFX soft reset using the masks cached by
 * check_soft_reset: stall/clear the GMCON path, pulse the GRBM and
 * SRBM soft-reset bits (assert, wait, deassert), then release the
 * stall. The read-backs after each WREG32 flush the write before the
 * delay. No-op when no reset bits were recorded.
 */
static int gfx_v8_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;
	srbm_soft_reset = adev->gfx.srbm_soft_reset;

	/* Stall the memory controller's GFX path while resetting. */
	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
		WREG32(mmGMCON_DEBUG, tmp);
		udelay(50);
	}

	if (grbm_soft_reset) {
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	/* Release the GMCON stall/clear now that the reset pulse is done. */
	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
		WREG32(mmGMCON_DEBUG, tmp);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	return 0;
}
5110
/*
 * Recover after a GFX soft reset: re-deactivate compute HQDs, resume
 * the KIQ/KCQ and (if it was reset) the gfx CP, ring-test everything,
 * and restart the RLC. No-op when no reset bits were recorded.
 */
static int gfx_v8_0_post_soft_reset(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		/* Ensure every compute HQD is inactive before re-mapping. */
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		gfx_v8_0_kiq_resume(adev);
		gfx_v8_0_kcq_resume(adev);
	}

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		gfx_v8_0_cp_gfx_resume(adev);

	gfx_v8_0_cp_test_all_rings(adev);

	adev->gfx.rlc.funcs->start(adev);

	return 0;
}
5151
5152 /**
5153  * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5154  *
5155  * @adev: amdgpu_device pointer
5156  *
5157  * Fetches a GPU clock counter snapshot.
5158  * Returns the 64 bit clock counter snapshot.
5159  */
5160 static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5161 {
5162         uint64_t clock;
5163
5164         mutex_lock(&adev->gfx.gpu_clock_mutex);
5165         WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
5166         clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5167                 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5168         mutex_unlock(&adev->gfx.gpu_clock_mutex);
5169         return clock;
5170 }
5171
/*
 * Emit WRITE_DATA packets that program the per-VMID GDS base/size,
 * GWS and OA allocations. Caller guarantees the ring has space for
 * the 4 x 5-dword packets.
 */
static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS: size in the upper field, base in the lower */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA: bitmask of oa_size counters starting at oa_base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
5210
5211 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5212 {
5213         WREG32(mmSQ_IND_INDEX,
5214                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5215                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5216                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
5217                 (SQ_IND_INDEX__FORCE_READ_MASK));
5218         return RREG32(mmSQ_IND_DATA);
5219 }
5220
5221 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5222                            uint32_t wave, uint32_t thread,
5223                            uint32_t regno, uint32_t num, uint32_t *out)
5224 {
5225         WREG32(mmSQ_IND_INDEX,
5226                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5227                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5228                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
5229                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5230                 (SQ_IND_INDEX__FORCE_READ_MASK) |
5231                 (SQ_IND_INDEX__AUTO_INCR_MASK));
5232         while (num--)
5233                 *(out++) = RREG32(mmSQ_IND_DATA);
5234 }
5235
/*
 * Snapshot the debug-relevant wave state registers into dst[],
 * advancing *no_fields per entry. The leading 0 marks this as a
 * "type 0" wave-data record for the consumer.
 */
static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}
5260
5261 static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
5262                                      uint32_t wave, uint32_t start,
5263                                      uint32_t size, uint32_t *dst)
5264 {
5265         wave_read_regs(
5266                 adev, simd, wave, 0,
5267                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
5268 }
5269
5270
5271 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5272         .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5273         .select_se_sh = &gfx_v8_0_select_se_sh,
5274         .read_wave_data = &gfx_v8_0_read_wave_data,
5275         .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5276         .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5277 };
5278
/*
 * early_init IP callback: set ring counts and wire up the function
 * tables before any hardware is touched.
 */
static int gfx_v8_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	adev->gfx.xcc_mask = 1;
	adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
	/* Honor the module's KCQ count but cap it at the hardware maximum. */
	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
					  AMDGPU_MAX_COMPUTE_RINGS);
	adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
	gfx_v8_0_set_ring_funcs(adev);
	gfx_v8_0_set_irq_funcs(adev);
	gfx_v8_0_set_gds_init(adev);
	gfx_v8_0_set_rlc_funcs(adev);

	return 0;
}
5295
/*
 * late_init IP callback: enable the GFX interrupt sources and run the
 * EDC GPR workarounds (which need the IB pool, hence late init).
 */
static int gfx_v8_0_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	/* requires IBs so do in late init after IB pool is initialized */
	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
	if (r) {
		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
		return r;
	}

	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
	if (r) {
		DRM_ERROR(
			"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
			r);
		return r;
	}

	return 0;
}
5330
5331 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5332                                                        bool enable)
5333 {
5334         if ((adev->asic_type == CHIP_POLARIS11) ||
5335             (adev->asic_type == CHIP_POLARIS12) ||
5336             (adev->asic_type == CHIP_VEGAM))
5337                 /* Send msg to SMU via Powerplay */
5338                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable, 0);
5339
5340         WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5341 }
5342
/* Toggle dynamic per-CU medium-grain power gating in RLC_PG_CNTL. */
static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
5348
/* Toggle quick medium-grain power gating (Polaris11 family). */
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
		bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}
5354
/* Toggle coarse-grain GFX power gating (Carrizo/Stoney). */
static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}
5360
/* Toggle GFX pipeline power gating (Carrizo/Stoney). */
static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);

	/* Read any GFX register to wake up GFX. */
	if (!enable)
		RREG32(mmDB_RENDER_CONTROL);
}
5370
5371 static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5372                                           bool enable)
5373 {
5374         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5375                 cz_enable_gfx_cg_power_gating(adev, true);
5376                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5377                         cz_enable_gfx_pipeline_power_gating(adev, true);
5378         } else {
5379                 cz_enable_gfx_cg_power_gating(adev, false);
5380                 cz_enable_gfx_pipeline_power_gating(adev, false);
5381         }
5382 }
5383
/*
 * set_powergating_state IP callback: apply the per-ASIC power-gating
 * configuration. All register programming happens inside RLC safe
 * mode when any of the affected PG features is supported. No-op for
 * SR-IOV VFs (the host owns power management).
 */
static int gfx_v8_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_RLC_SMU_HS |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_GFX_DMG))
		amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:

		/* SCK slow-down and CP gating track their support flags
		 * unconditionally; CG/SMG/DMG additionally require the
		 * requested state to be "gate". */
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			cz_enable_sck_slow_down_on_power_up(adev, true);
			cz_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			cz_enable_sck_slow_down_on_power_up(adev, false);
			cz_enable_sck_slow_down_on_power_down(adev, false);
		}
		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			cz_enable_cp_power_gating(adev, true);
		else
			cz_enable_cp_power_gating(adev, false);

		cz_update_gfx_cg_power_gating(adev, enable);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
		else
			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
		break;
	default:
		break;
	}
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_RLC_SMU_HS |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_GFX_DMG))
		amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	return 0;
}
5454
/*
 * Report which clock-gating features are currently active by reading
 * back the relevant registers and OR-ing AMD_CG_* bits into *flags.
 */
static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/*
	 * NOTE(review): for SR-IOV VFs *flags is zeroed but the register
	 * reads below still run — confirm whether that is intentional.
	 */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGLG */
	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_CGTS */
	data = RREG32(mmCGTS_SM_CTRL_REG);
	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS;

	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32(mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32(mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}
5496
/*
 * gfx_v8_0_send_serdes_cmd - broadcast a BPM command over the RLC serdes.
 * @adev: amdgpu device
 * @reg_addr: BPM register address to target (e.g. BPM_REG_MGCG_OVERRIDE)
 * @cmd: BPM command (SET_BPM_SERDES_CMD / CLE_BPM_SERDES_CMD)
 *
 * Selects all SEs/SHs and all CU and non-CU serdes masters, then programs
 * RLC_SERDES_WR_CTRL with the command.  Stoney's RLC_SERDES_WR_CTRL lacks
 * the BPM_DATA and REG_ADDR fields, hence the smaller clear mask there.
 */
static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
				     uint32_t reg_addr, uint32_t cmd)
{
	uint32_t data;

	/* broadcast to every shader engine / shader array */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	/* address all CU and non-CU serdes masters */
	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);

	/* read-modify-write: clear all command/select fields first */
	data = RREG32(mmRLC_SERDES_WR_CTRL);
	if (adev->asic_type == CHIP_STONEY)
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	else
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	/* then set the command, target address, and BPM address (all 0xff) */
	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));

	WREG32(mmRLC_SERDES_WR_CTRL, data);
}
5537
/* RLC safe-mode message codes and RLC_GPR_REG2 field encodings.
 * NOTE(review): nothing in the visible code references these — presumably
 * leftovers from an older GPR_REG2-based safe-mode handshake; confirm
 * against the rest of the file before removing.
 */
#define MSG_ENTER_RLC_SAFE_MODE     1
#define MSG_EXIT_RLC_SAFE_MODE      0
#define RLC_GPR_REG2__REQ_MASK 0x00000001
#define RLC_GPR_REG2__REQ__SHIFT 0
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5544
5545 static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5546 {
5547         uint32_t rlc_setting;
5548
5549         rlc_setting = RREG32(mmRLC_CNTL);
5550         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5551                 return false;
5552
5553         return true;
5554 }
5555
/*
 * gfx_v8_0_set_safe_mode - request RLC safe mode and wait for entry.
 * @adev: amdgpu device
 * @xcc_id: unused on gfx v8 (kept for the common RLC callback signature)
 *
 * Writes CMD=1 / MESSAGE=1 (enter) to RLC_SAFE_MODE, polls RLC_GPM_STAT
 * until both GFX clock and power report up, then waits for the RLC to
 * consume the command (CMD self-clears).  Both poll loops are bounded by
 * adev->usec_timeout.
 *
 * NOTE(review): the seed value is read from RLC_CNTL but written to
 * RLC_SAFE_MODE — looks intentional (the RLC_SAFE_MODE fields are set
 * explicitly below), but confirm against the register spec.
 */
static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;
	data = RREG32(mmRLC_CNTL);
	data |= RLC_SAFE_MODE__CMD_MASK;
	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32(mmRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE: GFX clock and power must both be on */
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) &
		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
			break;
		udelay(1);
	}
	/* wait for the RLC to acknowledge: CMD bit self-clears */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}
5582
/*
 * gfx_v8_0_unset_safe_mode - request RLC safe mode exit and wait for ack.
 * @adev: amdgpu device
 * @xcc_id: unused on gfx v8 (kept for the common RLC callback signature)
 *
 * Writes CMD=1 with MESSAGE=0 (exit) to RLC_SAFE_MODE and polls until the
 * RLC consumes the command (CMD self-clears), bounded by adev->usec_timeout.
 */
static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RREG32(mmRLC_CNTL);
	data |= RLC_SAFE_MODE__CMD_MASK;
	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	WREG32(mmRLC_SAFE_MODE, data);

	/* wait for the RLC to acknowledge: CMD bit self-clears */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}
5599
5600 static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid)
5601 {
5602         u32 data;
5603
5604         amdgpu_gfx_off_ctrl(adev, false);
5605
5606         if (amdgpu_sriov_is_pp_one_vf(adev))
5607                 data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
5608         else
5609                 data = RREG32(mmRLC_SPM_VMID);
5610
5611         data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
5612         data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
5613
5614         if (amdgpu_sriov_is_pp_one_vf(adev))
5615                 WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
5616         else
5617                 WREG32(mmRLC_SPM_VMID, data);
5618
5619         amdgpu_gfx_off_ctrl(adev, true);
5620 }
5621
/* RLC helper callbacks shared by all gfx v8 ASICs (named after the first
 * VI part, Iceland/Topaz).
 */
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
	.set_safe_mode = gfx_v8_0_set_safe_mode,
	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
	.init = gfx_v8_0_rlc_init,
	.get_csb_size = gfx_v8_0_get_csb_size,
	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
	.resume = gfx_v8_0_rlc_resume,
	.stop = gfx_v8_0_rlc_stop,
	.reset = gfx_v8_0_rlc_reset,
	.start = gfx_v8_0_rlc_start,
	.update_spm_vmid = gfx_v8_0_update_spm_vmid
};
5636
/*
 * gfx_v8_0_update_medium_grain_clock_gating - toggle MGCG/MGLS/CGTS.
 * @adev: amdgpu device
 * @enable: true to enable medium grain clock gating, false to disable
 *
 * Follows the numbered hardware sequence: memory light sleep, then the
 * MGCG override bits in RLC_CGTT_MGCG_OVERRIDE, with serdes idle waits
 * and BPM serdes commands in between.  The caller is expected to hold
 * RLC safe mode (see gfx_v8_0_update_gfx_clock_gating()).
 */
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, data;

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
				/* 1 - RLC memory Light sleep */
				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);

			/* 2 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
		}

		/* 3 - RLC_CGTT_MGCG_OVERRIDE: clear the override bits
		 * (APUs keep the GRBM override set)
		 */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		if (adev->flags & AMD_IS_APU)
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
		else
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);

		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 5 - clear mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			/* LS only when both MGLS and CGTS_LS are supported */
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (temp != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
		udelay(50);

		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	} else {
		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
		if (temp != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 6 - set mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		udelay(50);

		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	}
}
5736
/*
 * gfx_v8_0_update_coarse_grain_clock_gating - toggle CGCG/CGLS.
 * @adev: amdgpu device
 * @enable: true to enable coarse grain clock gating, false to disable
 *
 * Clears (or sets) the CGCG/CGLS override bits, issues the matching BPM
 * serdes commands, and programs the enable bits in RLC_CGCG_CGLS_CTRL,
 * with serdes idle waits between steps.  GUI idle interrupts are turned
 * off while ungating and re-enabled afterwards.  The caller is expected
 * to hold RLC safe mode (see gfx_v8_0_update_gfx_clock_gating()).
 */
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, temp1, data, data1;

	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		/* 1 - clear the cgcg override */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* : wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 2 - clear cgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 3 - write cmd to set CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);

		/* 4 - enable cgcg */
		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			/* enable cgls*/
			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

			/* also clear the cgls override */
			temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;

			if (temp1 != data1)
				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
		} else {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		/* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
		 * Cmp_busy/GFX_Idle interrupts
		 */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	} else {
		/* disable cntx_empty_int_enable & GFX Idle interrupt */
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

		/* TEST CGCG */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* read gfx register to wake up cgcg */
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to Set CGCG Override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to Clear CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);

		/* disable cgcg, cgls should be disabled too. */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
		/* enable interrupts again for PG */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	}

	gfx_v8_0_wait_for_rlc_serdes(adev);
}
5825 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5826                                             bool enable)
5827 {
5828         amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5829
5830         if (enable) {
5831                 /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
5832                  * ===  MGCG + MGLS + TS(CG/LS) ===
5833                  */
5834                 gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5835                 gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5836         } else {
5837                 /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
5838                  * ===  CGCG + CGLS ===
5839                  */
5840                 gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5841                 gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5842         }
5843
5844         amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5845         return 0;
5846 }
5847
5848 static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5849                                           enum amd_clockgating_state state)
5850 {
5851         uint32_t msg_id, pp_state = 0;
5852         uint32_t pp_support_state = 0;
5853
5854         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5855                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5856                         pp_support_state = PP_STATE_SUPPORT_LS;
5857                         pp_state = PP_STATE_LS;
5858                 }
5859                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5860                         pp_support_state |= PP_STATE_SUPPORT_CG;
5861                         pp_state |= PP_STATE_CG;
5862                 }
5863                 if (state == AMD_CG_STATE_UNGATE)
5864                         pp_state = 0;
5865
5866                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5867                                 PP_BLOCK_GFX_CG,
5868                                 pp_support_state,
5869                                 pp_state);
5870                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5871         }
5872
5873         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5874                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5875                         pp_support_state = PP_STATE_SUPPORT_LS;
5876                         pp_state = PP_STATE_LS;
5877                 }
5878
5879                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5880                         pp_support_state |= PP_STATE_SUPPORT_CG;
5881                         pp_state |= PP_STATE_CG;
5882                 }
5883
5884                 if (state == AMD_CG_STATE_UNGATE)
5885                         pp_state = 0;
5886
5887                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5888                                 PP_BLOCK_GFX_MG,
5889                                 pp_support_state,
5890                                 pp_state);
5891                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5892         }
5893
5894         return 0;
5895 }
5896
5897 static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
5898                                           enum amd_clockgating_state state)
5899 {
5900
5901         uint32_t msg_id, pp_state = 0;
5902         uint32_t pp_support_state = 0;
5903
5904         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5905                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5906                         pp_support_state = PP_STATE_SUPPORT_LS;
5907                         pp_state = PP_STATE_LS;
5908                 }
5909                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5910                         pp_support_state |= PP_STATE_SUPPORT_CG;
5911                         pp_state |= PP_STATE_CG;
5912                 }
5913                 if (state == AMD_CG_STATE_UNGATE)
5914                         pp_state = 0;
5915
5916                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5917                                 PP_BLOCK_GFX_CG,
5918                                 pp_support_state,
5919                                 pp_state);
5920                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5921         }
5922
5923         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
5924                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5925                         pp_support_state = PP_STATE_SUPPORT_LS;
5926                         pp_state = PP_STATE_LS;
5927                 }
5928                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5929                         pp_support_state |= PP_STATE_SUPPORT_CG;
5930                         pp_state |= PP_STATE_CG;
5931                 }
5932                 if (state == AMD_CG_STATE_UNGATE)
5933                         pp_state = 0;
5934
5935                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5936                                 PP_BLOCK_GFX_3D,
5937                                 pp_support_state,
5938                                 pp_state);
5939                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5940         }
5941
5942         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5943                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5944                         pp_support_state = PP_STATE_SUPPORT_LS;
5945                         pp_state = PP_STATE_LS;
5946                 }
5947
5948                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5949                         pp_support_state |= PP_STATE_SUPPORT_CG;
5950                         pp_state |= PP_STATE_CG;
5951                 }
5952
5953                 if (state == AMD_CG_STATE_UNGATE)
5954                         pp_state = 0;
5955
5956                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5957                                 PP_BLOCK_GFX_MG,
5958                                 pp_support_state,
5959                                 pp_state);
5960                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5961         }
5962
5963         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
5964                 pp_support_state = PP_STATE_SUPPORT_LS;
5965
5966                 if (state == AMD_CG_STATE_UNGATE)
5967                         pp_state = 0;
5968                 else
5969                         pp_state = PP_STATE_LS;
5970
5971                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5972                                 PP_BLOCK_GFX_RLC,
5973                                 pp_support_state,
5974                                 pp_state);
5975                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5976         }
5977
5978         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
5979                 pp_support_state = PP_STATE_SUPPORT_LS;
5980
5981                 if (state == AMD_CG_STATE_UNGATE)
5982                         pp_state = 0;
5983                 else
5984                         pp_state = PP_STATE_LS;
5985                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5986                         PP_BLOCK_GFX_CP,
5987                         pp_support_state,
5988                         pp_state);
5989                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5990         }
5991
5992         return 0;
5993 }
5994
5995 static int gfx_v8_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
5996                                           enum amd_clockgating_state state)
5997 {
5998         struct amdgpu_device *adev = ip_block->adev;
5999
6000         if (amdgpu_sriov_vf(adev))
6001                 return 0;
6002
6003         switch (adev->asic_type) {
6004         case CHIP_FIJI:
6005         case CHIP_CARRIZO:
6006         case CHIP_STONEY:
6007                 gfx_v8_0_update_gfx_clock_gating(adev,
6008                                                  state == AMD_CG_STATE_GATE);
6009                 break;
6010         case CHIP_TONGA:
6011                 gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
6012                 break;
6013         case CHIP_POLARIS10:
6014         case CHIP_POLARIS11:
6015         case CHIP_POLARIS12:
6016         case CHIP_VEGAM:
6017                 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6018                 break;
6019         default:
6020                 break;
6021         }
6022         return 0;
6023 }
6024
/* gfx_v8_0_ring_get_rptr - return the ring's read pointer from the
 * CPU-visible rptr shadow.
 */
static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}
6029
6030 static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
6031 {
6032         struct amdgpu_device *adev = ring->adev;
6033
6034         if (ring->use_doorbell)
6035                 /* XXX check if swapping is necessary on BE */
6036                 return *ring->wptr_cpu_addr;
6037         else
6038                 return RREG32(mmCP_RB0_WPTR);
6039 }
6040
6041 static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
6042 {
6043         struct amdgpu_device *adev = ring->adev;
6044
6045         if (ring->use_doorbell) {
6046                 /* XXX check if swapping is necessary on BE */
6047                 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
6048                 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6049         } else {
6050                 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
6051                 (void)RREG32(mmCP_RB0_WPTR);
6052         }
6053 }
6054
/*
 * gfx_v8_0_ring_emit_hdp_flush - emit a WAIT_REG_MEM based HDP flush.
 *
 * Emits a write-wait-write WAIT_REG_MEM on GPU_HDP_FLUSH_REQ /
 * GPU_HDP_FLUSH_DONE.  Compute and KIQ rings wait on the CP bit for
 * their ME/pipe; the gfx ring uses CP0 and executes the wait on the PFP.
 * Rings with an unexpected ME emit nothing.
 */
static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask, reg_mem_engine;

	if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
	    (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
		reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 reg_mem_engine));
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}
6087
/*
 * gfx_v8_0_ring_emit_vgt_flush - flush the VGT.
 *
 * Emits a VS_PARTIAL_FLUSH event followed by a VGT_FLUSH event.
 */
static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
		EVENT_INDEX(4));

	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
		EVENT_INDEX(0));
}
6098
/*
 * gfx_v8_0_ring_emit_ib_gfx - emit an indirect buffer on the gfx ring.
 * @ring: gfx ring
 * @job: owning job, supplies the VMID (may be NULL for VMID 0)
 * @ib: indirect buffer to launch
 * @flags: unused here
 *
 * CE IBs use INDIRECT_BUFFER_CONST, DE IBs INDIRECT_BUFFER.  Under SR-IOV
 * preemptible IBs are flagged PRE_ENB, and preemptible DE IBs with a VMID
 * are preceded by the DE meta-data packet.
 */
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
			gfx_v8_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
6130
/*
 * gfx_v8_0_ring_emit_ib_compute - emit an indirect buffer on a compute ring.
 * @ring: compute ring
 * @job: owning job, supplies the VMID (may be NULL for VMID 0)
 * @ib: indirect buffer to launch
 * @flags: unused here
 */
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
				(2 << 0) |
#endif
				(ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
6164
/*
 * gfx_v8_0_ring_emit_fence_gfx - emit a fence on the gfx ring.
 * @ring: gfx ring
 * @addr: GPU address the fence value is written to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* (64-bit write, interrupt, exec)
 *
 * Emits two EVENT_WRITE_EOP packets: a dummy one writing seq-1 to work
 * around cache flush problems, then the real one writing seq (with cache
 * flush and optional interrupt).
 */
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;

	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with seq one below.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe:
	 * EVENT_WRITE_EOP - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5) |
				 (exec ? EOP_EXEC : 0)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));

}
6203
/*
 * gfx_v8_0_ring_emit_pipeline_sync - wait for previously submitted work
 *
 * Emits a WAIT_REG_MEM polling the ring's fence writeback location until
 * it equals the latest synced fence sequence number.  On gfx rings the
 * wait runs on the PFP, on other rings on the ME.
 */
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference value */
	amdgpu_ring_write(ring, 0xffffffff); /* mask: compare all bits */
	amdgpu_ring_write(ring, 4); /* poll interval */
}
6220
/*
 * gfx_v8_0_ring_emit_vm_flush - emit a VM TLB flush on the ring
 * @vmid: VMID whose page table mappings are invalidated
 * @pd_addr: new page directory base address
 *
 * Lets the GMC emit the actual TLB flush, then emits a WAIT_REG_MEM on
 * mmVM_INVALIDATE_REQUEST (function "always", zero ref/mask — in effect
 * a serialized register read) so following commands observe the
 * completed invalidate.  Gfx rings additionally resync the PFP.
 */
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}
6246
6247 static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
6248 {
6249         return *ring->wptr_cpu_addr;
6250 }
6251
6252 static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
6253 {
6254         struct amdgpu_device *adev = ring->adev;
6255
6256         /* XXX check if swapping is necessary on BE */
6257         *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
6258         WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6259 }
6260
/*
 * gfx_v8_0_ring_emit_fence_compute - emit a fence on a compute ring
 * @addr: GPU address the fence value is written to (dword aligned)
 * @seq: fence sequence number to write
 * @flags: AMDGPU_FENCE_FLAG_64BIT / _INT selection bits
 *
 * Compute rings use RELEASE_MEM instead of EVENT_WRITE_EOP; note the
 * data-sel/int-sel dword precedes the address here, unlike the gfx
 * packet layout.
 */
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
6281
/*
 * gfx_v8_0_ring_emit_fence_kiq - emit a fence on the KIQ ring
 *
 * Writes the 32-bit sequence number to @addr with WRITE_DATA (the KIQ
 * writeback slot is only 32 bits wide, hence the BUG_ON), then, if an
 * interrupt was requested, pokes mmCPC_INT_STATUS to raise one.
 */
static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}
6306
/* Emit a SWITCH_BUFFER packet (zero payload) on the gfx ring. */
static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}
6312
/*
 * gfx_v8_ring_emit_cntxcntl - emit a CONTEXT_CONTROL packet
 * @flags: AMDGPU_HAVE_CTX_SWITCH / AMDGPU_PREAMBLE_IB_PRESENT* bits
 *
 * Builds the load-enable dword based on whether a context switch is
 * happening and whether a preamble IB is present.  Under SR-IOV the CE
 * metadata must be emitted before the packet.
 */
static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v8_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		gfx_v8_0_ring_emit_vgt_flush(ring);
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
6345
/*
 * gfx_v8_0_ring_emit_init_cond_exec - start a conditional execution block
 * @addr: GPU address of the condition dword
 *
 * Emits a COND_EXEC packet whose exec-count dword is left as a dummy
 * value.  Returns the ring offset (in dwords, masked by buf_mask) of
 * that dword so the caller can patch in the real count later.
 */
static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
						  uint64_t addr)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	/* discard following DWs if *cond_exec_gpu_addr==0 */
	amdgpu_ring_write(ring, 0);
	/* remember where the count dword lives... */
	ret = ring->wptr & ring->buf_mask;
	/* patch dummy value later */
	amdgpu_ring_write(ring, 0);
	return ret;
}
6361
6362 static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
6363                                     uint32_t reg_val_offs)
6364 {
6365         struct amdgpu_device *adev = ring->adev;
6366
6367         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
6368         amdgpu_ring_write(ring, 0 |     /* src: register*/
6369                                 (5 << 8) |      /* dst: memory */
6370                                 (1 << 20));     /* write confirm */
6371         amdgpu_ring_write(ring, reg);
6372         amdgpu_ring_write(ring, 0);
6373         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
6374                                 reg_val_offs * 4));
6375         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
6376                                 reg_val_offs * 4));
6377 }
6378
6379 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
6380                                   uint32_t val)
6381 {
6382         uint32_t cmd;
6383
6384         switch (ring->funcs->type) {
6385         case AMDGPU_RING_TYPE_GFX:
6386                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
6387                 break;
6388         case AMDGPU_RING_TYPE_KIQ:
6389                 cmd = 1 << 16; /* no inc addr */
6390                 break;
6391         default:
6392                 cmd = WR_CONFIRM;
6393                 break;
6394         }
6395
6396         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
6397         amdgpu_ring_write(ring, cmd);
6398         amdgpu_ring_write(ring, reg);
6399         amdgpu_ring_write(ring, 0);
6400         amdgpu_ring_write(ring, val);
6401 }
6402
/*
 * gfx_v8_0_wait_reg_mem - emit a WAIT_REG_MEM packet
 * @eng_sel: engine performing the wait (0 = ME, 1 = PFP)
 * @mem_space: 1 = poll memory at addr1:addr0, 0 = poll register addr0
 * @opt: WAIT_REG_MEM operation field (0 = wait)
 * @addr0: low address dword, or register offset when @mem_space == 0
 * @addr1: high address dword (memory polls only)
 * @ref: reference value
 * @mask: bits of the polled value participating in the compare
 * @inv: poll interval
 *
 * The compare function is hardwired to "equal": the packet completes
 * once (polled_value & mask) == ref.
 */
static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
6424
/* Emit a wait (on the ME) until register @reg satisfies
 * (value & mask) == val, with poll interval 0x20. */
static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
6430
6431 static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
6432 {
6433         struct amdgpu_device *adev = ring->adev;
6434         uint32_t value = 0;
6435
6436         value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
6437         value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
6438         value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
6439         value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
6440         WREG32(mmSQ_CMD, value);
6441 }
6442
6443 static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
6444                                                  enum amdgpu_interrupt_state state)
6445 {
6446         WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
6447                      state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6448 }
6449
/*
 * gfx_v8_0_set_compute_eop_interrupt_state - toggle EOP irq for one MEC pipe
 * @me: micro engine (only 1 is valid here)
 * @pipe: pipe within the MEC (0-3)
 * @state: AMDGPU_IRQ_STATE_ENABLE or _DISABLE
 *
 * Read-modify-writes the TIME_STAMP_INT_ENABLE bit of the selected
 * CP_ME1_PIPEn_INT_CNTL register.  Invalid me/pipe values are logged and
 * ignored.
 */
static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		case 1:
			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
			break;
		case 2:
			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
			break;
		case 3:
			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
6500
6501 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6502                                              struct amdgpu_irq_src *source,
6503                                              unsigned type,
6504                                              enum amdgpu_interrupt_state state)
6505 {
6506         WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
6507                      state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6508
6509         return 0;
6510 }
6511
6512 static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6513                                               struct amdgpu_irq_src *source,
6514                                               unsigned type,
6515                                               enum amdgpu_interrupt_state state)
6516 {
6517         WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
6518                      state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6519
6520         return 0;
6521 }
6522
/*
 * gfx_v8_0_set_eop_interrupt_state - dispatch an EOP irq state change
 * @type: AMDGPU_CP_IRQ_* source identifying the gfx ring or a MEC pipe
 *
 * Routes the request to the gfx or per-pipe compute handler.  Note that
 * only MEC1 requests take effect (see
 * gfx_v8_0_set_compute_eop_interrupt_state); MEC2 entries are accepted
 * but rejected there.  Always returns 0.
 */
static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
6561
6562 static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
6563                                          struct amdgpu_irq_src *source,
6564                                          unsigned int type,
6565                                          enum amdgpu_interrupt_state state)
6566 {
6567         int enable_flag;
6568
6569         switch (state) {
6570         case AMDGPU_IRQ_STATE_DISABLE:
6571                 enable_flag = 0;
6572                 break;
6573
6574         case AMDGPU_IRQ_STATE_ENABLE:
6575                 enable_flag = 1;
6576                 break;
6577
6578         default:
6579                 return -EINVAL;
6580         }
6581
6582         WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6583         WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6584         WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6585         WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6586         WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6587         WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6588                      enable_flag);
6589         WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6590                      enable_flag);
6591         WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6592                      enable_flag);
6593         WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6594                      enable_flag);
6595         WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6596                      enable_flag);
6597         WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6598                      enable_flag);
6599         WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6600                      enable_flag);
6601         WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6602                      enable_flag);
6603
6604         return 0;
6605 }
6606
6607 static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
6608                                      struct amdgpu_irq_src *source,
6609                                      unsigned int type,
6610                                      enum amdgpu_interrupt_state state)
6611 {
6612         int enable_flag;
6613
6614         switch (state) {
6615         case AMDGPU_IRQ_STATE_DISABLE:
6616                 enable_flag = 1;
6617                 break;
6618
6619         case AMDGPU_IRQ_STATE_ENABLE:
6620                 enable_flag = 0;
6621                 break;
6622
6623         default:
6624                 return -EINVAL;
6625         }
6626
6627         WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
6628                      enable_flag);
6629
6630         return 0;
6631 }
6632
/*
 * gfx_v8_0_eop_irq - CP end-of-pipe interrupt handler
 *
 * Decodes me/pipe/queue from the IH ring_id (bits [3:2] me, [1:0] pipe,
 * [6:4] queue) and runs fence processing on the matching ring: me 0 is
 * the gfx ring, me 1/2 are the compute MECs.  Always returns 0.
 */
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
			  */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
6664
/*
 * gfx_v8_0_fault - report a CP fault to the DRM scheduler
 *
 * Decodes me/pipe/queue from the IH ring_id (same layout as the EOP
 * handler) and signals a scheduler fault on the ring the offending work
 * was submitted to, so its job can be handled by the scheduler's
 * recovery path.
 */
static void gfx_v8_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}
6691
/* Handle an illegal (privileged) register access from a command stream:
 * log it and fault the offending ring's scheduler. */
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}
6700
/* Handle an illegal instruction in a command stream: log it and fault
 * the offending ring's scheduler. */
static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}
6709
/*
 * gfx_v8_0_cp_ecc_error_irq - CP EDC/ECC error interrupt handler
 *
 * Only logs the error; no recovery is attempted here.  Returns 0 so the
 * interrupt is treated as handled.
 */
static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	/* Terminate the message with '\n' so the log line is not merged
	 * with a following printk fragment. */
	DRM_ERROR("CP EDC/ECC error detected.\n");
	return 0;
}
6717
6718 static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
6719                                   bool from_wq)
6720 {
6721         u32 enc, se_id, sh_id, cu_id;
6722         char type[20];
6723         int sq_edc_source = -1;
6724
6725         enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6726         se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6727
6728         switch (enc) {
6729                 case 0:
6730                         DRM_INFO("SQ general purpose intr detected:"
6731                                         "se_id %d, immed_overflow %d, host_reg_overflow %d,"
6732                                         "host_cmd_overflow %d, cmd_timestamp %d,"
6733                                         "reg_timestamp %d, thread_trace_buff_full %d,"
6734                                         "wlt %d, thread_trace %d.\n",
6735                                         se_id,
6736                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6737                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6738                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6739                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6740                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6741                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6742                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6743                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6744                                         );
6745                         break;
6746                 case 1:
6747                 case 2:
6748
6749                         cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6750                         sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6751
6752                         /*
6753                          * This function can be called either directly from ISR
6754                          * or from BH in which case we can access SQ_EDC_INFO
6755                          * instance
6756                          */
6757                         if (from_wq) {
6758                                 mutex_lock(&adev->grbm_idx_mutex);
6759                                 gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);
6760
6761                                 sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
6762
6763                                 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
6764                                 mutex_unlock(&adev->grbm_idx_mutex);
6765                         }
6766
6767                         if (enc == 1)
6768                                 sprintf(type, "instruction intr");
6769                         else
6770                                 sprintf(type, "EDC/ECC error");
6771
6772                         DRM_INFO(
6773                                 "SQ %s detected: "
6774                                         "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
6775                                         "trap %s, sq_ed_info.source %s.\n",
6776                                         type, se_id, sh_id, cu_id,
6777                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
6778                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
6779                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
6780                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
6781                                         (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
6782                                 );
6783                         break;
6784                 default:
6785                         DRM_ERROR("SQ invalid encoding type\n.");
6786         }
6787 }
6788
/*
 * gfx_v8_0_sq_irq_work_func - bottom half for SQ interrupts
 *
 * Both container_of() calls resolve the same embedded work_struct: once
 * to the owning amdgpu_device, once to the sq_work that holds the IH
 * payload stashed by gfx_v8_0_sq_irq().  Runs in process context, so
 * the parser may sleep and read SQ_EDC_INFO (from_wq = true).
 */
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
{

	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
	struct sq_work *sq_work = container_of(work, struct sq_work, work);

	gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
}
6797
/*
 * gfx_v8_0_sq_irq - top half for SQ interrupts
 *
 * There is only one payload slot (sq_work.ih_data), so when the bottom
 * half is still pending the new payload is parsed directly in the ISR
 * with reduced detail instead of being queued.  Always returns 0.
 */
static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	unsigned ih_data = entry->src_data[0];

	/*
	 * Try to submit work so SQ_EDC_INFO can be accessed from
	 * BH. If previous work submission hasn't finished yet
	 * just print whatever info is possible directly from the ISR.
	 */
	if (work_pending(&adev->gfx.sq_work.work)) {
		gfx_v8_0_parse_sq_irq(adev, ih_data, false);
	} else {
		adev->gfx.sq_work.ih_data = ih_data;
		schedule_work(&adev->gfx.sq_work.work);
	}

	return 0;
}
6818
/* Emit a SURFACE_SYNC on the gfx ring that flushes/invalidates the
 * TCL1, TC, and SH instruction/constant caches over the full address
 * range (CP_COHER_SIZE = 0xffffffff, base 0). */
static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA |
			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
6831
/* Compute-ring variant of the cache sync: ACQUIRE_MEM carries the same
 * coherency actions but takes 64-bit size/base fields. */
static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA |
			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
}
6846
6847
6848 /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
6849 #define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT       0x0000007f
6850 static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6851                                         uint32_t pipe, bool enable)
6852 {
6853         uint32_t val;
6854         uint32_t wcl_cs_reg;
6855
6856         val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
6857
6858         switch (pipe) {
6859         case 0:
6860                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
6861                 break;
6862         case 1:
6863                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
6864                 break;
6865         case 2:
6866                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
6867                 break;
6868         case 3:
6869                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
6870                 break;
6871         default:
6872                 DRM_DEBUG("invalid pipe %d\n", pipe);
6873                 return;
6874         }
6875
6876         amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6877
6878 }
6879
6880 #define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT      0x07ffffff
6881 static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
6882 {
6883         struct amdgpu_device *adev = ring->adev;
6884         uint32_t val;
6885         int i;
6886
6887         /* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
6888          * number of gfx waves. Setting 5 bit will make sure gfx only gets
6889          * around 25% of gpu resources.
6890          */
6891         val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
6892         amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);
6893
6894         /* Restrict waves for normal/low priority compute queues as well
6895          * to get best QoS for high priority compute jobs.
6896          *
6897          * amdgpu controls only 1st ME(0-3 CS pipes).
6898          */
6899         for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
6900                 if (i != ring->pipe)
6901                         gfx_v8_0_emit_wave_limit_cs(ring, i, enable);
6902
6903         }
6904
6905 }
6906
/*
 * gfx_v8_0_reset_kgq - reset a hung kernel gfx queue via a VMID reset
 * @ring: the gfx ring to reset
 * @vmid: VMID whose in-flight work the reset request targets
 *
 * Asks the KIQ to write mmCP_VMID_RESET with the bit for @vmid set, then
 * re-emits the last fence on @ring, waits for the reset field to clear
 * and writes the register back to 0.  Returns 0 on success or a negative
 * errno (not supported under SR-IOV or without KIQ packet manager funcs).
 */
static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;
	u32 tmp;
	int r;

	/* under SR-IOV the host owns queue resets */
	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	/* KIQ ring is shared; serialize submission against other users */
	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, 5)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
	gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* make sure the KIQ actually processed the reset request */
	r = amdgpu_ring_test_ring(kiq_ring);
	if (r)
		return r;

	/* 12 (fence) + 7 (reg wait) + 5 (wreg) dwords */
	if (amdgpu_ring_alloc(ring, 7 + 12 + 5))
		return -ENOMEM;
	gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr,
				     ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC);
	gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff);
	gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0);

	return amdgpu_ring_test_ring(ring);
}
6948
/* IP-level entry points for the GFX 8.0 block: init/teardown, suspend/resume,
 * soft reset and clock-/power-gating control.
 */
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
	.name = "gfx_v8_0",
	.early_init = gfx_v8_0_early_init,
	.late_init = gfx_v8_0_late_init,
	.sw_init = gfx_v8_0_sw_init,
	.sw_fini = gfx_v8_0_sw_fini,
	.hw_init = gfx_v8_0_hw_init,
	.hw_fini = gfx_v8_0_hw_fini,
	.suspend = gfx_v8_0_suspend,
	.resume = gfx_v8_0_resume,
	.is_idle = gfx_v8_0_is_idle,
	.wait_for_idle = gfx_v8_0_wait_for_idle,
	.check_soft_reset = gfx_v8_0_check_soft_reset,
	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
	.soft_reset = gfx_v8_0_soft_reset,
	.post_soft_reset = gfx_v8_0_post_soft_reset,
	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
	.set_powergating_state = gfx_v8_0_set_powergating_state,
	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
};
6969
/* Ring callbacks for gfx queues.  emit_frame_size is the worst-case dword
 * budget per submission; each term below must match the corresponding
 * emit callback's packet size.
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
	.emit_frame_size = /* maximum 215dw if count 16 IBs in */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
		12 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		       the first COND_EXEC jump to the place just
		       prior to this double SWITCH_BUFFER  */
		5 + /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		12 + 12 + /* FENCE x2 */
		2 + /* SWITCH_BUFFER */
		5, /* SURFACE_SYNC */
	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v8_ring_emit_sb,
	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
	.soft_recovery = gfx_v8_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
	.reset = gfx_v8_0_reset_kgq,
};
7016
/* Ring callbacks for MEC compute queues; includes per-queue wave limiting
 * for high-priority compute QoS (emit_wave_limit).
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
		7 + /* gfx_v8_0_emit_mem_sync_compute */
		5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
		15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
	.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
	.emit_fence = gfx_v8_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
	.soft_recovery = gfx_v8_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
	.emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
7051
/* Ring callbacks for the kernel interface queue (KIQ), used by the driver
 * to manage other queues; supports register read/write emission.
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_compute,
	.set_wptr = gfx_v8_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v8_0_ring_emit_gds_switch */
		7 + /* gfx_v8_0_ring_emit_hdp_flush */
		5 + /* hdp_invalidate */
		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
		17 + /* gfx_v8_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
	.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
	.test_ring = gfx_v8_0_ring_test_ring,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v8_0_ring_emit_rreg,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
};
7075
7076 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
7077 {
7078         int i;
7079
7080         adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
7081
7082         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7083                 adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
7084
7085         for (i = 0; i < adev->gfx.num_compute_rings; i++)
7086                 adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
7087 }
7088
/* End-of-pipe interrupt: fence/seqno signalling from the CP rings. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
	.set = gfx_v8_0_set_eop_interrupt_state,
	.process = gfx_v8_0_eop_irq,
};
7093
/* Privileged register access fault interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
	.set = gfx_v8_0_set_priv_reg_fault_state,
	.process = gfx_v8_0_priv_reg_irq,
};
7098
/* Privileged instruction fault interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
	.set = gfx_v8_0_set_priv_inst_fault_state,
	.process = gfx_v8_0_priv_inst_irq,
};
7103
/* CP ECC error interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
	.set = gfx_v8_0_set_cp_ecc_int_state,
	.process = gfx_v8_0_cp_ecc_error_irq,
};
7108
/* SQ (sequencer) interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
	.set = gfx_v8_0_set_sq_int_state,
	.process = gfx_v8_0_sq_irq,
};
7113
/* Wire the GFX8 interrupt source callback tables into the device. */
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	/* one EOP interrupt type per CP ring */
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;

	adev->gfx.cp_ecc_error_irq.num_types = 1;
	adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;

	adev->gfx.sq_irq.num_types = 1;
	adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
}
7131
/* GFX8 reuses the Iceland RLC helper callbacks. */
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}
7136
/* Initialize the GDS (global data share) size info from hardware registers
 * plus the fixed GWS/OA sizes for this ASIC generation.
 */
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC gds info */
	adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
	adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}
7145
7146 static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7147                                                  u32 bitmap)
7148 {
7149         u32 data;
7150
7151         if (!bitmap)
7152                 return;
7153
7154         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7155         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7156
7157         WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
7158 }
7159
7160 static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
7161 {
7162         u32 data, mask;
7163
7164         data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
7165                 RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);
7166
7167         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7168
7169         return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
7170 }
7171
/*
 * gfx_v8_0_get_cu_info - build the per-SE/SH compute unit topology info
 * @adev: amdgpu device
 *
 * Walks every shader engine / shader array, applies the user-requested
 * CU disable masks, reads back the active-CU bitmaps and fills
 * adev->gfx.cu_info: total active CU count, always-on CU masks and the
 * fixed per-CU capabilities for GFX8.
 */
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];	/* one mask per (SE, SH): up to 4 SEs x 2 SHs */
	u32 ao_cu_num;			/* max always-on CUs per shader array */

	memset(cu_info, 0, sizeof(*cu_info));

	/* APUs cap the always-on CU count at 2 */
	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	/* grbm_idx_mutex guards the SE/SH selection below */
	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			/* select this SE/SH so CU config registers apply to it */
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			if (i < 4 && j < 2)
				gfx_v8_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[0][i][j] = bitmap;

			/* count active CUs; the first ao_cu_num found become always-on */
			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter ++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			/* ao_cu_mask packs 8 bits per SH, only for the first 2 SEs */
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	/* restore broadcast to all SEs/SHs */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}
7227
/* IP block descriptor registered for GFX 8.0 ASICs. */
const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};
7236
/* IP block descriptor for GFX 8.1 ASICs; shares the 8.0 callbacks. */
const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 8,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v8_0_ip_funcs,
};
7245
/*
 * gfx_v8_0_ring_emit_ce_meta - write CE metadata into the CSA
 * @ring: gfx ring to emit on
 *
 * Emits a WRITE_DATA packet that stores a zero-initialized CE payload at
 * the ce_payload offset of the per-context save area.  The payload layout
 * and size depend on whether chained IBs are supported (SR-IOV).
 */
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	uint64_t ce_payload_addr;
	int cnt_ce;	/* WRITE_DATA packet count field */
	union {
		struct vi_ce_ib_state regular;
		struct vi_ce_ib_state_chained_ib chained;
	} ce_payload = {};

	/* pick the payload layout/offset matching chained-IB support */
	if (ring->adev->virt.chained_ib_support) {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
		cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
	} else {
		ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
			offsetof(struct vi_gfx_meta_data, ce_payload);
		cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |	/* engine select 2: CE */
				WRITE_DATA_DST_SEL(8) |
				WR_CONFIRM) |
				WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
	/* cnt_ce - 2: payload dwords after the two address dwords */
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
}
7274
/*
 * gfx_v8_0_ring_emit_de_meta - write DE metadata into the CSA
 * @ring: gfx ring to emit on
 *
 * Emits a WRITE_DATA packet that stores a mostly zero-initialized DE
 * payload — carrying the GDS backup address, placed 4KB past the CSA
 * base — at the de_payload offset of the per-context save area.  The
 * payload layout depends on chained-IB support (SR-IOV).
 */
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	uint64_t de_payload_addr, gds_addr, csa_addr;
	int cnt_de;	/* WRITE_DATA packet count field */
	union {
		struct vi_de_ib_state regular;
		struct vi_de_ib_state_chained_ib chained;
	} de_payload = {};

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;	/* GDS backup lives one page past the CSA base */
	if (ring->adev->virt.chained_ib_support) {
		de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
		cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
	} else {
		de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
		de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
		de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
		cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |	/* engine select 1: PFP */
				WRITE_DATA_DST_SEL(8) |
				WR_CONFIRM) |
				WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
	amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
	/* cnt_de - 2: payload dwords after the two address dwords */
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}
This page took 0.467829 seconds and 4 git commands to generate.