/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

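/*
 * Program the per-ASIC "golden" register settings: the family-wide list
 * first, then the chip-specific overrides, and finally the values common
 * to all GFX9 parts.
 */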
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1_rv1,
                                                ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        default:
                break;
        }

        soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                        (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
        adev->gfx.scratch.num_reg = 8;
        adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

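/*
 * Emit a WRITE_DATA packet on @ring that writes @val to the register at
 * offset @reg; @wc requests a write confirmation, @eng_sel selects the
 * engine performing the write.
 */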
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                       bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                                WRITE_DATA_DST_SEL(0) |
                                (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

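/*
 * Emit a WAIT_REG_MEM packet: poll a register (@mem_space == 0) or a
 * dword-aligned memory location (@mem_space == 1) until
 * (value & @mask) == @ref, rechecking every @inv wait intervals.
 */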
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                  int mem_space, int opt, uint32_t addr0,
                                  uint32_t addr1, uint32_t ref, uint32_t mask,
                                  uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                                 /* memory (1) or register (0) */
                                 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                                 WAIT_REG_MEM_OPERATION(opt) | /* wait */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
                                 WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}

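/*
 * Basic ring test: write a magic value to a scratch register through the
 * ring and poll until the write lands or the timeout expires.
 */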
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                amdgpu_gfx_scratch_free(adev, scratch);
                return r;
        }
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                          ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
                          ring->idx, scratch, tmp);
                r = -EINVAL;
        }
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}

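/*
 * Basic IB test: submit an indirect buffer that writes a magic value to a
 * writeback slot, wait on its fence, and verify that the value arrived.
 */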
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;

        unsigned index;
        uint64_t gpu_addr;
        uint32_t tmp;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
        ib.ptr[3] = upper_32_bits(gpu_addr);
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }

        tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF) {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
                DRM_ERROR("ib test on ring %d failed\n", ring->idx);
                r = -EINVAL;
        }

err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_device_wb_free(adev, index);
        return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
        release_firmware(adev->gfx.pfp_fw);
        adev->gfx.pfp_fw = NULL;
        release_firmware(adev->gfx.me_fw);
        adev->gfx.me_fw = NULL;
        release_firmware(adev->gfx.ce_fw);
        adev->gfx.ce_fw = NULL;
        release_firmware(adev->gfx.rlc_fw);
        adev->gfx.rlc_fw = NULL;
        release_firmware(adev->gfx.mec_fw);
        adev->gfx.mec_fw = NULL;
        release_firmware(adev->gfx.mec2_fw);
        adev->gfx.mec2_fw = NULL;

        kfree(adev->gfx.rlc.register_list_format);
}

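/*
 * Request and validate the PFP, ME, CE, RLC, MEC and (optional) MEC2
 * firmware images for this ASIC, cache their version information, and
 * register them for PSP-managed loading when that load type is in use.
 */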
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr;
        const struct rlc_firmware_header_v2_0 *rlc_hdr;
        unsigned int *tmp = NULL;
        unsigned int i = 0;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_RAVEN:
                chip_name = "raven";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
        adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
        adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        if (err)
                goto out;
        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
        adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
        adev->gfx.rlc.save_and_restore_offset =
                        le32_to_cpu(rlc_hdr->save_and_restore_offset);
        adev->gfx.rlc.clear_state_descriptor_offset =
                        le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
        adev->gfx.rlc.avail_scratch_ram_locations =
                        le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
        adev->gfx.rlc.reg_restore_list_size =
                        le32_to_cpu(rlc_hdr->reg_restore_list_size);
        adev->gfx.rlc.reg_list_format_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_start);
        adev->gfx.rlc.reg_list_format_separate_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
        adev->gfx.rlc.starting_offsets_start =
                        le32_to_cpu(rlc_hdr->starting_offsets_start);
        adev->gfx.rlc.reg_list_format_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
        adev->gfx.rlc.reg_list_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_size_bytes);
        adev->gfx.rlc.register_list_format =
                        kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
                                adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
        if (!adev->gfx.rlc.register_list_format) {
                err = -ENOMEM;
                goto out;
        }

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
        for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
                adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

        adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
        for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
                adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
        err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
        if (!err) {
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)
                        adev->gfx.mec2_fw->data;
                adev->gfx.mec2_fw_version =
                        le32_to_cpu(cp_hdr->header.ucode_version);
                adev->gfx.mec2_feature_version =
                        le32_to_cpu(cp_hdr->ucode_feature_version);
        } else {
                err = 0;
                adev->gfx.mec2_fw = NULL;
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
                info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
                info->fw = adev->gfx.pfp_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
                info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
                info->fw = adev->gfx.me_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
                info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
                info->fw = adev->gfx.ce_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
                info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
                info->fw = adev->gfx.rlc_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
                info->fw = adev->gfx.mec_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
                info->fw = adev->gfx.mec_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                if (adev->gfx.mec2_fw) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
                        info->fw = adev->gfx.mec2_fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
                        info->fw = adev->gfx.mec2_fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                }
        }

out:
        if (err) {
                dev_err(adev->dev,
                        "gfx9: Failed to load firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->gfx.pfp_fw);
                adev->gfx.pfp_fw = NULL;
                release_firmware(adev->gfx.me_fw);
                adev->gfx.me_fw = NULL;
                release_firmware(adev->gfx.ce_fw);
                adev->gfx.ce_fw = NULL;
                release_firmware(adev->gfx.rlc_fw);
                adev->gfx.rlc_fw = NULL;
                release_firmware(adev->gfx.mec_fw);
                adev->gfx.mec_fw = NULL;
                release_firmware(adev->gfx.mec2_fw);
                adev->gfx.mec2_fw = NULL;
        }
        return err;
}

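/*
 * Return the size, in dwords, of the clear state buffer filled in by
 * gfx_v9_0_get_csb_buffer() below.
 */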
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }

        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
                                    volatile u32 *buffer)
{
        u32 count = 0, i;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        buffer[count++] = cpu_to_le32(0x80000000);
        buffer[count++] = cpu_to_le32(0x80000000);

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                buffer[count++] =
                                        cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
                                buffer[count++] = cpu_to_le32(ext->reg_index -
                                                PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        buffer[count++] = cpu_to_le32(ext->extent[i]);
                        } else {
                                return;
                        }
                }
        }

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
        buffer[count++] = cpu_to_le32(0);
}

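/*
 * Program the RLC load-balancing (LB) thresholds, counters and CU masks;
 * the feature itself is switched on and off in gfx_v9_0_enable_lbpw().
 */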
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
        WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved
         * but used for RLC_LB_CNTL configuration */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

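/*
 * Copy the CP jump tables of the five microcode engines (CE, PFP, ME,
 * MEC, MEC2) back to back into the RLC cp_table buffer.
 */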
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
        const __le32 *fw_data;
        volatile u32 *dst_ptr;
        int me, i, max_me = 5;
        u32 bo_offset = 0;
        u32 table_offset, table_size;

        /* write the cp table buffer */
        dst_ptr = adev->gfx.rlc.cp_table_ptr;
        for (me = 0; me < max_me; me++) {
                if (me == 0) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.ce_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 1) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.pfp_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 2) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.me_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 3) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 4) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec2_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                }

                for (i = 0; i < table_size; i++) {
                        dst_ptr[bo_offset + i] =
                                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
                }

                bo_offset += table_size;
        }
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
        /* clear state block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                        &adev->gfx.rlc.clear_state_gpu_addr,
                        (void **)&adev->gfx.rlc.cs_ptr);

        /* jump table block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                        &adev->gfx.rlc.cp_table_gpu_addr,
                        (void **)&adev->gfx.rlc.cp_table_ptr);
}

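/*
 * Allocate and fill the RLC objects: the clear state buffer on all ASICs,
 * plus the CP jump table and LB setup on Raven.
 */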
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
        volatile u32 *dst_ptr;
        u32 dws;
        const struct cs_section_def *cs_data;
        int r;

        adev->gfx.rlc.cs_data = gfx9_cs_data;

        cs_data = adev->gfx.rlc.cs_data;

        if (cs_data) {
                /* clear state block */
                adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
                r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                              AMDGPU_GEM_DOMAIN_VRAM,
                                              &adev->gfx.rlc.clear_state_obj,
                                              &adev->gfx.rlc.clear_state_gpu_addr,
                                              (void **)&adev->gfx.rlc.cs_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
                                r);
                        gfx_v9_0_rlc_fini(adev);
                        return r;
                }
                /* set up the cs buffer */
                dst_ptr = adev->gfx.rlc.cs_ptr;
                gfx_v9_0_get_csb_buffer(adev, dst_ptr);
                amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }

        if (adev->asic_type == CHIP_RAVEN) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
                r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
                                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                              &adev->gfx.rlc.cp_table_obj,
                                              &adev->gfx.rlc.cp_table_gpu_addr,
                                              (void **)&adev->gfx.rlc.cp_table_ptr);
                if (r) {
                        dev_err(adev->dev,
                                "(%d) failed to create cp table bo\n", r);
                        gfx_v9_0_rlc_fini(adev);
                        return r;
                }

                rv_init_cp_jump_table(adev);
                amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

                gfx_v9_0_init_lbpw(adev);
        }

        return 0;
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
        amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

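/*
 * Set up the MEC: take ownership of the compute queues, allocate the HPD
 * EOP buffer for the acquired rings, and stage the MEC firmware in GTT.
 */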
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
        int r;
        u32 *hpd;
        const __le32 *fw_data;
        unsigned fw_size;
        u32 *fw;
        size_t mec_hpd_size;

        const struct gfx_firmware_header_v1_0 *mec_hdr;

        bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

        /* take ownership of the relevant compute queues */
        amdgpu_gfx_compute_queue_acquire(adev);
        mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
        if (r) {
                dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

        amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
                 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

        r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.mec_fw_obj,
                                      &adev->gfx.mec.mec_fw_gpu_addr,
                                      (void **)&fw);
        if (r) {
                dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memcpy(fw, fw_data, fw_size);

        amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

        return 0;
}

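/*
 * Read one dword of wave context through the SQ indirect register
 * interface (SQ_IND_INDEX/SQ_IND_DATA).
 */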
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
        WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
                (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
                (address << SQ_IND_INDEX__INDEX__SHIFT) |
                (SQ_IND_INDEX__FORCE_READ_MASK));
        return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
                           uint32_t wave, uint32_t thread,
                           uint32_t regno, uint32_t num, uint32_t *out)
{
        WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
                (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
                (regno << SQ_IND_INDEX__INDEX__SHIFT) |
                (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
                (SQ_IND_INDEX__FORCE_READ_MASK) |
                (SQ_IND_INDEX__AUTO_INCR_MASK));
        while (num--)
                *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
        /* type 1 wave data */
        dst[(*no_fields)++] = 1;
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
                                     uint32_t wave, uint32_t start,
                                     uint32_t size, uint32_t *dst)
{
        wave_read_regs(
                adev, simd, wave, 0,
                start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
                                     uint32_t wave, uint32_t thread,
                                     uint32_t start, uint32_t size,
                                     uint32_t *dst)
{
        wave_read_regs(
                adev, simd, wave, thread,
                start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
                                  u32 me, u32 pipe, u32 q)
{
        soc15_grbm_select(adev, me, pipe, q, 0);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v9_0_select_se_sh,
        .read_wave_data = &gfx_v9_0_read_wave_data,
        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
        .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
        .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};

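/*
 * Derive the gfx configuration for the detected ASIC: hw context count,
 * FIFO sizes, and the fields packed into GB_ADDR_CONFIG (pipes, banks,
 * RBs and shader engines, pipe interleave size).
 */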
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
        u32 gb_addr_config;

        adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_VEGA12:
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
                DRM_INFO("fix gfx.config for vega12\n");
                break;
        case CHIP_RAVEN:
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
        default:
                BUG();
                break;
        }

        adev->gfx.config.gb_addr_config = gb_addr_config;

        adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_PIPES);

        adev->gfx.config.max_tile_pipes =
                adev->gfx.config.gb_addr_config_fields.num_pipes;

        adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_BANKS);
        adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        MAX_COMPRESSED_FRAGS);
        adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_RB_PER_SE);
        adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_SHADER_ENGINES);
        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        PIPE_INTERLEAVE_SIZE));
}

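/*
 * Allocate one NGG buffer in VRAM, sized per shader engine; a @size_se of
 * zero falls back to @default_size_se.
 */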
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
                                   struct amdgpu_ngg_buf *ngg_buf,
                                   int size_se,
                                   int default_size_se)
{
        int r;

        if (size_se < 0) {
                dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
                return -EINVAL;
        }
        size_se = size_se ? size_se : default_size_se;

        ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
        r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
                                    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                    &ngg_buf->bo,
                                    &ngg_buf->gpu_addr,
                                    NULL);
        if (r) {
                dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
                return r;
        }
        ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

        return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < NGG_BUF_MAX; i++)
                amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
                                      &adev->gfx.ngg.buf[i].gpu_addr,
                                      NULL);

        memset(&adev->gfx.ngg.buf[0], 0,
                        sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

        adev->gfx.ngg.init = false;

        return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
        int r;

        if (!amdgpu_ngg || adev->gfx.ngg.init == true)
                return 0;

        /* GDS reserve memory: 64 bytes alignment */
        adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
        adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
        adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
        adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
        adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

        /* Primitive Buffer */
        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
                                    amdgpu_prim_buf_per_se,
                                    64 * 1024);
        if (r) {
                dev_err(adev->dev, "Failed to create Primitive Buffer\n");
                goto err;
        }

        /* Position Buffer */
        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
                                    amdgpu_pos_buf_per_se,
                                    256 * 1024);
        if (r) {
                dev_err(adev->dev, "Failed to create Position Buffer\n");
                goto err;
        }

        /* Control Sideband */
        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
                                    amdgpu_cntl_sb_buf_per_se,
                                    256);
        if (r) {
                dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
                goto err;
        }

        /* Parameter Cache, not created by default */
        if (amdgpu_param_buf_per_se <= 0)
                goto out;

        r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
                                    amdgpu_param_buf_per_se,
                                    512 * 1024);
        if (r) {
                dev_err(adev->dev, "Failed to create Parameter Cache\n");
                goto err;
        }

out:
        adev->gfx.ngg.init = true;
        return 0;
err:
        gfx_v9_0_ngg_fini(adev);
        return r;
}

1197 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1198 {
1199         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1200         int r;
1201         u32 data, base;
1202
1203         if (!amdgpu_ngg)
1204                 return 0;
1205
1206         /* Program buffer size */
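        /* the size fields are programmed in 256-byte units (>> 8); the parameter cache size is in 1KB units (>> 10) */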
1207         data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1208                              adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1209         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1210                              adev->gfx.ngg.buf[NGG_POS].size >> 8);
1211         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1212
1213         data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1214                              adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1215         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1216                              adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1217         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1218
1219         /* Program buffer base address */
1220         base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1221         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1222         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1223
1224         base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1225         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1226         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1227
1228         base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1229         data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1230         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1231
1232         base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1233         data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1234         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1235
1236         base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1237         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1238         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1239
1240         base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1241         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1242         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1243
1244         /* Clear GDS reserved memory */
1245         r = amdgpu_ring_alloc(ring, 17);
1246         if (r) {
1247                 DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
1248                           ring->idx, r);
1249                 return r;
1250         }
1251
1252         gfx_v9_0_write_data_to_reg(ring, 0, false,
1253                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1254                                    (adev->gds.mem.total_size +
1255                                     adev->gfx.ngg.gds_reserve_size) >>
1256                                    AMDGPU_GDS_SHIFT);
1257
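        /* DMA_DATA with src_sel 2 (immediate data, zero here) and dst_sel 1 (GDS) zeroes the reserved region */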
1258         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1259         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1260                                 PACKET3_DMA_DATA_DST_SEL(1) |
1261                                 PACKET3_DMA_DATA_SRC_SEL(2)));
1262         amdgpu_ring_write(ring, 0);
1263         amdgpu_ring_write(ring, 0);
1264         amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1265         amdgpu_ring_write(ring, 0);
1266         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1267                                 adev->gfx.ngg.gds_reserve_size);
1268
1269         gfx_v9_0_write_data_to_reg(ring, 0, false,
1270                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1271
1272         amdgpu_ring_commit(ring);
1273
1274         return 0;
1275 }
1276
1277 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1278                                       int mec, int pipe, int queue)
1279 {
1280         int r;
1281         unsigned irq_type;
1282         struct amdgpu_ring *ring;
1283
1284         ring = &adev->gfx.compute_ring[ring_id];
1285
1286         /* mec0 is me1 */
1287         ring->me = mec + 1;
1288         ring->pipe = pipe;
1289         ring->queue = queue;
1290
1291         ring->ring_obj = NULL;
1292         ring->use_doorbell = true;
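        /* doorbells are 64-bit on vega, i.e. two dword slots per ring, hence the << 1 */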
1293         ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
1294         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1295                                 + (ring_id * GFX9_MEC_HPD_SIZE);
1296         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1297
1298         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1299                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1300                 + ring->pipe;
1301
1302         /* type-2 packets are deprecated on MEC, use type-3 instead */
1303         r = amdgpu_ring_init(adev, ring, 1024,
1304                              &adev->gfx.eop_irq, irq_type);
1305         if (r)
1306                 return r;
1307
1308
1309         return 0;
1310 }
1311
1312 static int gfx_v9_0_sw_init(void *handle)
1313 {
1314         int i, j, k, r, ring_id;
1315         struct amdgpu_ring *ring;
1316         struct amdgpu_kiq *kiq;
1317         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1318
1319         switch (adev->asic_type) {
1320         case CHIP_VEGA10:
1321         case CHIP_VEGA12:
1322         case CHIP_RAVEN:
1323                 adev->gfx.mec.num_mec = 2;
1324                 break;
1325         default:
1326                 adev->gfx.mec.num_mec = 1;
1327                 break;
1328         }
1329
1330         adev->gfx.mec.num_pipe_per_mec = 4;
1331         adev->gfx.mec.num_queue_per_pipe = 8;
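        /* i.e. up to num_mec * num_pipe_per_mec * num_queue_per_pipe hardware queues in total */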
1332
1333         /* KIQ event */
1334         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
1335         if (r)
1336                 return r;
1337
1338         /* EOP Event */
1339         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
1340         if (r)
1341                 return r;
1342
1343         /* Privileged reg */
1344         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
1345                               &adev->gfx.priv_reg_irq);
1346         if (r)
1347                 return r;
1348
1349         /* Privileged inst */
1350         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
1351                               &adev->gfx.priv_inst_irq);
1352         if (r)
1353                 return r;
1354
1355         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1356
1357         gfx_v9_0_scratch_init(adev);
1358
1359         r = gfx_v9_0_init_microcode(adev);
1360         if (r) {
1361                 DRM_ERROR("Failed to load gfx firmware!\n");
1362                 return r;
1363         }
1364
1365         r = gfx_v9_0_rlc_init(adev);
1366         if (r) {
1367                 DRM_ERROR("Failed to init rlc BOs!\n");
1368                 return r;
1369         }
1370
1371         r = gfx_v9_0_mec_init(adev);
1372         if (r) {
1373                 DRM_ERROR("Failed to init MEC BOs!\n");
1374                 return r;
1375         }
1376
1377         /* set up the gfx ring */
1378         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1379                 ring = &adev->gfx.gfx_ring[i];
1380                 ring->ring_obj = NULL;
1381                 if (!i)
1382                         sprintf(ring->name, "gfx");
1383                 else
1384                         sprintf(ring->name, "gfx_%d", i);
1385                 ring->use_doorbell = true;
1386                 ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
1387                 r = amdgpu_ring_init(adev, ring, 1024,
1388                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1389                 if (r)
1390                         return r;
1391         }
1392
1393         /* set up the compute queues - allocate horizontally across pipes */
1394         ring_id = 0;
1395         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1396                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1397                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1398                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1399                                         continue;
1400
1401                                 r = gfx_v9_0_compute_ring_init(adev,
1402                                                                ring_id,
1403                                                                i, k, j);
1404                                 if (r)
1405                                         return r;
1406
1407                                 ring_id++;
1408                         }
1409                 }
1410         }
1411
1412         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1413         if (r) {
1414                 DRM_ERROR("Failed to init KIQ BOs!\n");
1415                 return r;
1416         }
1417
1418         kiq = &adev->gfx.kiq;
1419         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1420         if (r)
1421                 return r;
1422
1423         /* create MQD for all compute queues as well as KIQ for SRIOV case */
1424         r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1425         if (r)
1426                 return r;
1427
1428         /* reserve GDS, GWS and OA resource for gfx */
1429         r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
1430                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
1431                                     &adev->gds.gds_gfx_bo, NULL, NULL);
1432         if (r)
1433                 return r;
1434
1435         r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
1436                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
1437                                     &adev->gds.gws_gfx_bo, NULL, NULL);
1438         if (r)
1439                 return r;
1440
1441         r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
1442                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
1443                                     &adev->gds.oa_gfx_bo, NULL, NULL);
1444         if (r)
1445                 return r;
1446
1447         adev->gfx.ce_ram_size = 0x8000;
1448
1449         gfx_v9_0_gpu_early_init(adev);
1450
1451         r = gfx_v9_0_ngg_init(adev);
1452         if (r)
1453                 return r;
1454
1455         return 0;
1456 }
1457
1458
1459 static int gfx_v9_0_sw_fini(void *handle)
1460 {
1461         int i;
1462         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1463
1464         amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1465         amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1466         amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1467
1468         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1469                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1470         for (i = 0; i < adev->gfx.num_compute_rings; i++)
1471                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1472
1473         amdgpu_gfx_compute_mqd_sw_fini(adev);
1474         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1475         amdgpu_gfx_kiq_fini(adev);
1476
1477         gfx_v9_0_mec_fini(adev);
1478         gfx_v9_0_ngg_fini(adev);
1479         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1480                                 &adev->gfx.rlc.clear_state_gpu_addr,
1481                                 (void **)&adev->gfx.rlc.cs_ptr);
1482         if (adev->asic_type == CHIP_RAVEN) {
1483                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1484                                 &adev->gfx.rlc.cp_table_gpu_addr,
1485                                 (void **)&adev->gfx.rlc.cp_table_ptr);
1486         }
1487         gfx_v9_0_free_microcode(adev);
1488
1489         return 0;
1490 }
1491
1492
1493 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1494 {
1495         /* TODO */
1496 }
1497
1498 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1499 {
1500         u32 data;
1501
1502         if (instance == 0xffffffff)
1503                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1504         else
1505                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1506
1507         if (se_num == 0xffffffff)
1508                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1509         else
1510                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1511
1512         if (sh_num == 0xffffffff)
1513                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1514         else
1515                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1516
1517         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1518 }
1519
1520 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1521 {
1522         u32 data, mask;
1523
1524         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1525         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1526
1527         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1528         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1529
1530         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1531                                          adev->gfx.config.max_sh_per_se);
1532
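        /* the registers report disabled backends, so invert to get the active bitmap */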
1533         return (~data) & mask;
1534 }
1535
1536 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1537 {
1538         int i, j;
1539         u32 data;
1540         u32 active_rbs = 0;
1541         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1542                                         adev->gfx.config.max_sh_per_se;
1543
1544         mutex_lock(&adev->grbm_idx_mutex);
1545         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1546                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1547                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1548                         data = gfx_v9_0_get_rb_active_bitmap(adev);
1549                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1550                                                rb_bitmap_width_per_sh);
1551                 }
1552         }
1553         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1554         mutex_unlock(&adev->grbm_idx_mutex);
1555
1556         adev->gfx.config.backend_enable_mask = active_rbs;
1557         adev->gfx.config.num_rbs = hweight32(active_rbs);
1558 }
1559
1560 #define DEFAULT_SH_MEM_BASES    (0x6000)
1561 #define FIRST_COMPUTE_VMID      (8)
1562 #define LAST_COMPUTE_VMID       (16)
1563 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1564 {
1565         int i;
1566         uint32_t sh_mem_config;
1567         uint32_t sh_mem_bases;
1568
1569         /*
1570          * Configure apertures:
1571          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1572          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1573          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1574          */
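        /* SH_MEM_BASES packs the private base in the low 16 bits and the shared base in the high 16 bits */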
1575         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1576
1577         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1578                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1579                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1580
1581         mutex_lock(&adev->srbm_mutex);
1582         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1583                 soc15_grbm_select(adev, 0, 0, 0, i);
1584                 /* CP and shaders */
1585                 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1586                 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1587         }
1588         soc15_grbm_select(adev, 0, 0, 0, 0);
1589         mutex_unlock(&adev->srbm_mutex);
1590 }
1591
1592 static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
1593 {
1594         u32 tmp;
1595         int i;
1596
1597         WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1598
1599         gfx_v9_0_tiling_mode_table_init(adev);
1600
1601         gfx_v9_0_setup_rb(adev);
1602         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1603         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1604
1605         /* XXX SH_MEM regs */
1606         /* where to put LDS, scratch, GPUVM in FSA64 space */
1607         mutex_lock(&adev->srbm_mutex);
1608         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1609                 soc15_grbm_select(adev, 0, 0, 0, i);
1610                 /* CP and shaders */
1611                 if (i == 0) {
1612                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1613                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1614                         WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1615                         WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1616                 } else {
1617                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1618                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1619                         WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1620                         tmp = adev->gmc.shared_aperture_start >> 48;
1621                         WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1622                 }
1623         }
1624         soc15_grbm_select(adev, 0, 0, 0, 0);
1625
1626         mutex_unlock(&adev->srbm_mutex);
1627
1628         gfx_v9_0_init_compute_vmid(adev);
1629
1630         mutex_lock(&adev->grbm_idx_mutex);
1631         /*
1632          * making sure that the following register writes will be broadcast
1633          * to all the shaders
1634          */
1635         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1636
1637         WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1638                    (adev->gfx.config.sc_prim_fifo_size_frontend <<
1639                         PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1640                    (adev->gfx.config.sc_prim_fifo_size_backend <<
1641                         PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1642                    (adev->gfx.config.sc_hiz_tile_fifo_size <<
1643                         PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1644                    (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1645                         PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1646         mutex_unlock(&adev->grbm_idx_mutex);
1647
1648 }
1649
1650 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1651 {
1652         u32 i, j, k;
1653         u32 mask;
1654
1655         mutex_lock(&adev->grbm_idx_mutex);
1656         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1657                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1658                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1659                         for (k = 0; k < adev->usec_timeout; k++) {
1660                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1661                                         break;
1662                                 udelay(1);
1663                         }
1664                         if (k == adev->usec_timeout) {
1665                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
1666                                                       0xffffffff, 0xffffffff);
1667                                 mutex_unlock(&adev->grbm_idx_mutex);
1668                                 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1669                                          i, j);
1670                                 return;
1671                         }
1672                 }
1673         }
1674         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1675         mutex_unlock(&adev->grbm_idx_mutex);
1676
1677         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1678                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1679                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1680                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1681         for (k = 0; k < adev->usec_timeout; k++) {
1682                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1683                         break;
1684                 udelay(1);
1685         }
1686 }
1687
1688 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1689                                                bool enable)
1690 {
1691         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1692
1693         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1694         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1695         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1696         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
1697
1698         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1699 }
1700
1701 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
1702 {
1703         /* csib */
1704         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
1705                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
1706         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
1707                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1708         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
1709                         adev->gfx.rlc.clear_state_size);
1710 }
1711
1712 static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
1713                                 int indirect_offset,
1714                                 int list_size,
1715                                 int *unique_indirect_regs,
1716                                 int *unique_indirect_reg_count,
1717                                 int max_indirect_reg_count,
1718                                 int *indirect_start_offsets,
1719                                 int *indirect_start_offsets_count,
1720                                 int max_indirect_start_offsets_count)
1721 {
1722         int idx;
1723         bool new_entry = true;
1724
1725         for (; indirect_offset < list_size; indirect_offset++) {
1726
1727                 if (new_entry) {
1728                         new_entry = false;
1729                         indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
1730                         *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
1731                         BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
1732                 }
1733
1734                 if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
1735                         new_entry = true;
1736                         continue;
1737                 }
1738
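                /* step over the entry's two leading dwords to reach the indirect register offset */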
1739                 indirect_offset += 2;
1740
1741                 /* look for a matching index */
1742                 for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
1743                         if (unique_indirect_regs[idx] ==
1744                                 register_list_format[indirect_offset])
1745                                 break;
1746                 }
1747
1748                 if (idx >= *unique_indirect_reg_count) {
1749                         unique_indirect_regs[*unique_indirect_reg_count] =
1750                                 register_list_format[indirect_offset];
1751                         idx = *unique_indirect_reg_count;
1752                         *unique_indirect_reg_count = *unique_indirect_reg_count + 1;
1753                         BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
1754                 }
1755
1756                 register_list_format[indirect_offset] = idx;
1757         }
1758 }
1759
1760 static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1761 {
1762         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1763         int unique_indirect_reg_count = 0;
1764
1765         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1766         int indirect_start_offsets_count = 0;
1767
1768         int list_size = 0;
1769         int i = 0;
1770         u32 tmp = 0;
1771
1772         u32 *register_list_format =
1773                 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1774         if (!register_list_format)
1775                 return -ENOMEM;
1776         memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1777                 adev->gfx.rlc.reg_list_format_size_bytes);
1778
1779         /* setup unique_indirect_regs array and indirect_start_offsets array */
1780         gfx_v9_0_parse_ind_reg_list(register_list_format,
1781                                 GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
1782                                 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1783                                 unique_indirect_regs,
1784                                 &unique_indirect_reg_count,
1785                                 ARRAY_SIZE(unique_indirect_regs),
1786                                 indirect_start_offsets,
1787                                 &indirect_start_offsets_count,
1788                                 ARRAY_SIZE(indirect_start_offsets));
1789
1790         /* enable auto inc in case it is disabled */
1791         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1792         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1793         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1794
1795         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1796         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1797                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1798         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1799                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1800                         adev->gfx.rlc.register_restore[i]);
1801
1802         /* load direct register */
1803         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
1804         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1805                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1806                         adev->gfx.rlc.register_restore[i]);
1807
1808         /* load indirect register */
1809         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1810                 adev->gfx.rlc.reg_list_format_start);
1811         for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
1812                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1813                         register_list_format[i]);
1814
1815         /* set save/restore list size */
1816         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
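        /* entries are offset/value pairs, so the programmed size is half the dword count */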
1817         list_size = list_size >> 1;
1818         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1819                 adev->gfx.rlc.reg_restore_list_size);
1820         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1821
1822         /* write the starting offsets to RLC scratch ram */
1823         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1824                 adev->gfx.rlc.starting_offsets_start);
1825         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1826                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1827                         indirect_start_offsets[i]);
1828
1829         /* load unique indirect regs */
1830         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
1831                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
1832                         unique_indirect_regs[i] & 0x3FFFF);
1833                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
1834                         unique_indirect_regs[i] >> 20);
1835         }
1836
1837         kfree(register_list_format);
1838         return 0;
1839 }
1840
1841 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
1842 {
1843         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
1844 }
1845
1846 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
1847                                              bool enable)
1848 {
1849         uint32_t data = 0;
1850         uint32_t default_data = 0;
1851
1852         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
1853         if (enable) {
1854                 /* enable GFXIP control over CGPG */
1855                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1856                 if (default_data != data)
1857                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1858
1859                 /* update status */
1860                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
1861                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
1862                 if (default_data != data)
1863                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1864         } else {
1865                 /* restore GFXIP control over CGPG */
1866                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1867                 if (default_data != data)
1868                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1869         }
1870 }
1871
1872 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
1873 {
1874         uint32_t data = 0;
1875
1876         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1877                               AMD_PG_SUPPORT_GFX_SMG |
1878                               AMD_PG_SUPPORT_GFX_DMG)) {
1879                 /* init IDLE_POLL_COUNT = 0x60 */
1880                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
1881                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
1882                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
1883                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
1884
1885                 /* init RLC PG Delay */
1886                 data = 0;
1887                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
1888                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
1889                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
1890                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
1891                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
1892
1893                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
1894                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
1895                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
1896                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
1897
1898                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
1899                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
1900                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
1901                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
1902
1903                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
1904                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
1905
1906                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
1907                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
1908                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
1909
1910                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
1911         }
1912 }
1913
1914 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
1915                                                 bool enable)
1916 {
1917         uint32_t data = 0;
1918         uint32_t default_data = 0;
1919
1920         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1921         data = REG_SET_FIELD(data, RLC_PG_CNTL,
1922                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
1923                              enable ? 1 : 0);
1924         if (default_data != data)
1925                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1926 }
1927
1928 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
1929                                                 bool enable)
1930 {
1931         uint32_t data = 0;
1932         uint32_t default_data = 0;
1933
1934         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1935         data = REG_SET_FIELD(data, RLC_PG_CNTL,
1936                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
1937                              enable ? 1 : 0);
1938         if (default_data != data)
1939                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1940 }
1941
1942 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
1943                                         bool enable)
1944 {
1945         uint32_t data = 0;
1946         uint32_t default_data = 0;
1947
1948         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1949         data = REG_SET_FIELD(data, RLC_PG_CNTL,
1950                              CP_PG_DISABLE,
1951                              enable ? 0 : 1);
1952         if (default_data != data)
1953                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1954 }
1955
1956 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
1957                                                 bool enable)
1958 {
1959         uint32_t data, default_data;
1960
1961         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1962         data = REG_SET_FIELD(data, RLC_PG_CNTL,
1963                              GFX_POWER_GATING_ENABLE,
1964                              enable ? 1 : 0);
1965         if (default_data != data)
1966                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1967 }
1968
1969 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
1970                                                 bool enable)
1971 {
1972         uint32_t data, default_data;
1973
1974         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1975         data = REG_SET_FIELD(data, RLC_PG_CNTL,
1976                              GFX_PIPELINE_PG_ENABLE,
1977                              enable ? 1 : 0);
1978         if (default_data != data)
1979                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1980
1981         if (!enable)
1982                 /* read any GFX register to wake up GFX */
1983                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
1984 }
1985
1986 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
1987                                                        bool enable)
1988 {
1989         uint32_t data, default_data;
1990
1991         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1992         data = REG_SET_FIELD(data, RLC_PG_CNTL,
1993                              STATIC_PER_CU_PG_ENABLE,
1994                              enable ? 1 : 0);
1995         if (default_data != data)
1996                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1997 }
1998
1999 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2000                                                 bool enable)
2001 {
2002         uint32_t data, default_data;
2003
2004         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2005         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2006                              DYN_PER_CU_PG_ENABLE,
2007                              enable ? 1 : 0);
2008         if (default_data != data)
2009                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2010 }
2011
2012 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2013 {
2014         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2015                               AMD_PG_SUPPORT_GFX_SMG |
2016                               AMD_PG_SUPPORT_GFX_DMG |
2017                               AMD_PG_SUPPORT_CP |
2018                               AMD_PG_SUPPORT_GDS |
2019                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2020                 gfx_v9_0_init_csb(adev);
2021                 gfx_v9_0_init_rlc_save_restore_list(adev);
2022                 gfx_v9_0_enable_save_restore_machine(adev);
2023
2024                 if (adev->asic_type == CHIP_RAVEN) {
2025                         WREG32(mmRLC_JUMP_TABLE_RESTORE,
2026                                 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2027                         gfx_v9_0_init_gfx_power_gating(adev);
2028
2029                         if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
2030                                 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
2031                                 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
2032                         } else {
2033                                 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
2034                                 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
2035                         }
2036
2037                         if (adev->pg_flags & AMD_PG_SUPPORT_CP)
2038                                 gfx_v9_0_enable_cp_power_gating(adev, true);
2039                         else
2040                                 gfx_v9_0_enable_cp_power_gating(adev, false);
2041                 }
2042         }
2043 }
2044
2045 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2046 {
2047         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2048         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2049         gfx_v9_0_wait_for_rlc_serdes(adev);
2050 }
2051
2052 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2053 {
2054         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2055         udelay(50);
2056         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2057         udelay(50);
2058 }
2059
2060 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2061 {
2062 #ifdef AMDGPU_RLC_DEBUG_RETRY
2063         u32 rlc_ucode_ver;
2064 #endif
2065
2066         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2067
2068         /* on APUs the GUI idle interrupt is enabled only after the CP is initialized */
2069         if (!(adev->flags & AMD_IS_APU))
2070                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2071
2072         udelay(50);
2073
2074 #ifdef AMDGPU_RLC_DEBUG_RETRY
2075         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2076         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2077         if (rlc_ucode_ver == 0x108) {
2078                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2079                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2080                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2081                  * default is 0x9C4 to create a 100us interval */
2082                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2083                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2084                  * to disable the page fault retry interrupts, default is
2085                  * 0x100 (256) */
2086                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2087         }
2088 #endif
2089 }
2090
2091 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2092 {
2093         const struct rlc_firmware_header_v2_0 *hdr;
2094         const __le32 *fw_data;
2095         unsigned i, fw_size;
2096
2097         if (!adev->gfx.rlc_fw)
2098                 return -EINVAL;
2099
2100         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2101         amdgpu_ucode_print_rlc_hdr(&hdr->header);
2102
2103         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2104                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2105         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2106
2107         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2108                         RLCG_UCODE_LOADING_START_ADDRESS);
2109         for (i = 0; i < fw_size; i++)
2110                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2111         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2112
2113         return 0;
2114 }
2115
2116 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2117 {
2118         int r;
2119
2120         if (amdgpu_sriov_vf(adev)) {
2121                 gfx_v9_0_init_csb(adev);
2122                 return 0;
2123         }
2124
2125         gfx_v9_0_rlc_stop(adev);
2126
2127         /* disable CG */
2128         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2129
2130         /* disable PG */
2131         WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2132
2133         gfx_v9_0_rlc_reset(adev);
2134
2135         gfx_v9_0_init_pg(adev);
2136
2137         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2138                 /* legacy rlc firmware loading */
2139                 r = gfx_v9_0_rlc_load_microcode(adev);
2140                 if (r)
2141                         return r;
2142         }
2143
2144         if (adev->asic_type == CHIP_RAVEN) {
2145                 if (amdgpu_lbpw != 0)
2146                         gfx_v9_0_enable_lbpw(adev, true);
2147                 else
2148                         gfx_v9_0_enable_lbpw(adev, false);
2149         }
2150
2151         gfx_v9_0_rlc_start(adev);
2152
2153         return 0;
2154 }
2155
2156 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2157 {
2158         int i;
2159         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2160
2161         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2162         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2163         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2164         if (!enable) {
2165                 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2166                         adev->gfx.gfx_ring[i].ready = false;
2167         }
2168         WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2169         udelay(50);
2170 }
2171
2172 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2173 {
2174         const struct gfx_firmware_header_v1_0 *pfp_hdr;
2175         const struct gfx_firmware_header_v1_0 *ce_hdr;
2176         const struct gfx_firmware_header_v1_0 *me_hdr;
2177         const __le32 *fw_data;
2178         unsigned i, fw_size;
2179
2180         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2181                 return -EINVAL;
2182
2183         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2184                 adev->gfx.pfp_fw->data;
2185         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2186                 adev->gfx.ce_fw->data;
2187         me_hdr = (const struct gfx_firmware_header_v1_0 *)
2188                 adev->gfx.me_fw->data;
2189
2190         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2191         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2192         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2193
2194         gfx_v9_0_cp_gfx_enable(adev, false);
2195
2196         /* PFP */
2197         fw_data = (const __le32 *)
2198                 (adev->gfx.pfp_fw->data +
2199                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2200         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2201         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2202         for (i = 0; i < fw_size; i++)
2203                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2204         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2205
2206         /* CE */
2207         fw_data = (const __le32 *)
2208                 (adev->gfx.ce_fw->data +
2209                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2210         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2211         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2212         for (i = 0; i < fw_size; i++)
2213                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2214         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2215
2216         /* ME */
2217         fw_data = (const __le32 *)
2218                 (adev->gfx.me_fw->data +
2219                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2220         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2221         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2222         for (i = 0; i < fw_size; i++)
2223                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2224         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2225
2226         return 0;
2227 }
2228
2229 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2230 {
2231         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2232         const struct cs_section_def *sect = NULL;
2233         const struct cs_extent_def *ext = NULL;
2234         int r, i, tmp;
2235
2236         /* init the CP */
2237         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2238         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2239
2240         gfx_v9_0_cp_gfx_enable(adev, true);
2241
2242         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2243         if (r) {
2244                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2245                 return r;
2246         }
2247
2248         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2249         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2250
2251         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2252         amdgpu_ring_write(ring, 0x80000000);
2253         amdgpu_ring_write(ring, 0x80000000);
2254
2255         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2256                 for (ext = sect->section; ext->extent != NULL; ++ext) {
2257                         if (sect->id == SECT_CONTEXT) {
2258                                 amdgpu_ring_write(ring,
2259                                        PACKET3(PACKET3_SET_CONTEXT_REG,
2260                                                ext->reg_count));
2261                                 amdgpu_ring_write(ring,
2262                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2263                                 for (i = 0; i < ext->reg_count; i++)
2264                                         amdgpu_ring_write(ring, ext->extent[i]);
2265                         }
2266                 }
2267         }
2268
2269         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2270         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2271
2272         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2273         amdgpu_ring_write(ring, 0);
2274
2275         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2276         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2277         amdgpu_ring_write(ring, 0x8000);
2278         amdgpu_ring_write(ring, 0x8000);
2279
2280         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2281         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2282                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2283         amdgpu_ring_write(ring, tmp);
2284         amdgpu_ring_write(ring, 0);
2285
2286         amdgpu_ring_commit(ring);
2287
2288         return 0;
2289 }
2290
2291 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2292 {
2293         struct amdgpu_ring *ring;
2294         u32 tmp;
2295         u32 rb_bufsz;
2296         u64 rb_addr, rptr_addr, wptr_gpu_addr;
2297
2298         /* Set the write pointer delay */
2299         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2300
2301         /* set the RB to use vmid 0 */
2302         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2303
2304         /* Set ring buffer size */
2305         ring = &adev->gfx.gfx_ring[0];
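        /* compute log2 of the ring size in 8-byte units for the RB_BUFSZ field */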
2306         rb_bufsz = order_base_2(ring->ring_size / 8);
2307         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2308         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2309 #ifdef __BIG_ENDIAN
2310         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2311 #endif
2312         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2313
2314         /* Initialize the ring buffer's write pointers */
2315         ring->wptr = 0;
2316         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2317         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2318
2319         /* set the wb address whether it's enabled or not */
2320         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2321         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2322         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2323
2324         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2325         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2326         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2327
2328         mdelay(1);
2329         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2330
2331         rb_addr = ring->gpu_addr >> 8;
2332         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2333         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2334
2335         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2336         if (ring->use_doorbell) {
2337                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2338                                     DOORBELL_OFFSET, ring->doorbell_index);
2339                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2340                                     DOORBELL_EN, 1);
2341         } else {
2342                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2343         }
2344         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2345
2346         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2347                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
2348         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2349
2350         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2351                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2352
2353
2354         /* start the ring */
2355         gfx_v9_0_cp_gfx_start(adev);
2356         ring->ready = true;
2357
2358         return 0;
2359 }
2360
2361 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2362 {
2363         int i;
2364
2365         if (enable) {
2366                 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2367         } else {
2368                 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2369                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2370                 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2371                         adev->gfx.compute_ring[i].ready = false;
2372                 adev->gfx.kiq.ring.ready = false;
2373         }
2374         udelay(50);
2375 }
2376
2377 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2378 {
2379         const struct gfx_firmware_header_v1_0 *mec_hdr;
2380         const __le32 *fw_data;
2381         unsigned i;
2382         u32 tmp;
2383
2384         if (!adev->gfx.mec_fw)
2385                 return -EINVAL;
2386
2387         gfx_v9_0_cp_compute_enable(adev, false);
2388
2389         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2390         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2391
2392         fw_data = (const __le32 *)
2393                 (adev->gfx.mec_fw->data +
2394                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2395         tmp = 0;
2396         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2397         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2398         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2399
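        /* point the CPC instruction cache at the MEC firmware's GPU address */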
2400         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2401                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2402         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2403                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2404
2405         /* MEC1 */
2406         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2407                          mec_hdr->jt_offset);
2408         for (i = 0; i < mec_hdr->jt_size; i++)
2409                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2410                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2411
2412         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2413                         adev->gfx.mec_fw_version);
2414         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2415
2416         return 0;
2417 }
2418
2419 /* KIQ functions */
2420 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2421 {
2422         uint32_t tmp;
2423         struct amdgpu_device *adev = ring->adev;
2424
2425         /* tell the RLC which queue is the KIQ */
2426         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2427         tmp &= 0xffffff00;
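        /* low byte selects the KIQ's me/pipe/queue; the 0x80 written below presumably acts as the enable bit */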
2428         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2429         WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2430         tmp |= 0x80;
2431         WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2432 }
2433
2434 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2435 {
2436         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2437         uint32_t scratch, tmp = 0;
2438         uint64_t queue_mask = 0;
2439         int r, i;
2440
2441         for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2442                 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2443                         continue;
2444
2445                 /* This situation may be hit in the future if a new HW
2446                  * generation exposes more than 64 queues. If so, the
2447                  * definition of queue_mask needs updating */
2448                 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2449                         DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2450                         break;
2451                 }
2452
2453                 queue_mask |= (1ull << i);
2454         }
2455
2456         r = amdgpu_gfx_scratch_get(adev, &scratch);
2457         if (r) {
2458                 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2459                 return r;
2460         }
2461         WREG32(scratch, 0xCAFEDEAD);
2462
2463         r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2464         if (r) {
2465                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2466                 amdgpu_gfx_scratch_free(adev, scratch);
2467                 return r;
2468         }
2469
2470         /* set resources */
2471         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2472         amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2473                           PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2474         amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2475         amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2476         amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2477         amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2478         amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2479         amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2480         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2481                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2482                 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2483                 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2484
2485                 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
                amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2488                                   PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2489                                   PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2490                                   PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2491                                   PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                                  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | /* 0-based engine field: MEC ME1 -> 0, ME2 -> 1 */
2493                                   PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2494                                   PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2495                                   PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2496                                   PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2497                 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2498                 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2499                 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2500                 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2501                 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2502         }
2503         /* write to scratch for completion */
2504         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2505         amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2506         amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2507         amdgpu_ring_commit(kiq_ring);
2508
2509         for (i = 0; i < adev->usec_timeout; i++) {
2510                 tmp = RREG32(scratch);
2511                 if (tmp == 0xDEADBEEF)
2512                         break;
2513                 DRM_UDELAY(1);
2514         }
2515         if (i >= adev->usec_timeout) {
2516                 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2517                           scratch, tmp);
2518                 r = -EINVAL;
2519         }
2520         amdgpu_gfx_scratch_free(adev, scratch);
2521
2522         return r;
2523 }
2524
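/*
 * Fill in the MQD (Memory Queue Descriptor) for a compute queue.  The MQD
 * mirrors the HQD register state; it is either written into the HQD
 * registers directly by the driver (KIQ path below) or handed to the CP
 * firmware via a MAP_QUEUES packet (KCQ path).
 */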
2525 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2526 {
2527         struct amdgpu_device *adev = ring->adev;
2528         struct v9_mqd *mqd = ring->mqd_ptr;
2529         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2530         uint32_t tmp;
2531
2532         mqd->header = 0xC0310800;
2533         mqd->compute_pipelinestat_enable = 0x00000001;
2534         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2535         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2536         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2537         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2538         mqd->compute_misc_reserved = 0x00000003;
2539
2540         mqd->dynamic_cu_mask_addr_lo =
2541                 lower_32_bits(ring->mqd_gpu_addr
2542                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2543         mqd->dynamic_cu_mask_addr_hi =
2544                 upper_32_bits(ring->mqd_gpu_addr
2545                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2546
2547         eop_base_addr = ring->eop_gpu_addr >> 8;
2548         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2549         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2550
2551         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2552         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2553         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2554                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2555
2556         mqd->cp_hqd_eop_control = tmp;
2557
2558         /* enable doorbell? */
2559         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2560
2561         if (ring->use_doorbell) {
2562                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2563                                     DOORBELL_OFFSET, ring->doorbell_index);
2564                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2565                                     DOORBELL_EN, 1);
2566                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2567                                     DOORBELL_SOURCE, 0);
2568                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2569                                     DOORBELL_HIT, 0);
2570         } else {
2571                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2572                                          DOORBELL_EN, 0);
2573         }
2574
2575         mqd->cp_hqd_pq_doorbell_control = tmp;
2576
2577         /* disable the queue if it's active */
2578         ring->wptr = 0;
2579         mqd->cp_hqd_dequeue_request = 0;
2580         mqd->cp_hqd_pq_rptr = 0;
2581         mqd->cp_hqd_pq_wptr_lo = 0;
2582         mqd->cp_hqd_pq_wptr_hi = 0;
2583
2584         /* set the pointer to the MQD */
2585         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2586         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2587
2588         /* set MQD vmid to 0 */
2589         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2590         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2591         mqd->cp_mqd_control = tmp;
2592
        /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2594         hqd_gpu_addr = ring->gpu_addr >> 8;
2595         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2596         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2597
2598         /* set up the HQD, this is similar to CP_RB0_CNTL */
2599         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2600         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2601                             (order_base_2(ring->ring_size / 4) - 1));
2602         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2603                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2604 #ifdef __BIG_ENDIAN
2605         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2606 #endif
2607         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2608         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2609         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2610         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2611         mqd->cp_hqd_pq_control = tmp;
2612
2613         /* set the wb address whether it's enabled or not */
2614         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2615         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2616         mqd->cp_hqd_pq_rptr_report_addr_hi =
2617                 upper_32_bits(wb_gpu_addr) & 0xffff;
2618
2619         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2620         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2621         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2622         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2623
2624         tmp = 0;
2625         /* enable the doorbell if requested */
2626         if (ring->use_doorbell) {
2627                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2628                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2629                                 DOORBELL_OFFSET, ring->doorbell_index);
2630
2631                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2632                                          DOORBELL_EN, 1);
2633                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2634                                          DOORBELL_SOURCE, 0);
2635                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2636                                          DOORBELL_HIT, 0);
2637         }
2638
2639         mqd->cp_hqd_pq_doorbell_control = tmp;
2640
2641         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2642         ring->wptr = 0;
2643         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2644
2645         /* set the vmid for the queue */
2646         mqd->cp_hqd_vmid = 0;
2647
2648         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2649         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2650         mqd->cp_hqd_persistent_state = tmp;
2651
2652         /* set MIN_IB_AVAIL_SIZE */
2653         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2654         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2655         mqd->cp_hqd_ib_control = tmp;
2656
2657         /* activate the queue */
2658         mqd->cp_hqd_active = 1;
2659
2660         return 0;
2661 }
2662
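/*
 * Program the HQD registers directly from the prepared MQD.  Only the KIQ
 * itself is brought up this way, since it is the queue that will service
 * MAP_QUEUES packets for everything else.
 */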
2663 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2664 {
2665         struct amdgpu_device *adev = ring->adev;
2666         struct v9_mqd *mqd = ring->mqd_ptr;
2667         int j;
2668
2669         /* disable wptr polling */
2670         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2671
2672         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2673                mqd->cp_hqd_eop_base_addr_lo);
2674         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2675                mqd->cp_hqd_eop_base_addr_hi);
2676
2677         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2678         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2679                mqd->cp_hqd_eop_control);
2680
2681         /* enable doorbell? */
2682         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2683                mqd->cp_hqd_pq_doorbell_control);
2684
2685         /* disable the queue if it's active */
2686         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2687                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2688                 for (j = 0; j < adev->usec_timeout; j++) {
2689                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2690                                 break;
2691                         udelay(1);
2692                 }
2693                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2694                        mqd->cp_hqd_dequeue_request);
2695                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2696                        mqd->cp_hqd_pq_rptr);
2697                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2698                        mqd->cp_hqd_pq_wptr_lo);
2699                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2700                        mqd->cp_hqd_pq_wptr_hi);
2701         }
2702
2703         /* set the pointer to the MQD */
2704         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2705                mqd->cp_mqd_base_addr_lo);
2706         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2707                mqd->cp_mqd_base_addr_hi);
2708
2709         /* set MQD vmid to 0 */
2710         WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2711                mqd->cp_mqd_control);
2712
        /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2714         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2715                mqd->cp_hqd_pq_base_lo);
2716         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2717                mqd->cp_hqd_pq_base_hi);
2718
2719         /* set up the HQD, this is similar to CP_RB0_CNTL */
2720         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2721                mqd->cp_hqd_pq_control);
2722
2723         /* set the wb address whether it's enabled or not */
2724         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2725                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
2726         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2727                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
2728
2729         /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2730         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2731                mqd->cp_hqd_pq_wptr_poll_addr_lo);
2732         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2733                mqd->cp_hqd_pq_wptr_poll_addr_hi);
2734
2735         /* enable the doorbell if requested */
2736         if (ring->use_doorbell) {
2737                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2739                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2740                                         (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2741         }
2742
2743         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2744                mqd->cp_hqd_pq_doorbell_control);
2745
2746         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2747         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2748                mqd->cp_hqd_pq_wptr_lo);
2749         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2750                mqd->cp_hqd_pq_wptr_hi);
2751
2752         /* set the vmid for the queue */
2753         WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2754
2755         WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2756                mqd->cp_hqd_persistent_state);
2757
2758         /* activate the queue */
2759         WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2760                mqd->cp_hqd_active);
2761
2762         if (ring->use_doorbell)
2763                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2764
2765         return 0;
2766 }
2767
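/*
 * Tear down the KIQ HQD: request a dequeue, wait for the queue to go idle,
 * and fall back to forcing CP_HQD_ACTIVE off if the request times out.
 */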
2768 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
2769 {
2770         struct amdgpu_device *adev = ring->adev;
2771         int j;
2772
2773         /* disable the queue if it's active */
2774         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2775
2776                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2777
2778                 for (j = 0; j < adev->usec_timeout; j++) {
2779                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2780                                 break;
2781                         udelay(1);
2782                 }
2783
                if (j == adev->usec_timeout) {
2785                         DRM_DEBUG("KIQ dequeue request failed.\n");
2786
2787                         /* Manual disable if dequeue request times out */
2788                         WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
2789                 }
2790
2791                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2792                       0);
2793         }
2794
2795         WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
2796         WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
2797         WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
2798         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2799         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
2800         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
2801         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
2802         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
2803
2804         return 0;
2805 }
2806
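/*
 * Bring up the KIQ.  On a fresh init the MQD is built from scratch and a
 * backup copy is kept; on GPU reset the saved MQD is restored instead so
 * the queue comes back with exactly the same state.
 */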
2807 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2808 {
2809         struct amdgpu_device *adev = ring->adev;
2810         struct v9_mqd *mqd = ring->mqd_ptr;
2811         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2812
2813         gfx_v9_0_kiq_setting(ring);
2814
2815         if (adev->in_gpu_reset) { /* for GPU_RESET case */
2816                 /* reset MQD to a clean status */
2817                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2818                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2819
2820                 /* reset ring buffer */
2821                 ring->wptr = 0;
2822                 amdgpu_ring_clear_ring(ring);
2823
2824                 mutex_lock(&adev->srbm_mutex);
2825                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2826                 gfx_v9_0_kiq_init_register(ring);
2827                 soc15_grbm_select(adev, 0, 0, 0, 0);
2828                 mutex_unlock(&adev->srbm_mutex);
2829         } else {
2830                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2831                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2832                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2833                 mutex_lock(&adev->srbm_mutex);
2834                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2835                 gfx_v9_0_mqd_init(ring);
2836                 gfx_v9_0_kiq_init_register(ring);
2837                 soc15_grbm_select(adev, 0, 0, 0, 0);
2838                 mutex_unlock(&adev->srbm_mutex);
2839
2840                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2841                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2842         }
2843
2844         return 0;
2845 }
2846
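/*
 * Bring up a KCQ.  Unlike the KIQ, only the MQD is prepared here; the HQD
 * itself is programmed later by the CP firmware when the queue is mapped
 * through gfx_v9_0_kiq_kcq_enable().
 */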
2847 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2848 {
2849         struct amdgpu_device *adev = ring->adev;
2850         struct v9_mqd *mqd = ring->mqd_ptr;
2851         int mqd_idx = ring - &adev->gfx.compute_ring[0];
2852
2853         if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
2854                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2855                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2856                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2857                 mutex_lock(&adev->srbm_mutex);
2858                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2859                 gfx_v9_0_mqd_init(ring);
2860                 soc15_grbm_select(adev, 0, 0, 0, 0);
2861                 mutex_unlock(&adev->srbm_mutex);
2862
2863                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2864                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2865         } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
2866                 /* reset MQD to a clean status */
2867                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2868                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2869
2870                 /* reset ring buffer */
2871                 ring->wptr = 0;
2872                 amdgpu_ring_clear_ring(ring);
2873         } else {
2874                 amdgpu_ring_clear_ring(ring);
2875         }
2876
2877         return 0;
2878 }
2879
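/*
 * Resume the compute side: enable the MEC, initialize the KIQ queue first
 * (it must be live before it can map anything), then prepare every KCQ MQD
 * and finally map them all in one batch via gfx_v9_0_kiq_kcq_enable().
 */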
2880 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2881 {
2882         struct amdgpu_ring *ring = NULL;
2883         int r = 0, i;
2884
2885         gfx_v9_0_cp_compute_enable(adev, true);
2886
2887         ring = &adev->gfx.kiq.ring;
2888
2889         r = amdgpu_bo_reserve(ring->mqd_obj, false);
2890         if (unlikely(r != 0))
2891                 goto done;
2892
2893         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2894         if (!r) {
2895                 r = gfx_v9_0_kiq_init_queue(ring);
2896                 amdgpu_bo_kunmap(ring->mqd_obj);
2897                 ring->mqd_ptr = NULL;
2898         }
2899         amdgpu_bo_unreserve(ring->mqd_obj);
2900         if (r)
2901                 goto done;
2902
2903         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2904                 ring = &adev->gfx.compute_ring[i];
2905
2906                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2907                 if (unlikely(r != 0))
2908                         goto done;
2909                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2910                 if (!r) {
2911                         r = gfx_v9_0_kcq_init_queue(ring);
2912                         amdgpu_bo_kunmap(ring->mqd_obj);
2913                         ring->mqd_ptr = NULL;
2914                 }
2915                 amdgpu_bo_unreserve(ring->mqd_obj);
2916                 if (r)
2917                         goto done;
2918         }
2919
2920         r = gfx_v9_0_kiq_kcq_enable(adev);
2921 done:
2922         return r;
2923 }
2924
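/*
 * Full CP bring-up: load the gfx and compute microcode unless the PSP
 * already loaded it, resume the gfx ring, resume the KIQ/KCQs, then
 * ring-test everything before re-enabling the GUI idle interrupt.
 */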
2925 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2926 {
2927         int r, i;
2928         struct amdgpu_ring *ring;
2929
2930         if (!(adev->flags & AMD_IS_APU))
2931                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2932
2933         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2934                 /* legacy firmware loading */
2935                 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2936                 if (r)
2937                         return r;
2938
2939                 r = gfx_v9_0_cp_compute_load_microcode(adev);
2940                 if (r)
2941                         return r;
2942         }
2943
2944         r = gfx_v9_0_cp_gfx_resume(adev);
2945         if (r)
2946                 return r;
2947
2948         r = gfx_v9_0_kiq_resume(adev);
2949         if (r)
2950                 return r;
2951
2952         ring = &adev->gfx.gfx_ring[0];
2953         r = amdgpu_ring_test_ring(ring);
2954         if (r) {
2955                 ring->ready = false;
2956                 return r;
2957         }
2958
2959         ring = &adev->gfx.kiq.ring;
2960         ring->ready = true;
2961         r = amdgpu_ring_test_ring(ring);
2962         if (r)
2963                 ring->ready = false;
2964
2965         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2966                 ring = &adev->gfx.compute_ring[i];
2967
2968                 ring->ready = true;
2969                 r = amdgpu_ring_test_ring(ring);
2970                 if (r)
2971                         ring->ready = false;
2972         }
2973
2974         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2975
2976         return 0;
2977 }
2978
2979 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2980 {
2981         gfx_v9_0_cp_gfx_enable(adev, enable);
2982         gfx_v9_0_cp_compute_enable(adev, enable);
2983 }
2984
2985 static int gfx_v9_0_hw_init(void *handle)
2986 {
2987         int r;
2988         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2989
2990         gfx_v9_0_init_golden_registers(adev);
2991
2992         gfx_v9_0_gpu_init(adev);
2993
2994         r = gfx_v9_0_rlc_resume(adev);
2995         if (r)
2996                 return r;
2997
2998         r = gfx_v9_0_cp_resume(adev);
2999         if (r)
3000                 return r;
3001
3002         r = gfx_v9_0_ngg_en(adev);
3003         if (r)
3004                 return r;
3005
3006         return r;
3007 }
3008
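/*
 * Unmap a single KCQ through the KIQ with an UNMAP_QUEUES RESET action.
 * The packet is 6 dwords and the completion handshake via a scratch
 * register adds 3 more, so the 10-dword allocation below leaves one spare.
 */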
static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
3010 {
3011         struct amdgpu_device *adev = kiq_ring->adev;
3012         uint32_t scratch, tmp = 0;
3013         int r, i;
3014
3015         r = amdgpu_gfx_scratch_get(adev, &scratch);
3016         if (r) {
3017                 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
3018                 return r;
3019         }
3020         WREG32(scratch, 0xCAFEDEAD);
3021
3022         r = amdgpu_ring_alloc(kiq_ring, 10);
3023         if (r) {
3024                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3025                 amdgpu_gfx_scratch_free(adev, scratch);
3026                 return r;
3027         }
3028
3029         /* unmap queues */
3030         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring, /* action: RESET_QUEUES, queue_sel: 0, engine: 0, num_Q: 1 */
3032                                                 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3033                                                 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3034                                                 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3035                                                 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3036         amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3037         amdgpu_ring_write(kiq_ring, 0);
3038         amdgpu_ring_write(kiq_ring, 0);
3039         amdgpu_ring_write(kiq_ring, 0);
3040         /* write to scratch for completion */
3041         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3042         amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
3043         amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
3044         amdgpu_ring_commit(kiq_ring);
3045
3046         for (i = 0; i < adev->usec_timeout; i++) {
3047                 tmp = RREG32(scratch);
3048                 if (tmp == 0xDEADBEEF)
3049                         break;
3050                 DRM_UDELAY(1);
3051         }
3052         if (i >= adev->usec_timeout) {
                DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
3054                 r = -EINVAL;
3055         }
3056         amdgpu_gfx_scratch_free(adev, scratch);
3057         return r;
3058 }
3059
3060 static int gfx_v9_0_hw_fini(void *handle)
3061 {
3062         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3063         int i;
3064
3065         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3066         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3067
        /* disable the KCQs so the CPC stops touching memory that will no longer be valid */
3069         for (i = 0; i < adev->gfx.num_compute_rings; i++)
3070                 gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
3071
3072         if (amdgpu_sriov_vf(adev)) {
3073                 gfx_v9_0_cp_gfx_enable(adev, false);
                /* Polling must be disabled for SRIOV once the HW is
                 * finished; otherwise the CPC engine may keep fetching the
                 * WB address, which becomes invalid after SW teardown, and
                 * trigger a DMAR read error on the hypervisor side.
                 */
3079                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3080                 return 0;
3081         }
3082
        /* Use the deinitialization sequence from CAIL when unbinding the
         * device from the driver; otherwise the KIQ hangs when binding back.
         */
3086         if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3087                 mutex_lock(&adev->srbm_mutex);
3088                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3089                                 adev->gfx.kiq.ring.pipe,
3090                                 adev->gfx.kiq.ring.queue, 0);
3091                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3092                 soc15_grbm_select(adev, 0, 0, 0, 0);
3093                 mutex_unlock(&adev->srbm_mutex);
3094         }
3095
3096         gfx_v9_0_cp_enable(adev, false);
3097         gfx_v9_0_rlc_stop(adev);
3098
3099         return 0;
3100 }
3101
3102 static int gfx_v9_0_suspend(void *handle)
3103 {
3104         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3105
3106         adev->gfx.in_suspend = true;
3107         return gfx_v9_0_hw_fini(adev);
3108 }
3109
3110 static int gfx_v9_0_resume(void *handle)
3111 {
3112         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3113         int r;
3114
3115         r = gfx_v9_0_hw_init(adev);
3116         adev->gfx.in_suspend = false;
3117         return r;
3118 }
3119
3120 static bool gfx_v9_0_is_idle(void *handle)
3121 {
3122         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3123
        return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
                              GRBM_STATUS, GUI_ACTIVE);
3129 }
3130
3131 static int gfx_v9_0_wait_for_idle(void *handle)
3132 {
3133         unsigned i;
3134         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3135
3136         for (i = 0; i < adev->usec_timeout; i++) {
3137                 if (gfx_v9_0_is_idle(handle))
3138                         return 0;
3139                 udelay(1);
3140         }
3141         return -ETIMEDOUT;
3142 }
3143
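/*
 * Soft reset: inspect GRBM_STATUS/GRBM_STATUS2 to decide which blocks (CP,
 * GFX, RLC) are stuck, halt them, then pulse the corresponding bits in
 * GRBM_SOFT_RESET.
 */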
3144 static int gfx_v9_0_soft_reset(void *handle)
3145 {
3146         u32 grbm_soft_reset = 0;
3147         u32 tmp;
3148         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3149
3150         /* GRBM_STATUS */
3151         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3152         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3153                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3154                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3155                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3156                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3157                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3158                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3159                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3160                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3161                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3162         }
3163
3164         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3165                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3166                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3167         }
3168
3169         /* GRBM_STATUS2 */
3170         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3171         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3172                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3173                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3174
3175
3176         if (grbm_soft_reset) {
3177                 /* stop the rlc */
3178                 gfx_v9_0_rlc_stop(adev);
3179
3180                 /* Disable GFX parsing/prefetching */
3181                 gfx_v9_0_cp_gfx_enable(adev, false);
3182
3183                 /* Disable MEC parsing/prefetching */
3184                 gfx_v9_0_cp_compute_enable(adev, false);
3185
                /* assert and then deassert the soft reset bits */
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3199
3200                 /* Wait a little for things to settle down */
3201                 udelay(50);
3202         }
3203         return 0;
3204 }
3205
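/*
 * Read the free-running GPU clock counter: writing
 * RLC_CAPTURE_GPU_CLOCK_COUNT latches the 64-bit value, which is then read
 * back as two 32-bit halves under gpu_clock_mutex to keep the pair coherent.
 */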
3206 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3207 {
3208         uint64_t clock;
3209
3210         mutex_lock(&adev->gfx.gpu_clock_mutex);
3211         WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3212         clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3213                 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3214         mutex_unlock(&adev->gfx.gpu_clock_mutex);
3215         return clock;
3216 }
3217
3218 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3219                                           uint32_t vmid,
3220                                           uint32_t gds_base, uint32_t gds_size,
3221                                           uint32_t gws_base, uint32_t gws_size,
3222                                           uint32_t oa_base, uint32_t oa_size)
3223 {
3224         struct amdgpu_device *adev = ring->adev;
3225
3226         gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3227         gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3228
3229         gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3230         gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3231
3232         oa_base = oa_base >> AMDGPU_OA_SHIFT;
3233         oa_size = oa_size >> AMDGPU_OA_SHIFT;
3234
3235         /* GDS Base */
3236         gfx_v9_0_write_data_to_reg(ring, 0, false,
3237                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3238                                    gds_base);
3239
3240         /* GDS Size */
3241         gfx_v9_0_write_data_to_reg(ring, 0, false,
3242                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3243                                    gds_size);
3244
3245         /* GWS */
3246         gfx_v9_0_write_data_to_reg(ring, 0, false,
3247                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3248                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3249
        /* OA: a contiguous mask of oa_size bits starting at oa_base */
3251         gfx_v9_0_write_data_to_reg(ring, 0, false,
3252                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3253                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
3254 }
3255
3256 static int gfx_v9_0_early_init(void *handle)
3257 {
3258         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3259
3260         adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3261         adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3262         gfx_v9_0_set_ring_funcs(adev);
3263         gfx_v9_0_set_irq_funcs(adev);
3264         gfx_v9_0_set_gds_init(adev);
3265         gfx_v9_0_set_rlc_funcs(adev);
3266
3267         return 0;
3268 }
3269
3270 static int gfx_v9_0_late_init(void *handle)
3271 {
3272         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3273         int r;
3274
3275         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3276         if (r)
3277                 return r;
3278
3279         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3280         if (r)
3281                 return r;
3282
3283         return 0;
3284 }
3285
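/*
 * RLC safe mode: before clock-gating registers are reprogrammed, the
 * driver asks the RLC firmware to pause its state machines by writing a
 * request to RLC_SAFE_MODE and polling until the CMD bit acknowledges it.
 */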
3286 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3287 {
3288         uint32_t rlc_setting, data;
3289         unsigned i;
3290
3291         if (adev->gfx.rlc.in_safe_mode)
3292                 return;
3293
3294         /* if RLC is not enabled, do nothing */
3295         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3296         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3297                 return;
3298
3299         if (adev->cg_flags &
3300             (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3301              AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3302                 data = RLC_SAFE_MODE__CMD_MASK;
3303                 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3304                 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3305
3306                 /* wait for RLC_SAFE_MODE */
3307                 for (i = 0; i < adev->usec_timeout; i++) {
                        if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3309                                 break;
3310                         udelay(1);
3311                 }
3312                 adev->gfx.rlc.in_safe_mode = true;
3313         }
3314 }
3315
3316 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3317 {
3318         uint32_t rlc_setting, data;
3319
3320         if (!adev->gfx.rlc.in_safe_mode)
3321                 return;
3322
3323         /* if RLC is not enabled, do nothing */
3324         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3325         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3326                 return;
3327
3328         if (adev->cg_flags &
3329             (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3330                 /*
3331                  * Try to exit safe mode only if it is already in safe
3332                  * mode.
3333                  */
3334                 data = RLC_SAFE_MODE__CMD_MASK;
3335                 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3336                 adev->gfx.rlc.in_safe_mode = false;
3337         }
3338 }
3339
3340 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3341                                                 bool enable)
3342 {
        /* TODO: double check if we need to perform under safe mode */
3344         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3345
3346         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3347                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3348                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3349                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3350         } else {
3351                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3352                 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3353         }
3354
3355         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3356 }
3357
3358 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3359                                                 bool enable)
3360 {
3361         /* TODO: double check if we need to perform under safe mode */
3362         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3363
3364         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3365                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3366         else
3367                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3368
3369         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3370                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3371         else
3372                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3373
3374         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3375 }
3376
3377 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3378                                                       bool enable)
3379 {
3380         uint32_t data, def;
3381
3382         /* It is disabled by HW by default */
3383         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3384                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3385                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3386                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3387                           RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3388                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3389                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3390
3391                 /* only for Vega10 & Raven1 */
3392                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3393
3394                 if (def != data)
3395                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3396
                /* MGLS is a global flag that gates all memory light-sleep features in GFX */
3398                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3399                         /* 2 - RLC memory Light sleep */
3400                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3401                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3402                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3403                                 if (def != data)
3404                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3405                         }
3406                         /* 3 - CP memory Light sleep */
3407                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3408                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3409                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3410                                 if (def != data)
3411                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3412                         }
3413                 }
3414         } else {
3415                 /* 1 - MGCG_OVERRIDE */
3416                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3417                 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3418                          RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3419                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3420                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3421                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3422                 if (def != data)
3423                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3424
3425                 /* 2 - disable MGLS in RLC */
3426                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3427                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3428                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3429                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3430                 }
3431
3432                 /* 3 - disable MGLS in CP */
3433                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3434                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3435                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3436                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3437                 }
3438         }
3439 }
3440
3441 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3442                                            bool enable)
3443 {
3444         uint32_t data, def;
3445
3446         adev->gfx.rlc.funcs->enter_safe_mode(adev);
3447
3448         /* Enable 3D CGCG/CGLS */
3449         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3450                 /* write cmd to clear cgcg/cgls ov */
3451                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3452                 /* unset CGCG override */
3453                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3454                 /* update CGCG and CGLS override bits */
3455                 if (def != data)
3456                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3457                 /* enable 3Dcgcg FSM(0x0020003f) */
3458                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3459                 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3460                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3461                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3462                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3463                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3464                 if (def != data)
3465                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3466
3467                 /* set IDLE_POLL_COUNT(0x00900100) */
3468                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3469                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3470                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3471                 if (def != data)
3472                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3473         } else {
3474                 /* Disable CGCG/CGLS */
3475                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3476                 /* disable cgcg, cgls should be disabled */
3477                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3478                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3479                 /* disable cgcg and cgls in FSM */
3480                 if (def != data)
3481                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3482         }
3483
3484         adev->gfx.rlc.funcs->exit_safe_mode(adev);
3485 }
3486
3487 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3488                                                       bool enable)
3489 {
3490         uint32_t def, data;
3491
3492         adev->gfx.rlc.funcs->enter_safe_mode(adev);
3493
3494         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3495                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3496                 /* unset CGCG override */
3497                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3498                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3499                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3500                 else
3501                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3502                 /* update CGCG and CGLS override bits */
3503                 if (def != data)
3504                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3505
3506                 /* enable cgcg FSM(0x0020003F) */
3507                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3508                 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3509                         RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3510                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3511                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3512                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3513                 if (def != data)
3514                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3515
3516                 /* set IDLE_POLL_COUNT(0x00900100) */
3517                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3518                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3519                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3520                 if (def != data)
3521                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3522         } else {
3523                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3524                 /* reset CGCG/CGLS bits */
3525                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3526                 /* disable cgcg and cgls in FSM */
3527                 if (def != data)
3528                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3529         }
3530
3531         adev->gfx.rlc.funcs->exit_safe_mode(adev);
3532 }
3533
3534 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3535                                             bool enable)
3536 {
3537         if (enable) {
3538                 /* CGCG/CGLS should be enabled after MGCG/MGLS
3539                  * ===  MGCG + MGLS ===
3540                  */
3541                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3542                 /* ===  CGCG /CGLS for GFX 3D Only === */
3543                 gfx_v9_0_update_3d_clock_gating(adev, enable);
3544                 /* ===  CGCG + CGLS === */
3545                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3546         } else {
3547                 /* CGCG/CGLS should be disabled before MGCG/MGLS
3548                  * ===  CGCG + CGLS ===
3549                  */
3550                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3551                 /* ===  CGCG /CGLS for GFX 3D Only === */
3552                 gfx_v9_0_update_3d_clock_gating(adev, enable);
3553                 /* ===  MGCG + MGLS === */
3554                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3555         }
3556         return 0;
3557 }
3558
3559 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3560         .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3561         .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3562 };
3563
3564 static int gfx_v9_0_set_powergating_state(void *handle,
3565                                           enum amd_powergating_state state)
3566 {
3567         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_PG_STATE_GATE);
3569
3570         switch (adev->asic_type) {
3571         case CHIP_RAVEN:
3572                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3573                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3574                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3575                 } else {
3576                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3577                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3578                 }
3579
3580                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3581                         gfx_v9_0_enable_cp_power_gating(adev, true);
3582                 else
3583                         gfx_v9_0_enable_cp_power_gating(adev, false);
3584
3585                 /* update gfx cgpg state */
3586                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3587
3588                 /* update mgcg state */
3589                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3590                 break;
3591         default:
3592                 break;
3593         }
3594
3595         return 0;
3596 }
3597
3598 static int gfx_v9_0_set_clockgating_state(void *handle,
3599                                           enum amd_clockgating_state state)
3600 {
3601         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3602
3603         if (amdgpu_sriov_vf(adev))
3604                 return 0;
3605
3606         switch (adev->asic_type) {
3607         case CHIP_VEGA10:
3608         case CHIP_VEGA12:
3609         case CHIP_RAVEN:
3610                 gfx_v9_0_update_gfx_clock_gating(adev,
                                                 state == AMD_CG_STATE_GATE);
3612                 break;
3613         default:
3614                 break;
3615         }
3616         return 0;
3617 }
3618
3619 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3620 {
3621         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3622         int data;
3623
3624         if (amdgpu_sriov_vf(adev))
3625                 *flags = 0;
3626
3627         /* AMD_CG_SUPPORT_GFX_MGCG */
3628         data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3629         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3630                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3631
3632         /* AMD_CG_SUPPORT_GFX_CGCG */
3633         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3634         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3635                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3636
3637         /* AMD_CG_SUPPORT_GFX_CGLS */
3638         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3639                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3640
3641         /* AMD_CG_SUPPORT_GFX_RLC_LS */
3642         data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3643         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3644                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3645
3646         /* AMD_CG_SUPPORT_GFX_CP_LS */
3647         data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3648         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3649                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3650
3651         /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3652         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3653         if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3654                 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3655
3656         /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3657         if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3658                 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3659 }
3660
3661 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3662 {
        return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 uses a 32-bit rptr */
3664 }
3665
3666 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3667 {
3668         struct amdgpu_device *adev = ring->adev;
3669         u64 wptr;
3670
3671         /* XXX check if swapping is necessary on BE */
3672         if (ring->use_doorbell) {
3673                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3674         } else {
3675                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3676                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3677         }
3678
3679         return wptr;
3680 }
3681
3682 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3683 {
3684         struct amdgpu_device *adev = ring->adev;
3685
3686         if (ring->use_doorbell) {
3687                 /* XXX check if swapping is necessary on BE */
3688                 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3689                 WDOORBELL64(ring->doorbell_index, ring->wptr);
3690         } else {
3691                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3692                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3693         }
3694 }
3695
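/*
 * Emit an HDP flush.  The NBIO block exposes per-client request/done bits;
 * the ref_and_mask chosen below selects the bit for this ring's CP client
 * (CP2/CP6 pipes for the two MECs, CP0 for gfx), so the WAIT_REG_MEM only
 * completes once HDP has flushed on this ring's behalf.
 */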
3696 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3697 {
3698         struct amdgpu_device *adev = ring->adev;
3699         u32 ref_and_mask, reg_mem_engine;
3700         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
3701
3702         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3703                 switch (ring->me) {
3704                 case 1:
3705                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3706                         break;
3707                 case 2:
3708                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3709                         break;
3710                 default:
3711                         return;
3712                 }
3713                 reg_mem_engine = 0;
3714         } else {
3715                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3716                 reg_mem_engine = 1; /* pfp */
3717         }
3718
3719         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3720                               adev->nbio_funcs->get_hdp_flush_req_offset(adev),
3721                               adev->nbio_funcs->get_hdp_flush_done_offset(adev),
3722                               ref_and_mask, ref_and_mask, 0x20);
3723 }
3724
3725 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3726                                       struct amdgpu_ib *ib,
3727                                       unsigned vmid, bool ctx_switch)
3728 {
3729         u32 header, control = 0;
3730
3731         if (ib->flags & AMDGPU_IB_FLAG_CE)
3732                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3733         else
3734                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3735
3736         control |= ib->length_dw | (vmid << 24);
3737
3738         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3739                 control |= INDIRECT_BUFFER_PRE_ENB(1);
3740
3741                 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3742                         gfx_v9_0_ring_emit_de_meta(ring);
3743         }
3744
3745         amdgpu_ring_write(ring, header);
3746         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3747         amdgpu_ring_write(ring,
3748 #ifdef __BIG_ENDIAN
3749                 (2 << 0) |
3750 #endif
3751                 lower_32_bits(ib->gpu_addr));
3752         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3753         amdgpu_ring_write(ring, control);
3754 }
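/*
 * Editor's aside -- illustrative only. The function above emits a
 * four-dword INDIRECT_BUFFER packet: header, IB address low (dword
 * aligned, with the swap bits in the low two bits on big-endian),
 * address high, and a control word packing the IB length and VMID.
 * The struct and helper below are a hypothetical view of that layout,
 * not driver types.
 */
#include <stdint.h>

struct ib_packet {
        uint32_t header;        /* PACKET3(PACKET3_INDIRECT_BUFFER[_CONST], 2) */
        uint32_t addr_lo;       /* lower_32_bits(ib->gpu_addr) */
        uint32_t addr_hi;       /* upper_32_bits(ib->gpu_addr) */
        uint32_t control;       /* length_dw | (vmid << 24) | flags */
};

static struct ib_packet encode_ib(uint32_t header, uint64_t gpu_addr,
                                  uint32_t length_dw, unsigned vmid)
{
        struct ib_packet pkt = {
                .header  = header,
                .addr_lo = (uint32_t)gpu_addr,
                .addr_hi = (uint32_t)(gpu_addr >> 32),
                .control = length_dw | ((uint32_t)vmid << 24),
        };
        return pkt;
}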
3755
3756 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3757                                           struct amdgpu_ib *ib,
3758                                           unsigned vmid, bool ctx_switch)
3759 {
3760         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3761
3762         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3763         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3764         amdgpu_ring_write(ring,
3765 #ifdef __BIG_ENDIAN
3766                                 (2 << 0) |
3767 #endif
3768                                 lower_32_bits(ib->gpu_addr));
3769         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3770         amdgpu_ring_write(ring, control);
3771 }
3772
3773 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3774                                      u64 seq, unsigned flags)
3775 {
3776         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3777         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3778
3779         /* RELEASE_MEM - flush caches, send int */
3780         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3781         amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
3782                                  EOP_TC_ACTION_EN |
3783                                  EOP_TC_WB_ACTION_EN |
3784                                  EOP_TC_MD_ACTION_EN |
3785                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3786                                  EVENT_INDEX(5)));
3787         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3788
3789         /*
3790          * the address must be Qword aligned for a 64-bit write, and Dword
3791          * aligned when only the low 32 bits are sent (high bits discarded)
3792          */
3793         if (write64bit)
3794                 BUG_ON(addr & 0x7);
3795         else
3796                 BUG_ON(addr & 0x3);
3797         amdgpu_ring_write(ring, lower_32_bits(addr));
3798         amdgpu_ring_write(ring, upper_32_bits(addr));
3799         amdgpu_ring_write(ring, lower_32_bits(seq));
3800         amdgpu_ring_write(ring, upper_32_bits(seq));
3801         amdgpu_ring_write(ring, 0);
3802 }
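/*
 * Editor's aside -- sketch of the consumer side, not driver code.
 * RELEASE_MEM makes the CP write "seq" to "addr" once the cache
 * flushes complete, so checking a fence reduces to comparing the
 * stored value against the awaited sequence number. The wrap-safe
 * signed-distance compare below illustrates the idea; the real check
 * lives in amdgpu's generic fence code.
 */
#include <stdbool.h>
#include <stdint.h>

static bool fence_signaled(const volatile uint32_t *fence_cpu_addr,
                           uint32_t wait_seq)
{
        uint32_t seen = *fence_cpu_addr;

        /* true once "seen" has caught up with wait_seq, even across
         * 32-bit wraparound */
        return (int32_t)(seen - wait_seq) >= 0;
}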
3803
3804 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3805 {
3806         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3807         uint32_t seq = ring->fence_drv.sync_seq;
3808         uint64_t addr = ring->fence_drv.gpu_addr;
3809
3810         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3811                               lower_32_bits(addr), upper_32_bits(addr),
3812                               seq, 0xffffffff, 4);
3813 }
3814
3815 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3816                                         unsigned vmid, uint64_t pd_addr)
3817 {
3818         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
3819
3820         /* compute doesn't have PFP */
3821         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
3822                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
3823                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
3824                 amdgpu_ring_write(ring, 0x0);
3825         }
3826 }
3827
3828 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
3829 {
3830         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
3831 }
3832
3833 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
3834 {
3835         u64 wptr;
3836
3837         /* XXX check if swapping is necessary on BE */
3838         if (ring->use_doorbell)
3839                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
3840         else
3841                 BUG();
3842         return wptr;
3843 }
3844
3845 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
3846                                            bool acquire)
3847 {
3848         struct amdgpu_device *adev = ring->adev;
3849         int pipe_num, tmp, reg;
3850         int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
3851
3852         pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
3853
3854         /* first me only has 2 entries, GFX and HP3D */
3855         if (ring->me > 0)
3856                 pipe_num -= 2;
3857
3858         reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
3859         tmp = RREG32(reg);
3860         tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
3861         WREG32(reg, tmp);
3862 }
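/*
 * Editor's aside -- pattern sketch, not the kernel macro itself. The
 * register update above is the classic read-modify-write through
 * REG_SET_FIELD(): read the dword, clear the field's mask, insert the
 * new value at the field's shift. Boiled down to a plain helper:
 */
#include <stdint.h>

static uint32_t set_field(uint32_t reg_val, uint32_t mask,
                          uint32_t shift, uint32_t value)
{
        return (reg_val & ~mask) | ((value << shift) & mask);
}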
3863
3864 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
3865                                             struct amdgpu_ring *ring,
3866                                             bool acquire)
3867 {
3868         int i, pipe;
3869         bool reserve;
3870         struct amdgpu_ring *iring;
3871
3872         mutex_lock(&adev->gfx.pipe_reserve_mutex);
3873         pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
3874         if (acquire)
3875                 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3876         else
3877                 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3878
3879         if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
3880                 /* Clear all reservations - everyone reacquires all resources */
3881                 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
3882                         gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
3883                                                        true);
3884
3885                 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
3886                         gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
3887                                                        true);
3888         } else {
3889                 /* Lower all pipes without a current reservation */
3890                 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
3891                         iring = &adev->gfx.gfx_ring[i];
3892                         pipe = amdgpu_gfx_queue_to_bit(adev,
3893                                                        iring->me,
3894                                                        iring->pipe,
3895                                                        0);
3896                         reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3897                         gfx_v9_0_ring_set_pipe_percent(iring, reserve);
3898                 }
3899
3900                 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
3901                         iring = &adev->gfx.compute_ring[i];
3902                         pipe = amdgpu_gfx_queue_to_bit(adev,
3903                                                        iring->me,
3904                                                        iring->pipe,
3905                                                        0);
3906                         reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
3907                         gfx_v9_0_ring_set_pipe_percent(iring, reserve);
3908                 }
3909         }
3910
3911         mutex_unlock(&adev->gfx.pipe_reserve_mutex);
3912 }
3913
3914 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
3915                                       struct amdgpu_ring *ring,
3916                                       bool acquire)
3917 {
3918         uint32_t pipe_priority = acquire ? 0x2 : 0x0;
3919         uint32_t queue_priority = acquire ? 0xf : 0x0;
3920
3921         mutex_lock(&adev->srbm_mutex);
3922         soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3923
3924         WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
3925         WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
3926
3927         soc15_grbm_select(adev, 0, 0, 0, 0);
3928         mutex_unlock(&adev->srbm_mutex);
3929 }
3930
3931 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
3932                                                enum drm_sched_priority priority)
3933 {
3934         struct amdgpu_device *adev = ring->adev;
3935         bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
3936
3937         if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
3938                 return;
3939
3940         gfx_v9_0_hqd_set_priority(adev, ring, acquire);
3941         gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
3942 }
3943
3944 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
3945 {
3946         struct amdgpu_device *adev = ring->adev;
3947
3948         /* XXX check if swapping is necessary on BE */
3949         if (ring->use_doorbell) {
3950                 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3951                 WDOORBELL64(ring->doorbell_index, ring->wptr);
3952         } else {
3953                 BUG(); /* only DOORBELL method supported on gfx9 now */
3954         }
3955 }
3956
3957 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
3958                                          u64 seq, unsigned int flags)
3959 {
3960         struct amdgpu_device *adev = ring->adev;
3961
3962         /* we only allocate 32bit for each seq wb address */
3963         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
3964
3965         /* write fence seq to the "addr" */
3966         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3967         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3968                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
3969         amdgpu_ring_write(ring, lower_32_bits(addr));
3970         amdgpu_ring_write(ring, upper_32_bits(addr));
3971         amdgpu_ring_write(ring, lower_32_bits(seq));
3972
3973         if (flags & AMDGPU_FENCE_FLAG_INT) {
3974                 /* set register to trigger INT */
3975                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3976                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3977                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
3978                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
3979                 amdgpu_ring_write(ring, 0);
3980                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
3981         }
3982 }
3983
3984 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
3985 {
3986         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3987         amdgpu_ring_write(ring, 0);
3988 }
3989
3990 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
3991 {
3992         struct v9_ce_ib_state ce_payload = {0};
3993         uint64_t csa_addr;
3994         int cnt;
3995
3996         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
3997         csa_addr = amdgpu_csa_vaddr(ring->adev);
3998
3999         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4000         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4001                                  WRITE_DATA_DST_SEL(8) |
4002                                  WR_CONFIRM) |
4003                                  WRITE_DATA_CACHE_POLICY(0));
4004         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4005         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4006         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
4007 }
4008
4009 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
4010 {
4011         struct v9_de_ib_state de_payload = {0};
4012         uint64_t csa_addr, gds_addr;
4013         int cnt;
4014
4015         csa_addr = amdgpu_csa_vaddr(ring->adev);
4016         gds_addr = csa_addr + 4096;
4017         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4018         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4019
4020         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4021         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4022         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4023                                  WRITE_DATA_DST_SEL(8) |
4024                                  WR_CONFIRM) |
4025                                  WRITE_DATA_CACHE_POLICY(0));
4026         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4027         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4028         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
4029 }
4030
4031 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4032 {
4033         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4034         amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_begin, 1: frame_end */
4035 }
4036
4037 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4038 {
4039         uint32_t dw2 = 0;
4040
4041         if (amdgpu_sriov_vf(ring->adev))
4042                 gfx_v9_0_ring_emit_ce_meta(ring);
4043
4044         gfx_v9_0_ring_emit_tmz(ring, true);
4045
4046         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
4047         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4048                 /* set load_global_config & load_global_uconfig */
4049                 dw2 |= 0x8001;
4050                 /* set load_cs_sh_regs */
4051                 dw2 |= 0x01000000;
4052                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4053                 dw2 |= 0x10002;
4054
4055                 /* set load_ce_ram if a preamble is presented */
4056                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4057                         dw2 |= 0x10000000;
4058         } else {
4059                 /* still load_ce_ram if this is the first time the preamble
4060                  * is presented, even though no context switch happens.
4061                  */
4062                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4063                         dw2 |= 0x10000000;
4064         }
4065
4066         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4067         amdgpu_ring_write(ring, dw2);
4068         amdgpu_ring_write(ring, 0);
4069 }
4070
4071 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4072 {
4073         unsigned ret;
4074         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4075         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4076         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4077         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
4078         ret = ring->wptr & ring->buf_mask;
4079         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4080         return ret;
4081 }
4082
4083 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4084 {
4085         unsigned cur;
4086         BUG_ON(offset > ring->buf_mask);
4087         BUG_ON(ring->ring[offset] != 0x55aa55aa);
4088
4089         cur = (ring->wptr & ring->buf_mask) - 1;
4090         if (likely(cur > offset))
4091                 ring->ring[offset] = cur - offset;
4092         else
4093                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
4094 }
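/*
 * Editor's aside -- worked example, not driver code. The value patched
 * over the 0x55aa55aa placeholder is the number of dwords COND_EXEC
 * must skip, measured from the placeholder to the current write
 * position, modulo the ring size when the write pointer has wrapped.
 * With a 1024-dword ring (buf_mask = 1023), placeholder at offset 1020
 * and wptr now at 8: cur = 7, cur < offset, so the count is
 * 1024 - 1020 + 7 = 11 dwords.
 */
#include <stdint.h>

static uint32_t cond_exec_skip_count(uint32_t ring_size_dw, uint32_t buf_mask,
                                     uint32_t offset, uint64_t wptr)
{
        uint32_t cur = ((uint32_t)wptr & buf_mask) - 1;

        if (cur > offset)
                return cur - offset;
        return ring_size_dw - offset + cur;     /* wrapped case */
}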
4095
4096 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4097 {
4098         struct amdgpu_device *adev = ring->adev;
4099
4100         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4101         amdgpu_ring_write(ring, 0 |     /* src: register */
4102                                 (5 << 8) |      /* dst: memory */
4103                                 (1 << 20));     /* write confirm */
4104         amdgpu_ring_write(ring, reg);
4105         amdgpu_ring_write(ring, 0);
4106         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4107                                 adev->virt.reg_val_offs * 4));
4108         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4109                                 adev->virt.reg_val_offs * 4));
4110 }
4111
4112 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4113                                     uint32_t val)
4114 {
4115         uint32_t cmd = 0;
4116
4117         switch (ring->funcs->type) {
4118         case AMDGPU_RING_TYPE_GFX:
4119                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4120                 break;
4121         case AMDGPU_RING_TYPE_KIQ:
4122                 cmd = (1 << 16); /* no inc addr */
4123                 break;
4124         default:
4125                 cmd = WR_CONFIRM;
4126                 break;
4127         }
4128         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4129         amdgpu_ring_write(ring, cmd);
4130         amdgpu_ring_write(ring, reg);
4131         amdgpu_ring_write(ring, 0);
4132         amdgpu_ring_write(ring, val);
4133 }
4134
4135 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4136                                         uint32_t val, uint32_t mask)
4137 {
4138         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4139 }
4140
4141 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4142                                                   uint32_t reg0, uint32_t reg1,
4143                                                   uint32_t ref, uint32_t mask)
4144 {
4145         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4146
4147         gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20);
4148 }
4149
4150 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4151                                                  enum amdgpu_interrupt_state state)
4152 {
4153         switch (state) {
4154         case AMDGPU_IRQ_STATE_DISABLE:
4155         case AMDGPU_IRQ_STATE_ENABLE:
4156                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4157                                TIME_STAMP_INT_ENABLE,
4158                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4159                 break;
4160         default:
4161                 break;
4162         }
4163 }
4164
4165 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4166                                                      int me, int pipe,
4167                                                      enum amdgpu_interrupt_state state)
4168 {
4169         u32 mec_int_cntl, mec_int_cntl_reg;
4170
4171         /*
4172          * amdgpu controls only the first MEC. That's why this function only
4173          * handles the setting of interrupts for this specific MEC. All other
4174          * pipes' interrupts are set by amdkfd.
4175          */
4176
4177         if (me == 1) {
4178                 switch (pipe) {
4179                 case 0:
4180                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4181                         break;
4182                 case 1:
4183                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4184                         break;
4185                 case 2:
4186                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4187                         break;
4188                 case 3:
4189                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4190                         break;
4191                 default:
4192                         DRM_DEBUG("invalid pipe %d\n", pipe);
4193                         return;
4194                 }
4195         } else {
4196                 DRM_DEBUG("invalid me %d\n", me);
4197                 return;
4198         }
4199
4200         switch (state) {
4201         case AMDGPU_IRQ_STATE_DISABLE:
4202                 mec_int_cntl = RREG32(mec_int_cntl_reg);
4203                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4204                                              TIME_STAMP_INT_ENABLE, 0);
4205                 WREG32(mec_int_cntl_reg, mec_int_cntl);
4206                 break;
4207         case AMDGPU_IRQ_STATE_ENABLE:
4208                 mec_int_cntl = RREG32(mec_int_cntl_reg);
4209                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4210                                              TIME_STAMP_INT_ENABLE, 1);
4211                 WREG32(mec_int_cntl_reg, mec_int_cntl);
4212                 break;
4213         default:
4214                 break;
4215         }
4216 }
4217
4218 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4219                                              struct amdgpu_irq_src *source,
4220                                              unsigned type,
4221                                              enum amdgpu_interrupt_state state)
4222 {
4223         switch (state) {
4224         case AMDGPU_IRQ_STATE_DISABLE:
4225         case AMDGPU_IRQ_STATE_ENABLE:
4226                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4227                                PRIV_REG_INT_ENABLE,
4228                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4229                 break;
4230         default:
4231                 break;
4232         }
4233
4234         return 0;
4235 }
4236
4237 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4238                                               struct amdgpu_irq_src *source,
4239                                               unsigned type,
4240                                               enum amdgpu_interrupt_state state)
4241 {
4242         switch (state) {
4243         case AMDGPU_IRQ_STATE_DISABLE:
4244         case AMDGPU_IRQ_STATE_ENABLE:
4245                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4246                                PRIV_INSTR_INT_ENABLE,
4247                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                 break;
4248         default:
4249                 break;
4250         }
4251
4252         return 0;
4253 }
4254
4255 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4256                                             struct amdgpu_irq_src *src,
4257                                             unsigned type,
4258                                             enum amdgpu_interrupt_state state)
4259 {
4260         switch (type) {
4261         case AMDGPU_CP_IRQ_GFX_EOP:
4262                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
4263                 break;
4264         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4265                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4266                 break;
4267         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4268                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4269                 break;
4270         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4271                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4272                 break;
4273         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4274                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4275                 break;
4276         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4277                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4278                 break;
4279         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4280                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4281                 break;
4282         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4283                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4284                 break;
4285         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4286                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4287                 break;
4288         default:
4289                 break;
4290         }
4291         return 0;
4292 }
4293
4294 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
4295                             struct amdgpu_irq_src *source,
4296                             struct amdgpu_iv_entry *entry)
4297 {
4298         int i;
4299         u8 me_id, pipe_id, queue_id;
4300         struct amdgpu_ring *ring;
4301
4302         DRM_DEBUG("IH: CP EOP\n");
4303         me_id = (entry->ring_id & 0x0c) >> 2;
4304         pipe_id = (entry->ring_id & 0x03) >> 0;
4305         queue_id = (entry->ring_id & 0x70) >> 4;
4306
4307         switch (me_id) {
4308         case 0:
4309                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4310                 break;
4311         case 1:
4312         case 2:
4313                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4314                         ring = &adev->gfx.compute_ring[i];
4315                         /* Per-queue interrupt is supported for MEC starting from VI.
4316                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
4317                          */
4318                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4319                                 amdgpu_fence_process(ring);
4320                 }
4321                 break;
4322         }
4323         return 0;
4324 }
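/*
 * Editor's aside -- decoding example with a made-up sample value. The
 * EOP handler above unpacks the queue coordinates from ring_id: bits
 * 2-3 give the ME, bits 0-1 the pipe, bits 4-6 the queue. For example,
 * ring_id 0x25 decodes as below.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t ring_id = 0x25;                         /* sample value */
        uint8_t me      = (ring_id & 0x0c) >> 2;        /* 1 */
        uint8_t pipe    = (ring_id & 0x03) >> 0;        /* 1 */
        uint8_t queue   = (ring_id & 0x70) >> 4;        /* 2 */

        printf("me %u pipe %u queue %u\n", me, pipe, queue);
        return 0;
}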
4325
4326 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
4327                                  struct amdgpu_irq_src *source,
4328                                  struct amdgpu_iv_entry *entry)
4329 {
4330         DRM_ERROR("Illegal register access in command stream\n");
4331         schedule_work(&adev->reset_work);
4332         return 0;
4333 }
4334
4335 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
4336                                   struct amdgpu_irq_src *source,
4337                                   struct amdgpu_iv_entry *entry)
4338 {
4339         DRM_ERROR("Illegal instruction in command stream\n");
4340         schedule_work(&adev->reset_work);
4341         return 0;
4342 }
4343
4344 static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
4345                                             struct amdgpu_irq_src *src,
4346                                             unsigned int type,
4347                                             enum amdgpu_interrupt_state state)
4348 {
4349         uint32_t tmp, target;
4350         struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4351
4352         if (ring->me == 1)
4353                 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4354         else
4355                 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
4356         target += ring->pipe;
4357
4358         switch (type) {
4359         case AMDGPU_CP_KIQ_IRQ_DRIVER0:
4360                 if (state == AMDGPU_IRQ_STATE_DISABLE) {
4361                         tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4362                         tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4363                                                  GENERIC2_INT_ENABLE, 0);
4364                         WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4365
4366                         tmp = RREG32(target);
4367                         tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4368                                                  GENERIC2_INT_ENABLE, 0);
4369                         WREG32(target, tmp);
4370                 } else {
4371                         tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4372                         tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4373                                                  GENERIC2_INT_ENABLE, 1);
4374                         WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4375
4376                         tmp = RREG32(target);
4377                         tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4378                                                  GENERIC2_INT_ENABLE, 1);
4379                         WREG32(target, tmp);
4380                 }
4381                 break;
4382         default:
4383                 BUG(); /* kiq only supports GENERIC2_INT now */
4384                 break;
4385         }
4386         return 0;
4387 }
4388
4389 static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
4390                             struct amdgpu_irq_src *source,
4391                             struct amdgpu_iv_entry *entry)
4392 {
4393         u8 me_id, pipe_id, queue_id;
4394         struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4395
4396         me_id = (entry->ring_id & 0x0c) >> 2;
4397         pipe_id = (entry->ring_id & 0x03) >> 0;
4398         queue_id = (entry->ring_id & 0x70) >> 4;
4399         DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
4400                    me_id, pipe_id, queue_id);
4401
4402         amdgpu_fence_process(ring);
4403         return 0;
4404 }
4405
4406 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
4407         .name = "gfx_v9_0",
4408         .early_init = gfx_v9_0_early_init,
4409         .late_init = gfx_v9_0_late_init,
4410         .sw_init = gfx_v9_0_sw_init,
4411         .sw_fini = gfx_v9_0_sw_fini,
4412         .hw_init = gfx_v9_0_hw_init,
4413         .hw_fini = gfx_v9_0_hw_fini,
4414         .suspend = gfx_v9_0_suspend,
4415         .resume = gfx_v9_0_resume,
4416         .is_idle = gfx_v9_0_is_idle,
4417         .wait_for_idle = gfx_v9_0_wait_for_idle,
4418         .soft_reset = gfx_v9_0_soft_reset,
4419         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
4420         .set_powergating_state = gfx_v9_0_set_powergating_state,
4421         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
4422 };
4423
4424 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4425         .type = AMDGPU_RING_TYPE_GFX,
4426         .align_mask = 0xff,
4427         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4428         .support_64bit_ptrs = true,
4429         .vmhub = AMDGPU_GFXHUB,
4430         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
4431         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
4432         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
4433         .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
4434                 5 +  /* COND_EXEC */
4435                 7 +  /* PIPELINE_SYNC */
4436                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4437                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4438                 2 + /* VM_FLUSH */
4439                 8 +  /* FENCE for VM_FLUSH */
4440                 20 + /* GDS switch */
4441                 4 + /* double SWITCH_BUFFER,
4442                        the first COND_EXEC jumps to the place just
4443                        prior to this double SWITCH_BUFFER */
4444                 5 + /* COND_EXEC */
4445                 7 + /* HDP_flush */
4446                 4 + /* VGT_flush */
4447                 14 + /* CE_META */
4448                 31 + /* DE_META */
4449                 3 + /* CNTX_CTRL */
4450                 5 + /* HDP_INVL */
4451                 8 + 8 + /* FENCE x2 */
4452                 2, /* SWITCH_BUFFER */
4453         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
4454         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
4455         .emit_fence = gfx_v9_0_ring_emit_fence,
4456         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4457         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4458         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4459         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4460         .test_ring = gfx_v9_0_ring_test_ring,
4461         .test_ib = gfx_v9_0_ring_test_ib,
4462         .insert_nop = amdgpu_ring_insert_nop,
4463         .pad_ib = amdgpu_ring_generic_pad_ib,
4464         .emit_switch_buffer = gfx_v9_ring_emit_sb,
4465         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
4466         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
4467         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4468         .emit_tmz = gfx_v9_0_ring_emit_tmz,
4469         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4470         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4471         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4472 };
4473
4474 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4475         .type = AMDGPU_RING_TYPE_COMPUTE,
4476         .align_mask = 0xff,
4477         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4478         .support_64bit_ptrs = true,
4479         .vmhub = AMDGPU_GFXHUB,
4480         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4481         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4482         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4483         .emit_frame_size =
4484                 20 + /* gfx_v9_0_ring_emit_gds_switch */
4485                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4486                 5 + /* hdp invalidate */
4487                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4488                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4489                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4490                 2 + /* gfx_v9_0_ring_emit_vm_flush */
4491                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4492         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4493         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4494         .emit_fence = gfx_v9_0_ring_emit_fence,
4495         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4496         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4497         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4498         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4499         .test_ring = gfx_v9_0_ring_test_ring,
4500         .test_ib = gfx_v9_0_ring_test_ib,
4501         .insert_nop = amdgpu_ring_insert_nop,
4502         .pad_ib = amdgpu_ring_generic_pad_ib,
4503         .set_priority = gfx_v9_0_ring_set_priority_compute,
4504         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4505         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4506         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4507 };
4508
4509 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4510         .type = AMDGPU_RING_TYPE_KIQ,
4511         .align_mask = 0xff,
4512         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4513         .support_64bit_ptrs = true,
4514         .vmhub = AMDGPU_GFXHUB,
4515         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4516         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4517         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4518         .emit_frame_size =
4519                 20 + /* gfx_v9_0_ring_emit_gds_switch */
4520                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4521                 5 + /* hdp invalidate */
4522                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4523                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4524                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4525                 2 + /* gfx_v9_0_ring_emit_vm_flush */
4526                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4527         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4528         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4529         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
4530         .test_ring = gfx_v9_0_ring_test_ring,
4531         .test_ib = gfx_v9_0_ring_test_ib,
4532         .insert_nop = amdgpu_ring_insert_nop,
4533         .pad_ib = amdgpu_ring_generic_pad_ib,
4534         .emit_rreg = gfx_v9_0_ring_emit_rreg,
4535         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4536         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4537         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4538 };
4539
4540 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
4541 {
4542         int i;
4543
4544         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
4545
4546         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4547                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
4548
4549         for (i = 0; i < adev->gfx.num_compute_rings; i++)
4550                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
4551 }
4552
4553 static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
4554         .set = gfx_v9_0_kiq_set_interrupt_state,
4555         .process = gfx_v9_0_kiq_irq,
4556 };
4557
4558 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
4559         .set = gfx_v9_0_set_eop_interrupt_state,
4560         .process = gfx_v9_0_eop_irq,
4561 };
4562
4563 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
4564         .set = gfx_v9_0_set_priv_reg_fault_state,
4565         .process = gfx_v9_0_priv_reg_irq,
4566 };
4567
4568 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
4569         .set = gfx_v9_0_set_priv_inst_fault_state,
4570         .process = gfx_v9_0_priv_inst_irq,
4571 };
4572
4573 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
4574 {
4575         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4576         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
4577
4578         adev->gfx.priv_reg_irq.num_types = 1;
4579         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
4580
4581         adev->gfx.priv_inst_irq.num_types = 1;
4582         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
4583
4584         adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
4585         adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
4586 }
4587
4588 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
4589 {
4590         switch (adev->asic_type) {
4591         case CHIP_VEGA10:
4592         case CHIP_VEGA12:
4593         case CHIP_RAVEN:
4594                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
4595                 break;
4596         default:
4597                 break;
4598         }
4599 }
4600
4601 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
4602 {
4603         /* init asic gds info */
4604         adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
4605         adev->gds.gws.total_size = 64;
4606         adev->gds.oa.total_size = 16;
4607
4608         if (adev->gds.mem.total_size == 64 * 1024) {
4609                 adev->gds.mem.gfx_partition_size = 4096;
4610                 adev->gds.mem.cs_partition_size = 4096;
4611
4612                 adev->gds.gws.gfx_partition_size = 4;
4613                 adev->gds.gws.cs_partition_size = 4;
4614
4615                 adev->gds.oa.gfx_partition_size = 4;
4616                 adev->gds.oa.cs_partition_size = 1;
4617         } else {
4618                 adev->gds.mem.gfx_partition_size = 1024;
4619                 adev->gds.mem.cs_partition_size = 1024;
4620
4621                 adev->gds.gws.gfx_partition_size = 16;
4622                 adev->gds.gws.cs_partition_size = 16;
4623
4624                 adev->gds.oa.gfx_partition_size = 4;
4625                 adev->gds.oa.cs_partition_size = 4;
4626         }
4627 }
4628
4629 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4630                                                  u32 bitmap)
4631 {
4632         u32 data;
4633
4634         if (!bitmap)
4635                 return;
4636
4637         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4638         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4639
4640         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
4641 }
4642
4643 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4644 {
4645         u32 data, mask;
4646
4647         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
4648         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
4649
4650         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4651         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4652
4653         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4654
4655         return (~data) & mask;
4656 }
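/*
 * Editor's aside -- worked inversion example, not driver code. The
 * hardware registers report *inactive* CUs, so the active bitmap is
 * the complement clipped to max_cu_per_sh bits. With max_cu_per_sh =
 * 10 and inactive = 0b0000110000, the result is 0b1111001111. The
 * mask computation below stands in for amdgpu_gfx_create_bitmask().
 */
#include <stdint.h>

static uint32_t active_cu_bitmap(uint32_t inactive, unsigned max_cu_per_sh)
{
        uint32_t mask = (1u << max_cu_per_sh) - 1;

        return ~inactive & mask;
}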
4657
4658 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
4659                                  struct amdgpu_cu_info *cu_info)
4660 {
4661         int i, j, k, counter, active_cu_number = 0;
4662         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4663         unsigned disable_masks[4 * 2];
4664
4665         if (!adev || !cu_info)
4666                 return -EINVAL;
4667
4668         amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
4669
4670         mutex_lock(&adev->grbm_idx_mutex);
4671         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4672                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4673                         mask = 1;
4674                         ao_bitmap = 0;
4675                         counter = 0;
4676                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
4677                         if (i < 4 && j < 2)
4678                                 gfx_v9_0_set_user_cu_inactive_bitmap(
4679                                         adev, disable_masks[i * 2 + j]);
4680                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
4681                         cu_info->bitmap[i][j] = bitmap;
4682
4683                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4684                                 if (bitmap & mask) {
4685                                         if (counter < adev->gfx.config.max_cu_per_sh)
4686                                                 ao_bitmap |= mask;
4687                                         counter++;
4688                                 }
4689                                 mask <<= 1;
4690                         }
4691                         active_cu_number += counter;
4692                         if (i < 2 && j < 2)
4693                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4694                         cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4695                 }
4696         }
4697         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
4698         mutex_unlock(&adev->grbm_idx_mutex);
4699
4700         cu_info->number = active_cu_number;
4701         cu_info->ao_cu_mask = ao_cu_mask;
4702
4703         return 0;
4704 }
4705
4706 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
4707 {
4708         .type = AMD_IP_BLOCK_TYPE_GFX,
4709         .major = 9,
4710         .minor = 0,
4711         .rev = 0,
4712         .funcs = &gfx_v9_0_ip_funcs,
4713 };