drivers/gpu/drm/amd/amdgpu/vi.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "vi_dpm.h"
#include "gmc_v8_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"

/*
 * Indirect register accessors
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX);
        r = RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
        return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(mmPCIE_INDEX, reg);
        (void)RREG32(mmPCIE_INDEX);
        WREG32(mmPCIE_DATA, v);
        (void)RREG32(mmPCIE_DATA);
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
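
/*
 * Usage sketch (not from this file): drivers normally reach these
 * accessors through the RREG32_PCIE()/WREG32_PCIE() wrappers, which
 * dispatch to adev->pcie_rreg/adev->pcie_wreg.  A read-modify-write of
 * an indirect PCIE register would look roughly like this (the mask is
 * a placeholder for illustration):
 *
 *	u32 tmp = RREG32_PCIE(ixPCIE_CNTL2);
 *	tmp |= some_mask;
 *	WREG32_PCIE(ixPCIE_CNTL2, tmp);
 *
 * The dummy (void)RREG32(mmPCIE_INDEX) reads above post the index
 * write before the data register is touched.
 */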

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, (reg));
        r = RREG32(mmSMC_IND_DATA_0);
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
        return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->smc_idx_lock, flags);
        WREG32(mmSMC_IND_INDEX_0, (reg));
        WREG32(mmSMC_IND_DATA_0, (v));
        spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
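
/*
 * Example: the RREG32_SMC()/WREG32_SMC() macros route through these
 * accessors; vi_read_disabled_bios() below uses exactly this pattern:
 *
 *	u32 rom_cntl = RREG32_SMC(ixROM_CNTL);
 *	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
 */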

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
        r = RREG32(mmUVD_CTX_DATA);
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
        return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
        WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
        WREG32(mmUVD_CTX_DATA, (v));
        spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}
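
/*
 * Note: the (reg & 0x1ff) masking above keeps only 9 index bits, so
 * the UVD context accessors address a 512-dword indirect window;
 * larger offsets simply alias back into that window.
 */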

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, (reg));
        r = RREG32(mmDIDT_IND_DATA);
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
        return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
        unsigned long flags;

        spin_lock_irqsave(&adev->didt_idx_lock, flags);
        WREG32(mmDIDT_IND_INDEX, (reg));
        WREG32(mmDIDT_IND_DATA, (v));
        spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static const u32 tonga_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
        mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
        mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
        mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
        mmPCIE_INDEX, 0xffffffff, 0x0140001c,
        mmPCIE_DATA, 0x000f0000, 0x00000000,
        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
        mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
        mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
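
/*
 * The tables above are consumed three entries at a time as
 * (register, and_mask, or_value) triplets.  A simplified sketch of how
 * amdgpu_program_register_sequence() applies them (see amdgpu_device.c
 * for the real implementation):
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		reg      = registers[i + 0];
 *		and_mask = registers[i + 1];
 *		or_mask  = registers[i + 2];
 *		if (and_mask == 0xffffffff)
 *			tmp = or_mask;
 *		else
 *			tmp = (RREG32(reg) & ~and_mask) | or_mask;
 *		WREG32(reg, tmp);
 *	}
 */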

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
        /* Some of the registers might be dependent on GRBM_GFX_INDEX */
        mutex_lock(&adev->grbm_idx_mutex);

        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                amdgpu_program_register_sequence(adev,
                                                 iceland_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
                break;
        case CHIP_TONGA:
                amdgpu_program_register_sequence(adev,
                                                 tonga_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
                                                 cz_mgcg_cgcg_init,
                                                 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
                break;
        default:
                break;
        }
        mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
        u32 reference_clock = adev->clock.spll.reference_freq;
        u32 tmp;

        if (adev->flags & AMDGPU_IS_APU)
                return reference_clock;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
                return 1000;

        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
                return reference_clock / 4;

        return reference_clock;
}
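
/*
 * Usage sketch: callers go through the amdgpu_asic_get_xclk() wrapper.
 * Assuming the usual radeon/amdgpu convention that clocks are kept in
 * 10 kHz units (not stated in this file), converting to Hz would be:
 *
 *	u32 xclk = amdgpu_asic_get_xclk(adev);
 *	u64 hz = (u64)xclk * 10000;
 */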

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
                     u32 me, u32 pipe, u32 queue, u32 vmid)
{
        u32 srbm_gfx_cntl = 0;

        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
        srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
        WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
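
/*
 * Typical calling pattern (a sketch of how the gfx/compute code uses
 * this): select an instance under adev->srbm_mutex and restore the
 * default instance before dropping the lock:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the instanced registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */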

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
        /* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
        u32 bus_cntl;
        u32 d1vga_control = 0;
        u32 d2vga_control = 0;
        u32 vga_render_control = 0;
        u32 rom_cntl;
        bool r;

        bus_cntl = RREG32(mmBUS_CNTL);
        if (adev->mode_info.num_crtc) {
                d1vga_control = RREG32(mmD1VGA_CONTROL);
                d2vga_control = RREG32(mmD2VGA_CONTROL);
                vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
        }
        rom_cntl = RREG32_SMC(ixROM_CNTL);

        /* enable the rom */
        WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
        if (adev->mode_info.num_crtc) {
                /* Disable VGA mode */
                WREG32(mmD1VGA_CONTROL,
                       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
                                          D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
                WREG32(mmD2VGA_CONTROL,
                       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
                                          D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
                WREG32(mmVGA_RENDER_CONTROL,
                       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

        r = amdgpu_read_bios(adev);

        /* restore regs */
        WREG32(mmBUS_CNTL, bus_cntl);
        if (adev->mode_info.num_crtc) {
                WREG32(mmD1VGA_CONTROL, d1vga_control);
                WREG32(mmD2VGA_CONTROL, d2vga_control);
                WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
        }
        WREG32_SMC(ixROM_CNTL, rom_cntl);
        return r;
}
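
/*
 * Register whitelists for vi_read_register() below.  Each entry is
 * (reg_offset, untouched[, grbm_indexed]): "untouched" registers are
 * reported as 0 instead of being read, and "grbm_indexed" registers
 * are read through the GRBM SE/SH index via vi_read_indexed_register().
 */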
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = {
        {mmGB_TILE_MODE7, true},
        {mmGB_TILE_MODE12, true},
        {mmGB_TILE_MODE17, true},
        {mmGB_TILE_MODE23, true},
        {mmGB_MACROTILE_MODE7, true},
};

static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
        {mmMC_ARB_RAMCFG, false},
        {mmGB_TILE_MODE0, false},
        {mmGB_TILE_MODE1, false},
        {mmGB_TILE_MODE2, false},
        {mmGB_TILE_MODE3, false},
        {mmGB_TILE_MODE4, false},
        {mmGB_TILE_MODE5, false},
        {mmGB_TILE_MODE6, false},
        {mmGB_TILE_MODE7, false},
        {mmGB_TILE_MODE8, false},
        {mmGB_TILE_MODE9, false},
        {mmGB_TILE_MODE10, false},
        {mmGB_TILE_MODE11, false},
        {mmGB_TILE_MODE12, false},
        {mmGB_TILE_MODE13, false},
        {mmGB_TILE_MODE14, false},
        {mmGB_TILE_MODE15, false},
        {mmGB_TILE_MODE16, false},
        {mmGB_TILE_MODE17, false},
        {mmGB_TILE_MODE18, false},
        {mmGB_TILE_MODE19, false},
        {mmGB_TILE_MODE20, false},
        {mmGB_TILE_MODE21, false},
        {mmGB_TILE_MODE22, false},
        {mmGB_TILE_MODE23, false},
        {mmGB_TILE_MODE24, false},
        {mmGB_TILE_MODE25, false},
        {mmGB_TILE_MODE26, false},
        {mmGB_TILE_MODE27, false},
        {mmGB_TILE_MODE28, false},
        {mmGB_TILE_MODE29, false},
        {mmGB_TILE_MODE30, false},
        {mmGB_TILE_MODE31, false},
        {mmGB_MACROTILE_MODE0, false},
        {mmGB_MACROTILE_MODE1, false},
        {mmGB_MACROTILE_MODE2, false},
        {mmGB_MACROTILE_MODE3, false},
        {mmGB_MACROTILE_MODE4, false},
        {mmGB_MACROTILE_MODE5, false},
        {mmGB_MACROTILE_MODE6, false},
        {mmGB_MACROTILE_MODE7, false},
        {mmGB_MACROTILE_MODE8, false},
        {mmGB_MACROTILE_MODE9, false},
        {mmGB_MACROTILE_MODE10, false},
        {mmGB_MACROTILE_MODE11, false},
        {mmGB_MACROTILE_MODE12, false},
        {mmGB_MACROTILE_MODE13, false},
        {mmGB_MACROTILE_MODE14, false},
        {mmGB_MACROTILE_MODE15, false},
        {mmCC_RB_BACKEND_DISABLE, false, true},
        {mmGC_USER_RB_BACKEND_DISABLE, false, true},
        {mmGB_BACKEND_MAP, false, false},
        {mmPA_SC_RASTER_CONFIG, false, true},
        {mmPA_SC_RASTER_CONFIG_1, false, true},
};

static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
                                         u32 sh_num, u32 reg_offset)
{
        uint32_t val;

        mutex_lock(&adev->grbm_idx_mutex);
        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, se_num, sh_num);

        val = RREG32(reg_offset);

        if (se_num != 0xffffffff || sh_num != 0xffffffff)
                gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
        return val;
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
                            u32 sh_num, u32 reg_offset, u32 *value)
{
        struct amdgpu_allowed_register_entry *asic_register_table = NULL;
        struct amdgpu_allowed_register_entry *asic_register_entry;
        uint32_t size, i;

        *value = 0;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                asic_register_table = tonga_allowed_read_registers;
                size = ARRAY_SIZE(tonga_allowed_read_registers);
                break;
        case CHIP_TONGA:
        case CHIP_CARRIZO:
                asic_register_table = cz_allowed_read_registers;
                size = ARRAY_SIZE(cz_allowed_read_registers);
                break;
        default:
                return -EINVAL;
        }

        if (asic_register_table) {
                for (i = 0; i < size; i++) {
                        asic_register_entry = asic_register_table + i;
                        if (reg_offset != asic_register_entry->reg_offset)
                                continue;
                        if (!asic_register_entry->untouched)
                                *value = asic_register_entry->grbm_indexed ?
                                        vi_read_indexed_register(adev, se_num,
                                                                 sh_num, reg_offset) :
                                        RREG32(reg_offset);
                        return 0;
                }
        }

        for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
                if (reg_offset != vi_allowed_read_registers[i].reg_offset)
                        continue;

                if (!vi_allowed_read_registers[i].untouched)
                        *value = vi_allowed_read_registers[i].grbm_indexed ?
                                vi_read_indexed_register(adev, se_num,
                                                         sh_num, reg_offset) :
                                RREG32(reg_offset);
                return 0;
        }
        return -EINVAL;
}
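
/*
 * Sketch of how these reads are reached: userspace register queries
 * come in through the amdgpu_asic_read_register() wrapper (hooked up
 * in vi_asic_funcs below), roughly:
 *
 *	u32 val;
 *	if (!amdgpu_asic_read_register(adev, se_num, sh_num, offset, &val))
 *		... val holds the whitelisted register contents ...
 */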

static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
{
        dev_info(adev->dev, "  GRBM_STATUS=0x%08X\n",
                RREG32(mmGRBM_STATUS));
        dev_info(adev->dev, "  GRBM_STATUS2=0x%08X\n",
                RREG32(mmGRBM_STATUS2));
        dev_info(adev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE0));
        dev_info(adev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE1));
        dev_info(adev->dev, "  GRBM_STATUS_SE2=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE2));
        dev_info(adev->dev, "  GRBM_STATUS_SE3=0x%08X\n",
                RREG32(mmGRBM_STATUS_SE3));
        dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
                RREG32(mmSRBM_STATUS));
        dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
                RREG32(mmSRBM_STATUS2));
        dev_info(adev->dev, "  SDMA0_STATUS_REG   = 0x%08X\n",
                RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
        dev_info(adev->dev, "  SDMA1_STATUS_REG   = 0x%08X\n",
                 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
        dev_info(adev->dev, "  CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
        dev_info(adev->dev, "  CP_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT1));
        dev_info(adev->dev, "  CP_STALLED_STAT2 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT2));
        dev_info(adev->dev, "  CP_STALLED_STAT3 = 0x%08x\n",
                 RREG32(mmCP_STALLED_STAT3));
        dev_info(adev->dev, "  CP_CPF_BUSY_STAT = 0x%08x\n",
                 RREG32(mmCP_CPF_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPF_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPF_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS));
        dev_info(adev->dev, "  CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT));
        dev_info(adev->dev, "  CP_CPC_STALLED_STAT1 = 0x%08x\n",
                 RREG32(mmCP_CPC_STALLED_STAT1));
        dev_info(adev->dev, "  CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS));
}

/**
 * vi_gpu_check_soft_reset - check which blocks are busy
 *
 * @adev: amdgpu_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by vi_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */
u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
{
        u32 reset_mask = 0;
        u32 tmp;

        /* GRBM_STATUS */
        tmp = RREG32(mmGRBM_STATUS);
        if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
                   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
                   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
                reset_mask |= AMDGPU_RESET_GFX;

        if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_CP;

        /* GRBM_STATUS2 */
        tmp = RREG32(mmGRBM_STATUS2);
        if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_RLC;

        if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK |
                   GRBM_STATUS2__CPC_BUSY_MASK |
                   GRBM_STATUS2__CPG_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_CP;

        /* SRBM_STATUS2 */
        tmp = RREG32(mmSRBM_STATUS2);
        if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_DMA;

        if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_DMA1;

        /* SRBM_STATUS */
        tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__IH_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_IH;

        if (tmp & SRBM_STATUS__SEM_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_SEM;

        if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
                reset_mask |= AMDGPU_RESET_GRBM;

        if (adev->asic_type != CHIP_TOPAZ) {
                if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK |
                           SRBM_STATUS__UVD_BUSY_MASK))
                        reset_mask |= AMDGPU_RESET_UVD;
        }

        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
                reset_mask |= AMDGPU_RESET_VMC;

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK))
                reset_mask |= AMDGPU_RESET_MC;

        /* SDMA0_STATUS_REG */
        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                reset_mask |= AMDGPU_RESET_DMA;

        /* SDMA1_STATUS_REG */
        tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                reset_mask |= AMDGPU_RESET_DMA1;
#if 0
        /* VCE_STATUS */
        if (adev->asic_type != CHIP_TOPAZ) {
                tmp = RREG32(mmVCE_STATUS);
                if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK)
                        reset_mask |= AMDGPU_RESET_VCE;
                if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK)
                        reset_mask |= AMDGPU_RESET_VCE1;
        }

        if (adev->asic_type != CHIP_TOPAZ) {
                if (amdgpu_display_is_display_hung(adev))
                        reset_mask |= AMDGPU_RESET_DISPLAY;
        }
#endif

        /* Skip MC reset as it's most likely not hung, just busy */
        if (reset_mask & AMDGPU_RESET_MC) {
                DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
                reset_mask &= ~AMDGPU_RESET_MC;
        }

        return reset_mask;
}
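
/*
 * Example: the returned mask feeds vi_gpu_soft_reset(), but individual
 * blocks can also be tested, e.g.:
 *
 *	u32 mask = vi_gpu_check_soft_reset(adev);
 *	if (mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_CP))
 *		... the graphics pipe is busy/hung, try a soft reset ...
 */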

/**
 * vi_gpu_soft_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 * @reset_mask: mask of which blocks to reset
 *
 * Soft reset the blocks specified in @reset_mask.
 */
static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask)
{
        struct amdgpu_mode_mc_save save;
        u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
        u32 tmp;

        if (reset_mask == 0)
                return;

        dev_info(adev->dev, "GPU softreset: 0x%08X\n", reset_mask);

        vi_print_gpu_status_regs(adev);
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
        dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));

        /* disable CG/PG */

        /* stop the rlc */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        if (reset_mask & AMDGPU_RESET_DMA) {
                /* sdma0 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
        }
        if (reset_mask & AMDGPU_RESET_DMA1) {
                /* sdma1 */
                tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
                tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
                WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
        }

        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
        }

        if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
        }

        if (reset_mask & AMDGPU_RESET_CP) {
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
        }

        if (reset_mask & AMDGPU_RESET_DMA)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1);

        if (reset_mask & AMDGPU_RESET_DMA1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1);

        if (reset_mask & AMDGPU_RESET_DISPLAY)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1);

        if (reset_mask & AMDGPU_RESET_RLC)
                grbm_soft_reset =
                        REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

        if (reset_mask & AMDGPU_RESET_SEM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

        if (reset_mask & AMDGPU_RESET_IH)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1);

        if (reset_mask & AMDGPU_RESET_GRBM)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);

        if (reset_mask & AMDGPU_RESET_VMC)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (reset_mask & AMDGPU_RESET_UVD)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

        if (reset_mask & AMDGPU_RESET_VCE)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);

        if (reset_mask & AMDGPU_RESET_VCE1)
                srbm_soft_reset =
                        REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);

        if (!(adev->flags & AMDGPU_IS_APU)) {
                if (reset_mask & AMDGPU_RESET_MC)
                        srbm_soft_reset =
                                REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (grbm_soft_reset) {
                tmp = RREG32(mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32(mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmGRBM_SOFT_RESET);
        }

        if (srbm_soft_reset) {
                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);
        }

        /* Wait a little for things to settle down */
        udelay(50);

        gmc_v8_0_mc_resume(adev, &save);
        udelay(50);

        vi_print_gpu_status_regs(adev);
}
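
/*
 * Note on the sequence above: each soft-reset register is written with
 * the reset bits set, read back to post the write, held for ~50us, and
 * then written again with the bits cleared; the read-backs are what
 * order the writes against the udelay()s.
 */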

static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
        struct amdgpu_mode_mc_save save;
        u32 tmp, i;

        dev_info(adev->dev, "GPU pci config reset\n");

        /* disable dpm? */

        /* disable cg/pg */

        /* Disable GFX parsing/prefetching */
        tmp = RREG32(mmCP_ME_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
        WREG32(mmCP_ME_CNTL, tmp);

        /* Disable MEC parsing/prefetching */
        tmp = RREG32(mmCP_MEC_CNTL);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
        tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
        WREG32(mmCP_MEC_CNTL, tmp);

        /* sdma0 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);

        /* sdma1 */
        tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
        tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1);
        WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);

        /* XXX other engines? */

        /* halt the rlc, disable cp internal ints */
        //XXX
        //gfx_v8_0_rlc_stop(adev);

        udelay(50);

        /* disable mem access */
        gmc_v8_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
        }

        /* disable BM */
        pci_clear_master(adev->pdev);
        /* reset */
        amdgpu_pci_config_reset(adev);

        udelay(100);

        /* wait for asic to come out of reset */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff)
                        break;
                udelay(1);
        }
}

static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
{
        u32 tmp = RREG32(mmBIOS_SCRATCH_3);

        if (hung)
                tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
        else
                tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

        WREG32(mmBIOS_SCRATCH_3, tmp);
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
        u32 reset_mask;

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (reset_mask)
                vi_set_bios_scratch_engine_hung(adev, true);

        /* try soft reset */
        vi_gpu_soft_reset(adev, reset_mask);

        reset_mask = vi_gpu_check_soft_reset(adev);

        /* try pci config reset */
        if (reset_mask && amdgpu_hard_reset)
                vi_gpu_pci_config_reset(adev);

        reset_mask = vi_gpu_check_soft_reset(adev);

        if (!reset_mask)
                vi_set_bios_scratch_engine_hung(adev, false);

        return 0;
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
                        u32 cntl_reg, u32 status_reg)
{
        int r, i;
        struct atom_clock_dividers dividers;
        uint32_t tmp;

        r = amdgpu_atombios_get_clock_dividers(adev,
                                               COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                               clock, false, &dividers);
        if (r)
                return r;

        tmp = RREG32_SMC(cntl_reg);
        tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
                CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
        tmp |= dividers.post_divider;
        WREG32_SMC(cntl_reg, tmp);

        for (i = 0; i < 100; i++) {
                if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
                        break;
                mdelay(10);
        }
        if (i == 100)
                return -ETIMEDOUT;

        return 0;
}

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
        int r;

        r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
        if (r)
                return r;

        r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);

        return r;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
        /* todo */

        return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
        u32 mask;
        int ret;

        if (amdgpu_pcie_gen2 == 0)
                return;

        if (adev->flags & AMDGPU_IS_APU)
                return;

        ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
        if (ret != 0)
                return;

        if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
                return;

        /* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
        if (amdgpu_aspm == 0)
                return;

        /* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
                                        bool enable)
{
        u32 tmp;

        /* not necessary on CZ */
        if (adev->flags & AMDGPU_IS_APU)
                return;

        tmp = RREG32(mmBIF_DOORBELL_APER_EN);
        if (enable)
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
        else
                tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

        WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

/* topaz has no DCE, UVD, VCE */
static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &iceland_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &iceland_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 2,
                .minor = 4,
                .rev = 0,
                .funcs = &sdma_v2_4_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &tonga_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 7,
                .minor = 1,
                .rev = 0,
                .funcs = &tonga_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 10,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v10_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 5,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v5_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

static const struct amdgpu_ip_block_version cz_ip_blocks[] =
{
        /* ORDER MATTERS! */
        {
                .type = AMD_IP_BLOCK_TYPE_COMMON,
                .major = 2,
                .minor = 0,
                .rev = 0,
                .funcs = &vi_common_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gmc_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_ih_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SMC,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &cz_dpm_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_DCE,
                .major = 11,
                .minor = 0,
                .rev = 0,
                .funcs = &dce_v11_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GFX,
                .major = 8,
                .minor = 0,
                .rev = 0,
                .funcs = &gfx_v8_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_SDMA,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &sdma_v3_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_UVD,
                .major = 6,
                .minor = 0,
                .rev = 0,
                .funcs = &uvd_v6_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_VCE,
                .major = 3,
                .minor = 0,
                .rev = 0,
                .funcs = &vce_v3_0_ip_funcs,
        },
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->ip_blocks = topaz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks);
                break;
        case CHIP_TONGA:
                adev->ip_blocks = tonga_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
                break;
        case CHIP_CARRIZO:
                adev->ip_blocks = cz_ip_blocks;
                adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
        if (adev->ip_block_enabled == NULL)
                return -ENOMEM;

        return 0;
}

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
        if (adev->asic_type == CHIP_TOPAZ)
                return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
                        >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
        else
                return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
                        >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}

static const struct amdgpu_asic_funcs vi_asic_funcs =
{
        .read_disabled_bios = &vi_read_disabled_bios,
        .read_register = &vi_read_register,
        .reset = &vi_asic_reset,
        .set_vga_state = &vi_vga_set_state,
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
        .get_cu_info = &gfx_v8_0_get_cu_info,
        /* these should be moved to their own ip modules */
        .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
        .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
};
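
/*
 * Sketch: the core never calls the vi_* implementations directly; it
 * dispatches through the amdgpu_asic_*() wrappers from amdgpu.h, e.g.:
 *
 *	amdgpu_asic_reset(adev);            maps to  vi_asic_reset(adev)
 *	amdgpu_asic_get_xclk(adev);         maps to  vi_get_xclk(adev)
 *	amdgpu_asic_wait_for_mc_idle(adev); maps to  gmc_v8_0_mc_wait_for_idle(adev)
 */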

static int vi_common_early_init(void *handle)
{
        bool smc_enabled = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->smc_rreg = &vi_smc_rreg;
        adev->smc_wreg = &vi_smc_wreg;
        adev->pcie_rreg = &vi_pcie_rreg;
        adev->pcie_wreg = &vi_pcie_wreg;
        adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
        adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
        adev->didt_rreg = &vi_didt_rreg;
        adev->didt_wreg = &vi_didt_wreg;

        adev->asic_funcs = &vi_asic_funcs;

        if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
                (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
                smc_enabled = true;

        adev->rev_id = vi_get_rev_id(adev);
        adev->external_rev_id = 0xFF;
        switch (adev->asic_type) {
        case CHIP_TOPAZ:
                adev->has_uvd = false;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = 0x1;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        case CHIP_TONGA:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x14;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        case CHIP_CARRIZO:
                adev->has_uvd = true;
                adev->cg_flags = 0;
                adev->pg_flags = AMDGPU_PG_SUPPORT_UVD;
                adev->external_rev_id = adev->rev_id + 0x1;
                if (amdgpu_smc_load_fw && smc_enabled)
                        adev->firmware.smu_load = true;
                break;
        default:
                /* FIXME: not supported yet */
                return -EINVAL;
        }

        return 0;
}

static int vi_common_sw_init(void *handle)
{
        return 0;
}

static int vi_common_sw_fini(void *handle)
{
        return 0;
}

static int vi_common_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* move the golden regs per IP block */
        vi_init_golden_registers(adev);
        /* enable pcie gen2/3 link */
        vi_pcie_gen3_enable(adev);
        /* enable aspm */
        vi_program_aspm(adev);
        /* enable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, true);

        return 0;
}

static int vi_common_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* disable the doorbell aperture */
        vi_enable_doorbell_aperture(adev, false);

        return 0;
}

static int vi_common_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
        return true;
}

static int vi_common_wait_for_idle(void *handle)
{
        return 0;
}

static void vi_common_print_status(void *handle)
{
        return;
}

static int vi_common_soft_reset(void *handle)
{
        return 0;
}

static int vi_common_set_clockgating_state(void *handle,
                                            enum amd_clockgating_state state)
{
        return 0;
}

static int vi_common_set_powergating_state(void *handle,
                                            enum amd_powergating_state state)
{
        return 0;
}

const struct amd_ip_funcs vi_common_ip_funcs = {
        .early_init = vi_common_early_init,
        .late_init = NULL,
        .sw_init = vi_common_sw_init,
        .sw_fini = vi_common_sw_fini,
        .hw_init = vi_common_hw_init,
        .hw_fini = vi_common_hw_fini,
        .suspend = vi_common_suspend,
        .resume = vi_common_resume,
        .is_idle = vi_common_is_idle,
        .wait_for_idle = vi_common_wait_for_idle,
        .soft_reset = vi_common_soft_reset,
        .print_status = vi_common_print_status,
        .set_clockgating_state = vi_common_set_clockgating_state,
        .set_powergating_state = vi_common_set_powergating_state,
};