/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"

#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

#include "dce_virtual.h"
#include "amdgpu_dm.h"
/*
 * Indirect registers accessor
 */
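/*
 * Blocks such as PCIE, SMC, UVD context, DIDT and GC CAC are reached
 * through an INDEX/DATA register pair: the accessor writes the target
 * offset to the INDEX register and then moves the payload through the
 * DATA register. Each block has its own spinlock so the two-step
 * sequence cannot be interleaved by concurrent accessors.
 */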
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}
static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181
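/*
 * On Carrizo/Stoney APUs the SMC is reached through the MP0PUB INDEX/DATA
 * pair above rather than SMC_IND_INDEX_11/DATA_11; vi_common_early_init()
 * installs cz_smc_rreg()/cz_smc_wreg() when AMD_IS_APU is set.
 */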
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
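/*
 * Golden register tables: each entry is a {register, AND mask, OR value}
 * triplet consumed by amdgpu_device_program_register_sequence(), which
 * clears the bits covered by the AND mask and then ORs in the new value
 * before writing the register back.
 */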
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}
/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances. Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
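/*
 * A typical call sequence (a sketch; assumes the caller serializes on
 * adev->srbm_mutex as the gfx code does): select the instance, program
 * the instanced registers, then restore the default instance:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, 0);
 *	...program per-queue registers...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */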
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
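/*
 * Reads through ixROM_DATA above auto-increment the ROM index that was
 * zeroed via ixROM_INDEX, which is what lets the loop stream the VBIOS
 * image one dword at a time without rewriting the index.
 */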
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}
static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
	    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
		return amdgpu_reset_method;

	if (amdgpu_reset_method != -1)
		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
			 amdgpu_reset_method);

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		dev_info(adev->dev, "BACO reset\n");
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		dev_info(adev->dev, "PCI CONFIG reset\n");
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}
#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}
static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}
static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	default:
		/* change this when we support soft reset */
		return true;
	}
}
static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
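/*
 * The TXCLK perf counters are 32 bits wide with separate upper-bit shadow
 * fields, so the 64-bit totals above are assembled from the COUNT0/1
 * registers plus the COUNTER0/1_UPPER overflow bits shifted into the high
 * dword.
 */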
static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}
static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}
static void vi_pre_asic_init(struct amdgpu_device *adev)
{
}
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
	.pre_asic_init = &vi_pre_asic_init,
};
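/*
 * PCI revision IDs in these ranges identify Bristol Ridge variants of
 * Carrizo. vi_common_early_init() below uses this to enable power gating
 * on parts where the rev0 Carrizo workarounds are not needed.
 */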
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}
static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}
static int vi_common_sw_fini(void *handle)
{
	return 0;
}
static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}
static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}
static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
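/*
 * The clockgating helpers below share a read-modify-write pattern: read
 * the register into both temp and data, set or clear the feature bits in
 * data according to enable and the cg_flags mask, and write the register
 * back only when the value actually changed.
 */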
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 0x1;
	else
		data &= 0xfffffffe;

	if (temp != data)
		WREG32(0x157a, data);
}
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
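/*
 * On Tonga/Polaris/VEGAM the SMU owns system-block clockgating, so rather
 * than poking the registers directly the driver encodes a (group, block,
 * supported states, target state) tuple with PP_CG_MSG_ID() and hands it
 * to the powerplay layer through amdgpu_dpm_set_clockgating_by_smu().
 */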
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}
static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
void vi_set_virt_ops(struct amdgpu_device *adev)
{
	adev->virt.ops = &xgpu_vi_virt_ops;
}
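/*
 * IP blocks are initialized in the order they are added below: the common
 * block and GMC first, then the interrupt handler, GFX/SDMA/SMU, and the
 * display and multimedia blocks last.
 */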
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}