/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"

#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif

#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181
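/*
 * MP0PUB_IND_INDEX/DATA form the SMC index/data pair on APUs; the
 * cz_smc_rreg()/cz_smc_wreg() helpers below use them in place of the
 * SMC_IND_INDEX_11/SMC_IND_DATA_11 pair used on dGPUs.
 */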
static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
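/*
 * Each golden-register entry above is a {register offset, AND mask, OR value}
 * triplet: amdgpu_device_program_register_sequence() reads the register,
 * clears the bits selected by the AND mask and ORs in the new value (or
 * writes the value outright when the mask is 0xffffffff).
 */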
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}
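/*
 * Typical usage (see the gfx/compute code): select an instance, program the
 * SRBM-instanced registers, then switch back to instance 0:
 *
 *	vi_srbm_select(adev, me, pipe, queue, 0);
 *	... program per-queue registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 */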
static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
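/*
 * Read the VBIOS while the ASIC's VGA path may be disabled: save the BUS/VGA
 * control registers, enable the BIOS ROM and disable VGA mode, read the
 * image, then restore the saved state.
 */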
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
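/*
 * Entries flagged true above are GRBM-indexed: vi_get_register_value() reads
 * them per SE/SH instance (or from the cached rb_config) rather than as a
 * plain MMIO read.
 */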
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}
static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}
/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}
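/*
 * CONFIG_MEMSIZE reports the amount of VRAM in megabytes; it also reads back
 * as 0xffffffff while the ASIC is held in reset (see vi_gpu_pci_config_reset()
 * above).
 */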
static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}
static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}
#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C
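/*
 * These ixGNB_CLK*_DFS_CNTL/STATUS offsets appear to be the APU-side (GNB)
 * clock DFS registers: the APU branches of vi_set_uvd_clocks() and
 * vi_set_vce_clocks() below program VCLK/DCLK/ECLK through them instead of
 * the CG_*CLK registers used on dGPUs.
 */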
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}
static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}
static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}
#define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT        9
#define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00
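/* The fuse macro holds a 4-bit revision ID in bits 12:9 (mask 0x1E00, shift 9). */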
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}
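/*
 * HDP maintenance: flushing writes back the host data path after the CPU has
 * written to VRAM; invalidating (via HDP_DEBUG0 here) drops stale HDP data
 * before the CPU reads VRAM.  Either is done by direct MMIO or, when a ring
 * is supplied, by an emitted register write on that ring.
 */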
static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}
static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}
static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}
static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}
static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
};
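/*
 * Carrizo parts with these PCI revision IDs are Bristol Ridge respins; rev 0
 * Carrizo hardware needs workarounds before powergating can be enabled (see
 * vi_common_early_init() below).
 */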
#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}
static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}
static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}
static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}
static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
				PCIE_CNTL2__MST_MEM_LS_EN_MASK |
				PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	/* 0x157a is a raw offset; it has no symbolic name in the register headers */
	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 0x1;
	else
		data &= ~0x1;

	if (temp != data)
		WREG32(0x157a, data);
}
static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
				CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_MC,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_SDMA,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_HDP,
			       pp_support_state,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_BIF,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_DRM,
			       PP_STATE_SUPPORT_LS,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
			       PP_BLOCK_SYS_ROM,
			       PP_STATE_SUPPORT_CG,
			       pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
		break;
	default:
		break;
	}
	return 0;
}
static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
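/*
 * Unlike cg_flags, which describes what the driver supports, this reports the
 * gating state actually programmed in hardware by reading the registers back.
 */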
static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}
static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};
static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}