2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/pci.h>
25 #include <linux/slab.h>
27 #include <drm/amdgpu_drm.h>
30 #include "amdgpu_atombios.h"
31 #include "amdgpu_ih.h"
32 #include "amdgpu_uvd.h"
33 #include "amdgpu_vce.h"
34 #include "amdgpu_ucode.h"
38 #include "gmc/gmc_8_1_d.h"
39 #include "gmc/gmc_8_1_sh_mask.h"
41 #include "oss/oss_3_0_d.h"
42 #include "oss/oss_3_0_sh_mask.h"
44 #include "bif/bif_5_0_d.h"
45 #include "bif/bif_5_0_sh_mask.h"
47 #include "gca/gfx_8_0_d.h"
48 #include "gca/gfx_8_0_sh_mask.h"
50 #include "smu/smu_7_1_1_d.h"
51 #include "smu/smu_7_1_1_sh_mask.h"
53 #include "uvd/uvd_5_0_d.h"
54 #include "uvd/uvd_5_0_sh_mask.h"
56 #include "vce/vce_3_0_d.h"
57 #include "vce/vce_3_0_sh_mask.h"
59 #include "dce/dce_10_0_d.h"
60 #include "dce/dce_10_0_sh_mask.h"
67 #include "sdma_v2_4.h"
68 #include "sdma_v3_0.h"
69 #include "dce_v10_0.h"
70 #include "dce_v11_0.h"
71 #include "iceland_ih.h"
77 #if defined(CONFIG_DRM_AMD_ACP)
78 #include "amdgpu_acp.h"
80 #include "amdgpu_vkms.h"
82 #include "amdgpu_dm.h"
84 #define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
85 #define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
86 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
87 #define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
88 #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
89 #define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
90 #define ixPCIE_L1_PM_SUB_CNTL 0x378
91 #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
92 #define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
93 #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
94 #define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
95 #define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
97 #define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
98 #define ixCPM_CONTROL 0x1400118
99 #define ixPCIE_LC_CNTL7 0x100100BC
100 #define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L
101 #define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
102 #define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009
103 #define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
104 #define PCIE_L1_PM_SUB_CNTL 0x378
105 #define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \
106 (asic_type <= CHIP_POLARIS12) && \
109 static const struct amdgpu_video_codecs topaz_video_codecs_encode =
115 /* Tonga, CZ, ST, Fiji */
116 static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
119 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
122 .max_pixels_per_frame = 4096 * 2304,
127 static const struct amdgpu_video_codecs tonga_video_codecs_encode =
129 .codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
130 .codec_array = tonga_video_codecs_encode_array,
134 static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
137 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
140 .max_pixels_per_frame = 4096 * 4096,
144 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
147 .max_pixels_per_frame = 4096 * 4096,
152 static const struct amdgpu_video_codecs polaris_video_codecs_encode =
154 .codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
155 .codec_array = polaris_video_codecs_encode_array,
159 static const struct amdgpu_video_codecs topaz_video_codecs_decode =
166 static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
169 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
172 .max_pixels_per_frame = 4096 * 4096,
176 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
179 .max_pixels_per_frame = 4096 * 4096,
183 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
186 .max_pixels_per_frame = 4096 * 4096,
190 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
193 .max_pixels_per_frame = 4096 * 4096,
198 static const struct amdgpu_video_codecs tonga_video_codecs_decode =
200 .codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
201 .codec_array = tonga_video_codecs_decode_array,
204 /* CZ, ST, Fiji, Polaris */
205 static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
208 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
211 .max_pixels_per_frame = 4096 * 4096,
215 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
218 .max_pixels_per_frame = 4096 * 4096,
222 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
225 .max_pixels_per_frame = 4096 * 4096,
229 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
232 .max_pixels_per_frame = 4096 * 4096,
236 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
239 .max_pixels_per_frame = 4096 * 4096,
243 .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
246 .max_pixels_per_frame = 4096 * 4096,
251 static const struct amdgpu_video_codecs cz_video_codecs_decode =
253 .codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
254 .codec_array = cz_video_codecs_decode_array,
257 static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
258 const struct amdgpu_video_codecs **codecs)
260 switch (adev->asic_type) {
263 *codecs = &topaz_video_codecs_encode;
265 *codecs = &topaz_video_codecs_decode;
269 *codecs = &tonga_video_codecs_encode;
271 *codecs = &tonga_video_codecs_decode;
278 *codecs = &polaris_video_codecs_encode;
280 *codecs = &cz_video_codecs_decode;
286 *codecs = &tonga_video_codecs_encode;
288 *codecs = &cz_video_codecs_decode;
296 * Indirect registers accessor
298 static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
303 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
304 WREG32_NO_KIQ(mmPCIE_INDEX, reg);
305 (void)RREG32_NO_KIQ(mmPCIE_INDEX);
306 r = RREG32_NO_KIQ(mmPCIE_DATA);
307 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
311 static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
315 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
316 WREG32_NO_KIQ(mmPCIE_INDEX, reg);
317 (void)RREG32_NO_KIQ(mmPCIE_INDEX);
318 WREG32_NO_KIQ(mmPCIE_DATA, v);
319 (void)RREG32_NO_KIQ(mmPCIE_DATA);
320 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
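/*
 * Usage sketch (illustrative only, not part of the upstream driver): after
 * vi_common_early_init() installs these callbacks as adev->pcie_rreg and
 * adev->pcie_wreg, the RREG32_PCIE()/WREG32_PCIE() helpers in amdgpu.h are
 * assumed to dispatch here, so an indirect PCIE register update looks like:
 *
 *	data = RREG32_PCIE(ixPCIE_LC_CNTL);
 *	data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
 *	WREG32_PCIE(ixPCIE_LC_CNTL, data);
 *
 * Each access writes mmPCIE_INDEX, reads it back to post the write, and then
 * touches mmPCIE_DATA, all under pcie_idx_lock so concurrent users cannot
 * interleave their index/data pairs.
 */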
323 static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
328 spin_lock_irqsave(&adev->smc_idx_lock, flags);
329 WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
330 r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
331 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
335 static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
339 spin_lock_irqsave(&adev->smc_idx_lock, flags);
340 WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
341 WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
342 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
346 #define mmMP0PUB_IND_INDEX 0x180
347 #define mmMP0PUB_IND_DATA 0x181
349 static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
354 spin_lock_irqsave(&adev->smc_idx_lock, flags);
355 WREG32(mmMP0PUB_IND_INDEX, (reg));
356 r = RREG32(mmMP0PUB_IND_DATA);
357 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
361 static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
365 spin_lock_irqsave(&adev->smc_idx_lock, flags);
366 WREG32(mmMP0PUB_IND_INDEX, (reg));
367 WREG32(mmMP0PUB_IND_DATA, (v));
368 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
371 static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
376 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
377 WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
378 r = RREG32(mmUVD_CTX_DATA);
379 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
383 static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
387 spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
388 WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
389 WREG32(mmUVD_CTX_DATA, (v));
390 spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
393 static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
398 spin_lock_irqsave(&adev->didt_idx_lock, flags);
399 WREG32(mmDIDT_IND_INDEX, (reg));
400 r = RREG32(mmDIDT_IND_DATA);
401 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
405 static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
409 spin_lock_irqsave(&adev->didt_idx_lock, flags);
410 WREG32(mmDIDT_IND_INDEX, (reg));
411 WREG32(mmDIDT_IND_DATA, (v));
412 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
415 static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
420 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
421 WREG32(mmGC_CAC_IND_INDEX, (reg));
422 r = RREG32(mmGC_CAC_IND_DATA);
423 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
427 static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
431 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
432 WREG32(mmGC_CAC_IND_INDEX, (reg));
433 WREG32(mmGC_CAC_IND_DATA, (v));
434 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
438 static const u32 tonga_mgcg_cgcg_init[] =
440 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
441 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
442 mmPCIE_DATA, 0x000f0000, 0x00000000,
443 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
444 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
445 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
446 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
449 static const u32 fiji_mgcg_cgcg_init[] =
451 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
452 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
453 mmPCIE_DATA, 0x000f0000, 0x00000000,
454 mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
455 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
456 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
457 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
460 static const u32 iceland_mgcg_cgcg_init[] =
462 mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
463 mmPCIE_DATA, 0x000f0000, 0x00000000,
464 mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
465 mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
466 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
469 static const u32 cz_mgcg_cgcg_init[] =
471 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
472 mmPCIE_INDEX, 0xffffffff, 0x0140001c,
473 mmPCIE_DATA, 0x000f0000, 0x00000000,
474 mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
475 mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
478 static const u32 stoney_mgcg_cgcg_init[] =
480 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
481 mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
482 mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
485 static void vi_init_golden_registers(struct amdgpu_device *adev)
487 /* Some of the registers might be dependent on GRBM_GFX_INDEX */
488 mutex_lock(&adev->grbm_idx_mutex);
490 if (amdgpu_sriov_vf(adev)) {
491 xgpu_vi_init_golden_registers(adev);
492 mutex_unlock(&adev->grbm_idx_mutex);
496 switch (adev->asic_type) {
498 amdgpu_device_program_register_sequence(adev,
499 iceland_mgcg_cgcg_init,
500 ARRAY_SIZE(iceland_mgcg_cgcg_init));
503 amdgpu_device_program_register_sequence(adev,
505 ARRAY_SIZE(fiji_mgcg_cgcg_init));
508 amdgpu_device_program_register_sequence(adev,
509 tonga_mgcg_cgcg_init,
510 ARRAY_SIZE(tonga_mgcg_cgcg_init));
513 amdgpu_device_program_register_sequence(adev,
515 ARRAY_SIZE(cz_mgcg_cgcg_init));
518 amdgpu_device_program_register_sequence(adev,
519 stoney_mgcg_cgcg_init,
520 ARRAY_SIZE(stoney_mgcg_cgcg_init));
529 mutex_unlock(&adev->grbm_idx_mutex);
533 * vi_get_xclk - get the xclk
535 * @adev: amdgpu_device pointer
537 * Returns the reference clock used by the gfx engine
540 static u32 vi_get_xclk(struct amdgpu_device *adev)
542 u32 reference_clock = adev->clock.spll.reference_freq;
545 if (adev->flags & AMD_IS_APU) {
546 switch (adev->asic_type) {
548 /* vbios says 48 MHz, but the actual frequency is 100 MHz */
551 return reference_clock;
555 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
556 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
559 tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
560 if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
561 return reference_clock / 4;
563 return reference_clock;
567 * vi_srbm_select - select specific register instances
569 * @adev: amdgpu_device pointer
570 * @me: selected ME (micro engine)
575 * Switches the currently active register instances. Some
576 * registers are instanced per VMID, others are instanced per
577 * me/pipe/queue combination.
579 void vi_srbm_select(struct amdgpu_device *adev,
580 u32 me, u32 pipe, u32 queue, u32 vmid)
582 u32 srbm_gfx_cntl = 0;
583 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
584 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
585 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
586 srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
587 WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
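/*
 * Typical call pattern (sketch based on how the gfx/compute code uses this
 * helper; adev->srbm_mutex is assumed to serialize the selection): pick the
 * instance, program the per-instance registers, then restore the default
 * instance before dropping the lock, e.g.:
 *
 *	mutex_lock(&adev->srbm_mutex);
 *	vi_srbm_select(adev, me, pipe, queue, vmid);
 *	... program the VMID/queue specific registers ...
 *	vi_srbm_select(adev, 0, 0, 0, 0);
 *	mutex_unlock(&adev->srbm_mutex);
 */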
590 static bool vi_read_disabled_bios(struct amdgpu_device *adev)
593 u32 d1vga_control = 0;
594 u32 d2vga_control = 0;
595 u32 vga_render_control = 0;
599 bus_cntl = RREG32(mmBUS_CNTL);
600 if (adev->mode_info.num_crtc) {
601 d1vga_control = RREG32(mmD1VGA_CONTROL);
602 d2vga_control = RREG32(mmD2VGA_CONTROL);
603 vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
605 rom_cntl = RREG32_SMC(ixROM_CNTL);
608 WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
609 if (adev->mode_info.num_crtc) {
610 /* Disable VGA mode */
611 WREG32(mmD1VGA_CONTROL,
612 (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
613 D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
614 WREG32(mmD2VGA_CONTROL,
615 (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
616 D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
617 WREG32(mmVGA_RENDER_CONTROL,
618 (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
620 WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);
622 r = amdgpu_read_bios(adev);
625 WREG32(mmBUS_CNTL, bus_cntl);
626 if (adev->mode_info.num_crtc) {
627 WREG32(mmD1VGA_CONTROL, d1vga_control);
628 WREG32(mmD2VGA_CONTROL, d2vga_control);
629 WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
631 WREG32_SMC(ixROM_CNTL, rom_cntl);
635 static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
636 u8 *bios, u32 length_bytes)
644 if (length_bytes == 0)
646 /* the APU vbios image is embedded in the system BIOS (sbios) image */
647 if (adev->flags & AMD_IS_APU)
650 dw_ptr = (u32 *)bios;
651 length_dw = ALIGN(length_bytes, 4) / 4;
652 /* take the smc lock since we are using the smc index */
653 spin_lock_irqsave(&adev->smc_idx_lock, flags);
654 /* set rom index to 0 */
655 WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
656 WREG32(mmSMC_IND_DATA_11, 0);
657 /* set index to data for continuous read */
658 WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
659 for (i = 0; i < length_dw; i++)
660 dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
661 spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
666 static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
676 {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
677 {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
679 {mmCP_STALLED_STAT1},
680 {mmCP_STALLED_STAT2},
681 {mmCP_STALLED_STAT3},
682 {mmCP_CPF_BUSY_STAT},
683 {mmCP_CPF_STALLED_STAT1},
685 {mmCP_CPC_BUSY_STAT},
686 {mmCP_CPC_STALLED_STAT1},
722 {mmGB_MACROTILE_MODE0},
723 {mmGB_MACROTILE_MODE1},
724 {mmGB_MACROTILE_MODE2},
725 {mmGB_MACROTILE_MODE3},
726 {mmGB_MACROTILE_MODE4},
727 {mmGB_MACROTILE_MODE5},
728 {mmGB_MACROTILE_MODE6},
729 {mmGB_MACROTILE_MODE7},
730 {mmGB_MACROTILE_MODE8},
731 {mmGB_MACROTILE_MODE9},
732 {mmGB_MACROTILE_MODE10},
733 {mmGB_MACROTILE_MODE11},
734 {mmGB_MACROTILE_MODE12},
735 {mmGB_MACROTILE_MODE13},
736 {mmGB_MACROTILE_MODE14},
737 {mmGB_MACROTILE_MODE15},
738 {mmCC_RB_BACKEND_DISABLE, true},
739 {mmGC_USER_RB_BACKEND_DISABLE, true},
740 {mmGB_BACKEND_MAP, false},
741 {mmPA_SC_RASTER_CONFIG, true},
742 {mmPA_SC_RASTER_CONFIG_1, true},
745 static uint32_t vi_get_register_value(struct amdgpu_device *adev,
746 bool indexed, u32 se_num,
747 u32 sh_num, u32 reg_offset)
751 unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
752 unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
754 switch (reg_offset) {
755 case mmCC_RB_BACKEND_DISABLE:
756 return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
757 case mmGC_USER_RB_BACKEND_DISABLE:
758 return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
759 case mmPA_SC_RASTER_CONFIG:
760 return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
761 case mmPA_SC_RASTER_CONFIG_1:
762 return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
765 mutex_lock(&adev->grbm_idx_mutex);
766 if (se_num != 0xffffffff || sh_num != 0xffffffff)
767 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
769 val = RREG32(reg_offset);
771 if (se_num != 0xffffffff || sh_num != 0xffffffff)
772 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
773 mutex_unlock(&adev->grbm_idx_mutex);
778 switch (reg_offset) {
779 case mmGB_ADDR_CONFIG:
780 return adev->gfx.config.gb_addr_config;
781 case mmMC_ARB_RAMCFG:
782 return adev->gfx.config.mc_arb_ramcfg;
783 case mmGB_TILE_MODE0:
784 case mmGB_TILE_MODE1:
785 case mmGB_TILE_MODE2:
786 case mmGB_TILE_MODE3:
787 case mmGB_TILE_MODE4:
788 case mmGB_TILE_MODE5:
789 case mmGB_TILE_MODE6:
790 case mmGB_TILE_MODE7:
791 case mmGB_TILE_MODE8:
792 case mmGB_TILE_MODE9:
793 case mmGB_TILE_MODE10:
794 case mmGB_TILE_MODE11:
795 case mmGB_TILE_MODE12:
796 case mmGB_TILE_MODE13:
797 case mmGB_TILE_MODE14:
798 case mmGB_TILE_MODE15:
799 case mmGB_TILE_MODE16:
800 case mmGB_TILE_MODE17:
801 case mmGB_TILE_MODE18:
802 case mmGB_TILE_MODE19:
803 case mmGB_TILE_MODE20:
804 case mmGB_TILE_MODE21:
805 case mmGB_TILE_MODE22:
806 case mmGB_TILE_MODE23:
807 case mmGB_TILE_MODE24:
808 case mmGB_TILE_MODE25:
809 case mmGB_TILE_MODE26:
810 case mmGB_TILE_MODE27:
811 case mmGB_TILE_MODE28:
812 case mmGB_TILE_MODE29:
813 case mmGB_TILE_MODE30:
814 case mmGB_TILE_MODE31:
815 idx = (reg_offset - mmGB_TILE_MODE0);
816 return adev->gfx.config.tile_mode_array[idx];
817 case mmGB_MACROTILE_MODE0:
818 case mmGB_MACROTILE_MODE1:
819 case mmGB_MACROTILE_MODE2:
820 case mmGB_MACROTILE_MODE3:
821 case mmGB_MACROTILE_MODE4:
822 case mmGB_MACROTILE_MODE5:
823 case mmGB_MACROTILE_MODE6:
824 case mmGB_MACROTILE_MODE7:
825 case mmGB_MACROTILE_MODE8:
826 case mmGB_MACROTILE_MODE9:
827 case mmGB_MACROTILE_MODE10:
828 case mmGB_MACROTILE_MODE11:
829 case mmGB_MACROTILE_MODE12:
830 case mmGB_MACROTILE_MODE13:
831 case mmGB_MACROTILE_MODE14:
832 case mmGB_MACROTILE_MODE15:
833 idx = (reg_offset - mmGB_MACROTILE_MODE0);
834 return adev->gfx.config.macrotile_mode_array[idx];
836 return RREG32(reg_offset);
841 static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
842 u32 sh_num, u32 reg_offset, u32 *value)
847 for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
848 bool indexed = vi_allowed_read_registers[i].grbm_indexed;
850 if (reg_offset != vi_allowed_read_registers[i].reg_offset)
853 *value = vi_get_register_value(adev, indexed, se_num, sh_num,
861 * vi_asic_pci_config_reset - soft reset GPU
863 * @adev: amdgpu_device pointer
865 * Use PCI Config method to reset the GPU.
867 * Returns 0 for success.
869 static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
874 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
877 pci_clear_master(adev->pdev);
879 amdgpu_device_pci_config_reset(adev);
883 /* wait for asic to come out of reset */
884 for (i = 0; i < adev->usec_timeout; i++) {
885 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
887 pci_set_master(adev->pdev);
888 adev->has_hw_reset = true;
895 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
900 static int vi_asic_supports_baco(struct amdgpu_device *adev)
902 switch (adev->asic_type) {
909 return amdgpu_dpm_is_baco_supported(adev);
915 static enum amd_reset_method
916 vi_asic_reset_method(struct amdgpu_device *adev)
920 if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
921 amdgpu_reset_method == AMD_RESET_METHOD_BACO)
922 return amdgpu_reset_method;
924 if (amdgpu_reset_method != -1)
925 dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
926 amdgpu_reset_method);
928 switch (adev->asic_type) {
935 baco_reset = amdgpu_dpm_is_baco_supported(adev);
943 return AMD_RESET_METHOD_BACO;
945 return AMD_RESET_METHOD_LEGACY;
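/*
 * Note (assumption for illustration): amdgpu_reset_method is the value of the
 * amdgpu.reset_method module parameter, so the auto-selection above can be
 * overridden from the command line, e.g.:
 *
 *	modprobe amdgpu reset_method=4	(force BACO)
 *	modprobe amdgpu reset_method=0	(force legacy/PCI config reset)
 *
 * Unsupported values trigger the dev_warn() above and fall back to the
 * per-ASIC default chosen here.
 */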
949 * vi_asic_reset - soft reset GPU
951 * @adev: amdgpu_device pointer
953 * Look up which blocks are hung and attempt to reset them.
955 * Returns 0 for success.
957 static int vi_asic_reset(struct amdgpu_device *adev)
961 /* APUs don't have full asic reset */
962 if (adev->flags & AMD_IS_APU)
965 if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
966 dev_info(adev->dev, "BACO reset\n");
967 r = amdgpu_dpm_baco_reset(adev);
969 dev_info(adev->dev, "PCI CONFIG reset\n");
970 r = vi_asic_pci_config_reset(adev);
976 static u32 vi_get_config_memsize(struct amdgpu_device *adev)
978 return RREG32(mmCONFIG_MEMSIZE);
981 static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
982 u32 cntl_reg, u32 status_reg)
985 struct atom_clock_dividers dividers;
988 r = amdgpu_atombios_get_clock_dividers(adev,
989 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
990 clock, false, &dividers);
994 tmp = RREG32_SMC(cntl_reg);
996 if (adev->flags & AMD_IS_APU)
997 tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
999 tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
1000 CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
1001 tmp |= dividers.post_divider;
1002 WREG32_SMC(cntl_reg, tmp);
1004 for (i = 0; i < 100; i++) {
1005 tmp = RREG32_SMC(status_reg);
1006 if (adev->flags & AMD_IS_APU) {
1010 if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
1020 #define ixGNB_CLK1_DFS_CNTL 0xD82200F0
1021 #define ixGNB_CLK1_STATUS 0xD822010C
1022 #define ixGNB_CLK2_DFS_CNTL 0xD8220110
1023 #define ixGNB_CLK2_STATUS 0xD822012C
1024 #define ixGNB_CLK3_DFS_CNTL 0xD8220130
1025 #define ixGNB_CLK3_STATUS 0xD822014C
1027 static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
1031 if (adev->flags & AMD_IS_APU) {
1032 r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
1036 r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
1040 r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
1044 r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
1052 static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
1055 struct atom_clock_dividers dividers;
1062 if (adev->flags & AMD_IS_APU) {
1063 reg_ctrl = ixGNB_CLK3_DFS_CNTL;
1064 reg_status = ixGNB_CLK3_STATUS;
1065 status_mask = 0x00010000;
1066 reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
1068 reg_ctrl = ixCG_ECLK_CNTL;
1069 reg_status = ixCG_ECLK_STATUS;
1070 status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
1071 reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
1074 r = amdgpu_atombios_get_clock_dividers(adev,
1075 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1076 ecclk, false, &dividers);
1080 for (i = 0; i < 100; i++) {
1081 if (RREG32_SMC(reg_status) & status_mask)
1089 tmp = RREG32_SMC(reg_ctrl);
1091 tmp |= dividers.post_divider;
1092 WREG32_SMC(reg_ctrl, tmp);
1094 for (i = 0; i < 100; i++) {
1095 if (RREG32_SMC(reg_status) & status_mask)
1106 static void vi_enable_aspm(struct amdgpu_device *adev)
1110 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1111 data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
1112 PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
1113 data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
1114 PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
1115 data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1116 data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
1118 WREG32_PCIE(ixPCIE_LC_CNTL, data);
1121 static void vi_program_aspm(struct amdgpu_device *adev)
1123 u32 data, data1, orig;
1125 bool bClkReqSupport = true;
1127 if (!amdgpu_device_should_use_aspm(adev))
1130 if (adev->asic_type < CHIP_POLARIS10)
1133 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1134 data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
1135 data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1136 data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
1138 WREG32_PCIE(ixPCIE_LC_CNTL, data);
1140 orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1141 data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
1142 data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
1143 data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
1145 WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
1147 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
1148 data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
1150 WREG32_PCIE(ixPCIE_LC_CNTL3, data);
1152 orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
1153 data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
1155 WREG32_PCIE(ixPCIE_P_CNTL, data);
1157 data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
1158 pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
1159 if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
1160 (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
1161 PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
1162 PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
1163 PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
1165 } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
1166 PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
1167 PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
1168 PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
1172 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
1173 data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
1175 WREG32_PCIE(ixPCIE_LC_CNTL6, data);
1177 orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
1178 data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
1180 WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
1182 pci_read_config_dword(adev->pdev, LINK_CAP, &data);
1183 if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
1184 bClkReqSupport = false;
1186 if (bClkReqSupport) {
1187 orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
1188 data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
1189 data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
1190 (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
1192 WREG32_SMC(ixTHM_CLK_CNTL, data);
1194 orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
1195 data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
1196 MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
1197 data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
1198 (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
1199 data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
1201 WREG32_SMC(ixMISC_CLK_CTRL, data);
1203 orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
1204 data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
1206 WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1208 orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
1209 data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
1211 WREG32_SMC(ixCG_CLKPIN_CNTL, data);
1213 orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
1214 data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
1215 data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
1217 WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
1219 orig = data = RREG32_PCIE(ixCPM_CONTROL);
1220 data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
1221 CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
1223 WREG32_PCIE(ixCPM_CONTROL, data);
1225 orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
1226 data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
1227 data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
1229 WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
1231 orig = data = RREG32(mmBIF_CLK_CTRL);
1232 data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
1234 WREG32(mmBIF_CLK_CTRL, data);
1236 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
1237 data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
1239 WREG32_PCIE(ixPCIE_LC_CNTL7, data);
1241 orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
1242 data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
1244 WREG32_PCIE(ixPCIE_HW_DEBUG, data);
1246 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
1247 data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
1248 data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1250 data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
1252 WREG32_PCIE(ixPCIE_LC_CNTL2, data);
1256 vi_enable_aspm(adev);
1258 data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
1259 data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
1260 if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
1261 data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
1262 data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
1263 orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
1264 data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
1266 WREG32_PCIE(ixPCIE_LC_CNTL, data);
1269 if ((adev->asic_type == CHIP_POLARIS12 &&
1270 !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
1271 ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
1272 orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
1273 data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
1275 WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
1279 static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
1284 /* not necessary on CZ */
1285 if (adev->flags & AMD_IS_APU)
1288 tmp = RREG32(mmBIF_DOORBELL_APER_EN);
1290 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
1292 tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);
1294 WREG32(mmBIF_DOORBELL_APER_EN, tmp);
1297 #define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
1298 #define ATI_REV_ID_FUSE_MACRO__SHIFT 9
1299 #define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
1301 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
1303 if (adev->flags & AMD_IS_APU)
1304 return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
1305 >> ATI_REV_ID_FUSE_MACRO__SHIFT;
1307 return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
1308 >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
1311 static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
1313 if (!ring || !ring->funcs->emit_wreg) {
1314 WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1315 RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
1317 amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
1321 static void vi_invalidate_hdp(struct amdgpu_device *adev,
1322 struct amdgpu_ring *ring)
1324 if (!ring || !ring->funcs->emit_wreg) {
1325 WREG32(mmHDP_DEBUG0, 1);
1326 RREG32(mmHDP_DEBUG0);
1328 amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
1332 static bool vi_need_full_reset(struct amdgpu_device *adev)
1334 switch (adev->asic_type) {
1337 /* CZ has hang issues with full reset at the moment */
1341 /* XXX: soft reset should work on fiji and tonga */
1343 case CHIP_POLARIS10:
1344 case CHIP_POLARIS11:
1345 case CHIP_POLARIS12:
1348 /* change this when we support soft reset */
1353 static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
1356 uint32_t perfctr = 0;
1357 uint64_t cnt0_of, cnt1_of;
1360 /* This reports 0 on APUs, so return to avoid writing/reading registers
1361 * that may or may not be different from their GPU counterparts
1363 if (adev->flags & AMD_IS_APU)
1366 /* Set the 2 events that we wish to watch, defined above */
1367 /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
1368 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
1369 perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
1371 /* Write to enable desired perf counters */
1372 WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
1373 /* Zero out and enable the perf counters
1375 * Bit 0 = Start all counters(1)
1376 * Bit 2 = Global counter reset enable(1)
1378 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
1382 /* Load the shadow and disable the perf counters
1384 * Bit 0 = Stop counters(0)
1385 * Bit 1 = Load the shadow counters(1)
1387 WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
1389 /* Read register values to get any >32bit overflow */
1390 tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
1391 cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
1392 cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
1394 /* Get the values and add the overflow */
1395 *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
1396 *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
1399 static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
1401 uint64_t nak_r, nak_g;
1403 /* Get the number of NAKs received and generated */
1404 nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
1405 nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
1407 /* Add the total number of NAKs, i.e., the number of replays */
1408 return (nak_r + nak_g);
1411 static bool vi_need_reset_on_init(struct amdgpu_device *adev)
1415 if (adev->flags & AMD_IS_APU)
1418 /* check if the SMC is already running */
1419 clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
1420 pc = RREG32_SMC(ixSMC_PC_C);
1421 if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
1428 static void vi_pre_asic_init(struct amdgpu_device *adev)
1432 static const struct amdgpu_asic_funcs vi_asic_funcs =
1434 .read_disabled_bios = &vi_read_disabled_bios,
1435 .read_bios_from_rom = &vi_read_bios_from_rom,
1436 .read_register = &vi_read_register,
1437 .reset = &vi_asic_reset,
1438 .reset_method = &vi_asic_reset_method,
1439 .get_xclk = &vi_get_xclk,
1440 .set_uvd_clocks = &vi_set_uvd_clocks,
1441 .set_vce_clocks = &vi_set_vce_clocks,
1442 .get_config_memsize = &vi_get_config_memsize,
1443 .flush_hdp = &vi_flush_hdp,
1444 .invalidate_hdp = &vi_invalidate_hdp,
1445 .need_full_reset = &vi_need_full_reset,
1446 .init_doorbell_index = &legacy_doorbell_index_init,
1447 .get_pcie_usage = &vi_get_pcie_usage,
1448 .need_reset_on_init = &vi_need_reset_on_init,
1449 .get_pcie_replay_count = &vi_get_pcie_replay_count,
1450 .supports_baco = &vi_asic_supports_baco,
1451 .pre_asic_init = &vi_pre_asic_init,
1452 .query_video_codecs = &vi_query_video_codecs,
1455 #define CZ_REV_BRISTOL(rev) \
1456 ((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))
1458 static int vi_common_early_init(struct amdgpu_ip_block *ip_block)
1460 struct amdgpu_device *adev = ip_block->adev;
1462 if (adev->flags & AMD_IS_APU) {
1463 adev->smc_rreg = &cz_smc_rreg;
1464 adev->smc_wreg = &cz_smc_wreg;
1466 adev->smc_rreg = &vi_smc_rreg;
1467 adev->smc_wreg = &vi_smc_wreg;
1469 adev->pcie_rreg = &vi_pcie_rreg;
1470 adev->pcie_wreg = &vi_pcie_wreg;
1471 adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
1472 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1473 adev->didt_rreg = &vi_didt_rreg;
1474 adev->didt_wreg = &vi_didt_wreg;
1475 adev->gc_cac_rreg = &vi_gc_cac_rreg;
1476 adev->gc_cac_wreg = &vi_gc_cac_wreg;
1478 adev->asic_funcs = &vi_asic_funcs;
1480 adev->rev_id = vi_get_rev_id(adev);
1481 adev->external_rev_id = 0xFF;
1482 switch (adev->asic_type) {
1486 adev->external_rev_id = 0x1;
1489 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1490 AMD_CG_SUPPORT_GFX_MGLS |
1491 AMD_CG_SUPPORT_GFX_RLC_LS |
1492 AMD_CG_SUPPORT_GFX_CP_LS |
1493 AMD_CG_SUPPORT_GFX_CGTS |
1494 AMD_CG_SUPPORT_GFX_CGTS_LS |
1495 AMD_CG_SUPPORT_GFX_CGCG |
1496 AMD_CG_SUPPORT_GFX_CGLS |
1497 AMD_CG_SUPPORT_SDMA_MGCG |
1498 AMD_CG_SUPPORT_SDMA_LS |
1499 AMD_CG_SUPPORT_BIF_LS |
1500 AMD_CG_SUPPORT_HDP_MGCG |
1501 AMD_CG_SUPPORT_HDP_LS |
1502 AMD_CG_SUPPORT_ROM_MGCG |
1503 AMD_CG_SUPPORT_MC_MGCG |
1504 AMD_CG_SUPPORT_MC_LS |
1505 AMD_CG_SUPPORT_UVD_MGCG;
1507 adev->external_rev_id = adev->rev_id + 0x3c;
1510 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1511 AMD_CG_SUPPORT_GFX_CGCG |
1512 AMD_CG_SUPPORT_GFX_CGLS |
1513 AMD_CG_SUPPORT_SDMA_MGCG |
1514 AMD_CG_SUPPORT_SDMA_LS |
1515 AMD_CG_SUPPORT_BIF_LS |
1516 AMD_CG_SUPPORT_HDP_MGCG |
1517 AMD_CG_SUPPORT_HDP_LS |
1518 AMD_CG_SUPPORT_ROM_MGCG |
1519 AMD_CG_SUPPORT_MC_MGCG |
1520 AMD_CG_SUPPORT_MC_LS |
1521 AMD_CG_SUPPORT_DRM_LS |
1522 AMD_CG_SUPPORT_UVD_MGCG;
1524 adev->external_rev_id = adev->rev_id + 0x14;
1526 case CHIP_POLARIS11:
1527 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1528 AMD_CG_SUPPORT_GFX_RLC_LS |
1529 AMD_CG_SUPPORT_GFX_CP_LS |
1530 AMD_CG_SUPPORT_GFX_CGCG |
1531 AMD_CG_SUPPORT_GFX_CGLS |
1532 AMD_CG_SUPPORT_GFX_3D_CGCG |
1533 AMD_CG_SUPPORT_GFX_3D_CGLS |
1534 AMD_CG_SUPPORT_SDMA_MGCG |
1535 AMD_CG_SUPPORT_SDMA_LS |
1536 AMD_CG_SUPPORT_BIF_MGCG |
1537 AMD_CG_SUPPORT_BIF_LS |
1538 AMD_CG_SUPPORT_HDP_MGCG |
1539 AMD_CG_SUPPORT_HDP_LS |
1540 AMD_CG_SUPPORT_ROM_MGCG |
1541 AMD_CG_SUPPORT_MC_MGCG |
1542 AMD_CG_SUPPORT_MC_LS |
1543 AMD_CG_SUPPORT_DRM_LS |
1544 AMD_CG_SUPPORT_UVD_MGCG |
1545 AMD_CG_SUPPORT_VCE_MGCG;
1547 adev->external_rev_id = adev->rev_id + 0x5A;
1549 case CHIP_POLARIS10:
1550 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1551 AMD_CG_SUPPORT_GFX_RLC_LS |
1552 AMD_CG_SUPPORT_GFX_CP_LS |
1553 AMD_CG_SUPPORT_GFX_CGCG |
1554 AMD_CG_SUPPORT_GFX_CGLS |
1555 AMD_CG_SUPPORT_GFX_3D_CGCG |
1556 AMD_CG_SUPPORT_GFX_3D_CGLS |
1557 AMD_CG_SUPPORT_SDMA_MGCG |
1558 AMD_CG_SUPPORT_SDMA_LS |
1559 AMD_CG_SUPPORT_BIF_MGCG |
1560 AMD_CG_SUPPORT_BIF_LS |
1561 AMD_CG_SUPPORT_HDP_MGCG |
1562 AMD_CG_SUPPORT_HDP_LS |
1563 AMD_CG_SUPPORT_ROM_MGCG |
1564 AMD_CG_SUPPORT_MC_MGCG |
1565 AMD_CG_SUPPORT_MC_LS |
1566 AMD_CG_SUPPORT_DRM_LS |
1567 AMD_CG_SUPPORT_UVD_MGCG |
1568 AMD_CG_SUPPORT_VCE_MGCG;
1570 adev->external_rev_id = adev->rev_id + 0x50;
1572 case CHIP_POLARIS12:
1573 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
1574 AMD_CG_SUPPORT_GFX_RLC_LS |
1575 AMD_CG_SUPPORT_GFX_CP_LS |
1576 AMD_CG_SUPPORT_GFX_CGCG |
1577 AMD_CG_SUPPORT_GFX_CGLS |
1578 AMD_CG_SUPPORT_GFX_3D_CGCG |
1579 AMD_CG_SUPPORT_GFX_3D_CGLS |
1580 AMD_CG_SUPPORT_SDMA_MGCG |
1581 AMD_CG_SUPPORT_SDMA_LS |
1582 AMD_CG_SUPPORT_BIF_MGCG |
1583 AMD_CG_SUPPORT_BIF_LS |
1584 AMD_CG_SUPPORT_HDP_MGCG |
1585 AMD_CG_SUPPORT_HDP_LS |
1586 AMD_CG_SUPPORT_ROM_MGCG |
1587 AMD_CG_SUPPORT_MC_MGCG |
1588 AMD_CG_SUPPORT_MC_LS |
1589 AMD_CG_SUPPORT_DRM_LS |
1590 AMD_CG_SUPPORT_UVD_MGCG |
1591 AMD_CG_SUPPORT_VCE_MGCG;
1593 adev->external_rev_id = adev->rev_id + 0x64;
1597 /*AMD_CG_SUPPORT_GFX_MGCG |
1598 AMD_CG_SUPPORT_GFX_RLC_LS |
1599 AMD_CG_SUPPORT_GFX_CP_LS |
1600 AMD_CG_SUPPORT_GFX_CGCG |
1601 AMD_CG_SUPPORT_GFX_CGLS |
1602 AMD_CG_SUPPORT_GFX_3D_CGCG |
1603 AMD_CG_SUPPORT_GFX_3D_CGLS |
1604 AMD_CG_SUPPORT_SDMA_MGCG |
1605 AMD_CG_SUPPORT_SDMA_LS |
1606 AMD_CG_SUPPORT_BIF_MGCG |
1607 AMD_CG_SUPPORT_BIF_LS |
1608 AMD_CG_SUPPORT_HDP_MGCG |
1609 AMD_CG_SUPPORT_HDP_LS |
1610 AMD_CG_SUPPORT_ROM_MGCG |
1611 AMD_CG_SUPPORT_MC_MGCG |
1612 AMD_CG_SUPPORT_MC_LS |
1613 AMD_CG_SUPPORT_DRM_LS |
1614 AMD_CG_SUPPORT_UVD_MGCG |
1615 AMD_CG_SUPPORT_VCE_MGCG;*/
1617 adev->external_rev_id = adev->rev_id + 0x6E;
1620 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1621 AMD_CG_SUPPORT_GFX_MGCG |
1622 AMD_CG_SUPPORT_GFX_MGLS |
1623 AMD_CG_SUPPORT_GFX_RLC_LS |
1624 AMD_CG_SUPPORT_GFX_CP_LS |
1625 AMD_CG_SUPPORT_GFX_CGTS |
1626 AMD_CG_SUPPORT_GFX_CGTS_LS |
1627 AMD_CG_SUPPORT_GFX_CGCG |
1628 AMD_CG_SUPPORT_GFX_CGLS |
1629 AMD_CG_SUPPORT_BIF_LS |
1630 AMD_CG_SUPPORT_HDP_MGCG |
1631 AMD_CG_SUPPORT_HDP_LS |
1632 AMD_CG_SUPPORT_SDMA_MGCG |
1633 AMD_CG_SUPPORT_SDMA_LS |
1634 AMD_CG_SUPPORT_VCE_MGCG;
1635 /* rev0 hardware requires workarounds to support PG */
1637 if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
1638 adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
1639 AMD_PG_SUPPORT_GFX_PIPELINE |
1641 AMD_PG_SUPPORT_UVD |
1644 adev->external_rev_id = adev->rev_id + 0x1;
1647 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1648 AMD_CG_SUPPORT_GFX_MGCG |
1649 AMD_CG_SUPPORT_GFX_MGLS |
1650 AMD_CG_SUPPORT_GFX_RLC_LS |
1651 AMD_CG_SUPPORT_GFX_CP_LS |
1652 AMD_CG_SUPPORT_GFX_CGTS |
1653 AMD_CG_SUPPORT_GFX_CGTS_LS |
1654 AMD_CG_SUPPORT_GFX_CGLS |
1655 AMD_CG_SUPPORT_BIF_LS |
1656 AMD_CG_SUPPORT_HDP_MGCG |
1657 AMD_CG_SUPPORT_HDP_LS |
1658 AMD_CG_SUPPORT_SDMA_MGCG |
1659 AMD_CG_SUPPORT_SDMA_LS |
1660 AMD_CG_SUPPORT_VCE_MGCG;
1661 adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
1662 AMD_PG_SUPPORT_GFX_SMG |
1663 AMD_PG_SUPPORT_GFX_PIPELINE |
1665 AMD_PG_SUPPORT_UVD |
1667 adev->external_rev_id = adev->rev_id + 0x61;
1670 /* FIXME: not supported yet */
1674 if (amdgpu_sriov_vf(adev)) {
1675 amdgpu_virt_init_setting(adev);
1676 xgpu_vi_mailbox_set_irq_funcs(adev);
1682 static int vi_common_late_init(struct amdgpu_ip_block *ip_block)
1684 struct amdgpu_device *adev = ip_block->adev;
1686 if (amdgpu_sriov_vf(adev))
1687 xgpu_vi_mailbox_get_irq(adev);
1692 static int vi_common_sw_init(struct amdgpu_ip_block *ip_block)
1694 struct amdgpu_device *adev = ip_block->adev;
1696 if (amdgpu_sriov_vf(adev))
1697 xgpu_vi_mailbox_add_irq_id(adev);
1702 static int vi_common_hw_init(struct amdgpu_ip_block *ip_block)
1704 struct amdgpu_device *adev = ip_block->adev;
1706 /* TODO: move the golden register programming into the per-IP blocks */
1707 vi_init_golden_registers(adev);
1709 vi_program_aspm(adev);
1710 /* enable the doorbell aperture */
1711 vi_enable_doorbell_aperture(adev, true);
1716 static int vi_common_hw_fini(struct amdgpu_ip_block *ip_block)
1718 struct amdgpu_device *adev = ip_block->adev;
1720 /* disable the doorbell aperture */
1721 vi_enable_doorbell_aperture(adev, false);
1723 if (amdgpu_sriov_vf(adev))
1724 xgpu_vi_mailbox_put_irq(adev);
1729 static int vi_common_suspend(struct amdgpu_ip_block *ip_block)
1731 return vi_common_hw_fini(ip_block);
1734 static int vi_common_resume(struct amdgpu_ip_block *ip_block)
1736 return vi_common_hw_init(ip_block);
1739 static bool vi_common_is_idle(void *handle)
1744 static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
1747 uint32_t temp, data;
1749 temp = data = RREG32_PCIE(ixPCIE_CNTL2);
1751 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
1752 data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1753 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1754 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
1756 data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
1757 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
1758 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
1761 WREG32_PCIE(ixPCIE_CNTL2, data);
1764 static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
1767 uint32_t temp, data;
1769 temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
1771 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
1772 data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1774 data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
1777 WREG32(mmHDP_HOST_PATH_CNTL, data);
1780 static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
1783 uint32_t temp, data;
1785 temp = data = RREG32(mmHDP_MEM_POWER_LS);
1787 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
1788 data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1790 data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
1793 WREG32(mmHDP_MEM_POWER_LS, data);
1796 static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
1799 uint32_t temp, data;
1801 temp = data = RREG32(0x157a);
1803 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
1809 WREG32(0x157a, data);
1813 static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
1816 uint32_t temp, data;
1818 temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
1820 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
1821 data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1822 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
1824 data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
1825 CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
1828 WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
1831 static int vi_common_set_clockgating_state_by_smu(void *handle,
1832 enum amd_clockgating_state state)
1834 uint32_t msg_id, pp_state = 0;
1835 uint32_t pp_support_state = 0;
1836 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1838 if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
1839 if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
1840 pp_support_state = PP_STATE_SUPPORT_LS;
1841 pp_state = PP_STATE_LS;
1843 if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
1844 pp_support_state |= PP_STATE_SUPPORT_CG;
1845 pp_state |= PP_STATE_CG;
1847 if (state == AMD_CG_STATE_UNGATE)
1849 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1853 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1856 if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
1857 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
1858 pp_support_state = PP_STATE_SUPPORT_LS;
1859 pp_state = PP_STATE_LS;
1861 if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
1862 pp_support_state |= PP_STATE_SUPPORT_CG;
1863 pp_state |= PP_STATE_CG;
1865 if (state == AMD_CG_STATE_UNGATE)
1867 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1871 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1874 if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
1875 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
1876 pp_support_state = PP_STATE_SUPPORT_LS;
1877 pp_state = PP_STATE_LS;
1879 if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
1880 pp_support_state |= PP_STATE_SUPPORT_CG;
1881 pp_state |= PP_STATE_CG;
1883 if (state == AMD_CG_STATE_UNGATE)
1885 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1889 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1893 if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
1894 if (state == AMD_CG_STATE_UNGATE)
1897 pp_state = PP_STATE_LS;
1899 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1901 PP_STATE_SUPPORT_LS,
1903 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1905 if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
1906 if (state == AMD_CG_STATE_UNGATE)
1909 pp_state = PP_STATE_CG;
1911 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1913 PP_STATE_SUPPORT_CG,
1915 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1918 if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {
1920 if (state == AMD_CG_STATE_UNGATE)
1923 pp_state = PP_STATE_LS;
1925 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1927 PP_STATE_SUPPORT_LS,
1929 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1932 if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {
1934 if (state == AMD_CG_STATE_UNGATE)
1937 pp_state = PP_STATE_CG;
1939 msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
1941 PP_STATE_SUPPORT_CG,
1943 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
1948 static int vi_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1949 enum amd_clockgating_state state)
1951 struct amdgpu_device *adev = ip_block->adev;
1953 if (amdgpu_sriov_vf(adev))
1956 switch (adev->asic_type) {
1958 vi_update_bif_medium_grain_light_sleep(adev,
1959 state == AMD_CG_STATE_GATE);
1960 vi_update_hdp_medium_grain_clock_gating(adev,
1961 state == AMD_CG_STATE_GATE);
1962 vi_update_hdp_light_sleep(adev,
1963 state == AMD_CG_STATE_GATE);
1964 vi_update_rom_medium_grain_clock_gating(adev,
1965 state == AMD_CG_STATE_GATE);
1969 vi_update_bif_medium_grain_light_sleep(adev,
1970 state == AMD_CG_STATE_GATE);
1971 vi_update_hdp_medium_grain_clock_gating(adev,
1972 state == AMD_CG_STATE_GATE);
1973 vi_update_hdp_light_sleep(adev,
1974 state == AMD_CG_STATE_GATE);
1975 vi_update_drm_light_sleep(adev,
1976 state == AMD_CG_STATE_GATE);
1979 case CHIP_POLARIS10:
1980 case CHIP_POLARIS11:
1981 case CHIP_POLARIS12:
1983 vi_common_set_clockgating_state_by_smu(adev, state);
1991 static int vi_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
1992 enum amd_powergating_state state)
1997 static void vi_common_get_clockgating_state(void *handle, u64 *flags)
1999 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2002 if (amdgpu_sriov_vf(adev))
2005 /* AMD_CG_SUPPORT_BIF_LS */
2006 data = RREG32_PCIE(ixPCIE_CNTL2);
2007 if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
2008 *flags |= AMD_CG_SUPPORT_BIF_LS;
2010 /* AMD_CG_SUPPORT_HDP_LS */
2011 data = RREG32(mmHDP_MEM_POWER_LS);
2012 if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
2013 *flags |= AMD_CG_SUPPORT_HDP_LS;
2015 /* AMD_CG_SUPPORT_HDP_MGCG */
2016 data = RREG32(mmHDP_HOST_PATH_CNTL);
2017 if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
2018 *flags |= AMD_CG_SUPPORT_HDP_MGCG;
2020 /* AMD_CG_SUPPORT_ROM_MGCG */
2021 data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
2022 if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
2023 *flags |= AMD_CG_SUPPORT_ROM_MGCG;
2026 static const struct amd_ip_funcs vi_common_ip_funcs = {
2027 .name = "vi_common",
2028 .early_init = vi_common_early_init,
2029 .late_init = vi_common_late_init,
2030 .sw_init = vi_common_sw_init,
2031 .hw_init = vi_common_hw_init,
2032 .hw_fini = vi_common_hw_fini,
2033 .suspend = vi_common_suspend,
2034 .resume = vi_common_resume,
2035 .is_idle = vi_common_is_idle,
2036 .set_clockgating_state = vi_common_set_clockgating_state,
2037 .set_powergating_state = vi_common_set_powergating_state,
2038 .get_clockgating_state = vi_common_get_clockgating_state,
2041 static const struct amdgpu_ip_block_version vi_common_ip_block =
2043 .type = AMD_IP_BLOCK_TYPE_COMMON,
2047 .funcs = &vi_common_ip_funcs,
2050 void vi_set_virt_ops(struct amdgpu_device *adev)
2052 adev->virt.ops = &xgpu_vi_virt_ops;
2055 int vi_set_ip_blocks(struct amdgpu_device *adev)
2057 amdgpu_device_set_sriov_virtual_display(adev);
2059 switch (adev->asic_type) {
2061 /* topaz has no DCE, UVD, VCE */
2062 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2063 amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
2064 amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
2065 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2066 amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
2067 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2068 if (adev->enable_virtual_display)
2069 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2072 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2073 amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
2074 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
2075 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2076 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2077 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2078 if (adev->enable_virtual_display)
2079 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2080 #if defined(CONFIG_DRM_AMD_DC)
2081 else if (amdgpu_device_has_dc_support(adev))
2082 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2085 amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
2086 if (!amdgpu_sriov_vf(adev)) {
2087 amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
2088 amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
2092 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2093 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
2094 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
2095 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2096 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2097 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2098 if (adev->enable_virtual_display)
2099 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2100 #if defined(CONFIG_DRM_AMD_DC)
2101 else if (amdgpu_device_has_dc_support(adev))
2102 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2105 amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
2106 if (!amdgpu_sriov_vf(adev)) {
2107 amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
2108 amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
2111 case CHIP_POLARIS10:
2112 case CHIP_POLARIS11:
2113 case CHIP_POLARIS12:
2115 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2116 amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
2117 amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
2118 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2119 amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
2120 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2121 if (adev->enable_virtual_display)
2122 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2123 #if defined(CONFIG_DRM_AMD_DC)
2124 else if (amdgpu_device_has_dc_support(adev))
2125 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2128 amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
2129 amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
2130 amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
2133 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2134 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
2135 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
2136 amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
2137 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2138 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2139 if (adev->enable_virtual_display)
2140 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2141 #if defined(CONFIG_DRM_AMD_DC)
2142 else if (amdgpu_device_has_dc_support(adev))
2143 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2146 amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
2147 amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
2148 amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
2149 #if defined(CONFIG_DRM_AMD_ACP)
2150 amdgpu_device_ip_block_add(adev, &acp_ip_block);
2154 amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
2155 amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
2156 amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
2157 amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
2158 amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
2159 amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
2160 if (adev->enable_virtual_display)
2161 amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
2162 #if defined(CONFIG_DRM_AMD_DC)
2163 else if (amdgpu_device_has_dc_support(adev))
2164 amdgpu_device_ip_block_add(adev, &dm_ip_block);
2167 amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
2168 amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
2169 amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
2170 #if defined(CONFIG_DRM_AMD_ACP)
2171 amdgpu_device_ip_block_add(adev, &acp_ip_block);
2175 /* FIXME: not supported yet */
2182 void legacy_doorbell_index_init(struct amdgpu_device *adev)
2184 adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
2185 adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
2186 adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
2187 adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
2188 adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
2189 adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
2190 adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
2191 adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
2192 adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
2193 adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
2194 adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
2195 adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
2196 adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
2197 adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;