/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"

#include "vega10/soc15ip.h"
#include "vega10/NBIO/nbio_6_1_default.h"
#include "vega10/NBIO/nbio_6_1_offset.h"
#include "vega10/NBIO/nbio_6_1_sh_mask.h"
#include "vega10/vega10_enum.h"

#define smnCPM_CONTROL		0x11180460
#define smnPCIE_CNTL2		0x11180070

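/* The ASIC revision ID is latched into the RCC_DEV0_EPF0_STRAP0 strap
 * register at power-up; mask and shift it out to get a plain number.
 */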
u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0));

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

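/* The BIOS_SCRATCH_* registers form a small mailbox shared with the
 * AtomBIOS; idx is a word offset from BIOS_SCRATCH_0.
 */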
u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
					uint32_t idx)
{
	return RREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0) + idx);
}

void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
					 uint32_t idx, uint32_t val)
{
	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0) + idx, val);
}

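/* Gate framebuffer reads and writes through the BIF, e.g. while the memory
 * controller is being reprogrammed.
 */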
void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_FB_EN),
		       BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_FB_EN), 0);
}

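/* Writing 0 to HDP_MEM_COHERENCY_FLUSH_CNTL triggers an HDP flush, pushing
 * pending host writes through the HDP aperture out to memory.
 */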
void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
}

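/* RCC_CONFIG_MEMSIZE reports the visible VRAM size in megabytes. */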
u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
	return RREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE));
}

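/* Per-instance doorbell range registers for the two SDMA engines. Each range
 * is programmed with the first doorbell index (OFFSET) and a non-zero SIZE
 * when doorbells are in use, or SIZE=0 to disable the range.
 */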
static const u32 nbio_sdma_doorbell_range_reg[] =
{
	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
	SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
};

void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
				   bool use_doorbell, int doorbell_index)
{
	u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2);
	} else {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
	}

	WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
}

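/* Master enable for the doorbell aperture of this physical function. */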
void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	tmp = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_PF_0_0_RCC_DOORBELL_APER_EN));
	if (enable)
		tmp = REG_SET_FIELD(tmp, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmRCC_PF_0_0_RCC_DOORBELL_APER_EN), tmp);
}

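/* The self-ring aperture lets the GPU target its own doorbells; point it at
 * the doorbell BAR base before enabling.
 */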
void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
						 bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW),
		       lower_32_bits(adev->doorbell.base));
		WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH),
		       upper_32_bits(adev->doorbell.base));
	}

	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL), tmp);
}

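/* Same OFFSET/SIZE scheme as the SDMA ranges, but for the IH ring. */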
void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
				 bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_IH_DOORBELL_RANGE));

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
	}

	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmBIF_IH_DOORBELL_RANGE), ih_doorbell_range);
}

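/* One-time interrupt controller setup: INTERRUPT_CNTL2 takes the dummy page
 * bus address shifted right by 8 bits.
 */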
void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmINTERRUPT_CNTL2), adev->dummy_page.addr >> 8);
	interrupt_cntl = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmINTERRUPT_CNTL));
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with MSI, enabled without MSI
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32(SOC15_REG_OFFSET(NBIO, 0, mmINTERRUPT_CNTL), interrupt_cntl);
}

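/* CPM_CONTROL is an SMN-space register, hence the RREG32_PCIE/WREG32_PCIE
 * (PCIE index/data) accessors instead of plain MMIO.
 */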
void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnCPM_CONTROL);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) {
		data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			 CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			 CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	} else {
		data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PERM_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK |
			  CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK |
			  CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnCPM_CONTROL, data);
}

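/* BIF memory light sleep follows the same read-modify-write pattern, keyed
 * off AMD_CG_SUPPORT_BIF_LS.
 */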
void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					       bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}

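/* Report the live clock-gating state by sampling one representative enable
 * bit per feature.
 */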
void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
{
	u32 data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;

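/* Fill in the lookup tables the SOC15 common code uses for HDP flushes and
 * for the PCIE index/data register pair behind RREG32_PCIE/WREG32_PCIE.
 */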
int nbio_v6_1_init(struct amdgpu_device *adev)
{
	nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
	nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
	nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;

	nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
	nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);

	return 0;
}

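/* RCC_IOV_FUNC_IDENTIFIER: bit 0 set means we are running as a virtual
 * function, bit 31 means SR-IOV is enabled; a value of 0 inside a VM
 * indicates passthrough.
 */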
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32(SOC15_REG_OFFSET(NBIO, 0,
				      mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER));
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}