]>
Commit | Line | Data |
---|---|---|
b1023571 KW |
1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
c1b24a14 | 23 | #include <linux/kernel.h> |
b1023571 | 24 | #include <linux/firmware.h> |
248a1d6f | 25 | #include <drm/drmP.h> |
b1023571 KW |
26 | #include "amdgpu.h" |
27 | #include "amdgpu_gfx.h" | |
28 | #include "soc15.h" | |
29 | #include "soc15d.h" | |
3251c043 | 30 | #include "amdgpu_atomfirmware.h" |
b1023571 | 31 | |
cde5c34f FX |
32 | #include "gc/gc_9_0_offset.h" |
33 | #include "gc/gc_9_0_sh_mask.h" | |
fb960bd2 | 34 | #include "vega10_enum.h" |
75199b8c | 35 | #include "hdp/hdp_4_0_offset.h" |
b1023571 KW |
36 | |
37 | #include "soc15_common.h" | |
38 | #include "clearstate_gfx9.h" | |
39 | #include "v9_structs.h" | |
40 | ||
44a99b65 AG |
41 | #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" |
42 | ||
b1023571 | 43 | #define GFX9_NUM_GFX_RINGS 1 |
268cb4c7 | 44 | #define GFX9_MEC_HPD_SIZE 2048 |
6bce4667 HZ |
45 | #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L |
46 | #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L | |
b1023571 | 47 | |
91d3130a HZ |
48 | #define mmPWR_MISC_CNTL_STATUS 0x0183 |
49 | #define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 | |
50 | #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 | |
51 | #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 | |
52 | #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L | |
53 | #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L | |
b1023571 KW |
54 | |
55 | MODULE_FIRMWARE("amdgpu/vega10_ce.bin"); | |
56 | MODULE_FIRMWARE("amdgpu/vega10_pfp.bin"); | |
57 | MODULE_FIRMWARE("amdgpu/vega10_me.bin"); | |
58 | MODULE_FIRMWARE("amdgpu/vega10_mec.bin"); | |
59 | MODULE_FIRMWARE("amdgpu/vega10_mec2.bin"); | |
60 | MODULE_FIRMWARE("amdgpu/vega10_rlc.bin"); | |
61 | ||
739ffd9b AD |
62 | MODULE_FIRMWARE("amdgpu/vega12_ce.bin"); |
63 | MODULE_FIRMWARE("amdgpu/vega12_pfp.bin"); | |
64 | MODULE_FIRMWARE("amdgpu/vega12_me.bin"); | |
65 | MODULE_FIRMWARE("amdgpu/vega12_mec.bin"); | |
66 | MODULE_FIRMWARE("amdgpu/vega12_mec2.bin"); | |
67 | MODULE_FIRMWARE("amdgpu/vega12_rlc.bin"); | |
68 | ||
940328fe FX |
69 | MODULE_FIRMWARE("amdgpu/vega20_ce.bin"); |
70 | MODULE_FIRMWARE("amdgpu/vega20_pfp.bin"); | |
71 | MODULE_FIRMWARE("amdgpu/vega20_me.bin"); | |
72 | MODULE_FIRMWARE("amdgpu/vega20_mec.bin"); | |
73 | MODULE_FIRMWARE("amdgpu/vega20_mec2.bin"); | |
74 | MODULE_FIRMWARE("amdgpu/vega20_rlc.bin"); | |
75 | ||
060d124b CZ |
76 | MODULE_FIRMWARE("amdgpu/raven_ce.bin"); |
77 | MODULE_FIRMWARE("amdgpu/raven_pfp.bin"); | |
78 | MODULE_FIRMWARE("amdgpu/raven_me.bin"); | |
79 | MODULE_FIRMWARE("amdgpu/raven_mec.bin"); | |
80 | MODULE_FIRMWARE("amdgpu/raven_mec2.bin"); | |
81 | MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); | |
82 | ||
501a580a LG |
83 | MODULE_FIRMWARE("amdgpu/picasso_ce.bin"); |
84 | MODULE_FIRMWARE("amdgpu/picasso_pfp.bin"); | |
85 | MODULE_FIRMWARE("amdgpu/picasso_me.bin"); | |
86 | MODULE_FIRMWARE("amdgpu/picasso_mec.bin"); | |
87 | MODULE_FIRMWARE("amdgpu/picasso_mec2.bin"); | |
88 | MODULE_FIRMWARE("amdgpu/picasso_rlc.bin"); | |
89 | ||
cf4b60c6 FX |
90 | MODULE_FIRMWARE("amdgpu/raven2_ce.bin"); |
91 | MODULE_FIRMWARE("amdgpu/raven2_pfp.bin"); | |
92 | MODULE_FIRMWARE("amdgpu/raven2_me.bin"); | |
93 | MODULE_FIRMWARE("amdgpu/raven2_mec.bin"); | |
94 | MODULE_FIRMWARE("amdgpu/raven2_mec2.bin"); | |
95 | MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); | |
96 | ||
946a4d5b SL |
97 | static const struct soc15_reg_golden golden_settings_gc_9_0[] = |
98 | { | |
54d682d9 | 99 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), |
c55045ad | 100 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000), |
946a4d5b | 101 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), |
946a4d5b SL |
102 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), |
103 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), | |
104 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), | |
946a4d5b | 105 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), |
c5fb5426 FX |
106 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), |
107 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), | |
108 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), | |
109 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), | |
946a4d5b SL |
110 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), |
111 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), | |
112 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), | |
113 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), | |
114 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), | |
c5fb5426 | 115 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) |
b1023571 KW |
116 | }; |
117 | ||
946a4d5b | 118 | static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = |
b1023571 | 119 | { |
946a4d5b SL |
120 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107), |
121 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), | |
e6d57520 FX |
122 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080), |
123 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080), | |
124 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080), | |
946a4d5b SL |
125 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042), |
126 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042), | |
e6d57520 | 127 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080), |
946a4d5b | 128 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000), |
e6d57520 FX |
129 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080), |
130 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080), | |
131 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080), | |
132 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080), | |
133 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080), | |
946a4d5b | 134 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), |
e6d57520 FX |
135 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), |
136 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), | |
f9f97e3c TZ |
137 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), |
138 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), | |
139 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), | |
140 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) | |
b1023571 KW |
141 | }; |
142 | ||
bb5368aa FX |
143 | static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = |
144 | { | |
ac26b0f3 | 145 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080), |
bb5368aa FX |
146 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), |
147 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), | |
148 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042), | |
149 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042), | |
150 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400), | |
151 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000), | |
152 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000), | |
153 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107), | |
154 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000), | |
155 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000) | |
156 | }; | |
157 | ||
946a4d5b SL |
158 | static const struct soc15_reg_golden golden_settings_gc_9_1[] = |
159 | { | |
160 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), | |
161 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080), | |
162 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080), | |
163 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080), | |
164 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), | |
165 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), | |
166 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080), | |
167 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), | |
168 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), | |
169 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), | |
170 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080), | |
171 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080), | |
172 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080), | |
173 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080), | |
174 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080), | |
175 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), | |
176 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), | |
177 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120), | |
178 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), | |
179 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), | |
f9f97e3c TZ |
180 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), |
181 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), | |
182 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), | |
183 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) | |
a5fdb336 CZ |
184 | }; |
185 | ||
946a4d5b | 186 | static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = |
a5fdb336 | 187 | { |
946a4d5b SL |
188 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), |
189 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042), | |
190 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042), | |
191 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000), | |
192 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000), | |
193 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), | |
194 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800) | |
b1023571 KW |
195 | }; |
196 | ||
28ab1229 FX |
197 | static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] = |
198 | { | |
199 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000), | |
200 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), | |
201 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000), | |
202 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080), | |
203 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080), | |
204 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080), | |
205 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041), | |
206 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041), | |
207 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080), | |
208 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000), | |
209 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080), | |
210 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080), | |
211 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080), | |
212 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080), | |
213 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080), | |
214 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), | |
215 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010), | |
216 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), | |
217 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080), | |
218 | }; | |
219 | ||
946a4d5b | 220 | static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = |
f5eaffcc | 221 | { |
946a4d5b SL |
222 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), |
223 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) | |
f5eaffcc KW |
224 | }; |
225 | ||
62b35f9a HZ |
226 | static const struct soc15_reg_golden golden_settings_gc_9_2_1[] = |
227 | { | |
228 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), | |
229 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), | |
230 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), | |
231 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), | |
232 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), | |
233 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), | |
234 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), | |
235 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), | |
236 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), | |
237 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), | |
238 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), | |
239 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), | |
240 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), | |
241 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), | |
242 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), | |
243 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) | |
244 | }; | |
245 | ||
246 | static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = | |
247 | { | |
248 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080), | |
249 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), | |
250 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), | |
251 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041), | |
252 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041), | |
253 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000), | |
254 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107), | |
255 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), | |
256 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), | |
f9f97e3c TZ |
257 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), |
258 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), | |
259 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), | |
260 | SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) | |
62b35f9a HZ |
261 | }; |
262 | ||
727b888f HR |
263 | static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = |
264 | { | |
265 | mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
266 | mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
267 | mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
268 | mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
269 | mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
270 | mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
271 | mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
272 | mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0, | |
273 | }; | |
274 | ||
275 | static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = | |
276 | { | |
277 | mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
278 | mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
279 | mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
280 | mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
281 | mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
282 | mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
283 | mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
284 | mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, | |
285 | }; | |
286 | ||
b1023571 | 287 | #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 |
62b35f9a | 288 | #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 |
7b6ba9ea | 289 | #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 |
28ab1229 | 290 | #define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041 |
b1023571 KW |
291 | |
292 | static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev); | |
293 | static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev); | |
294 | static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev); | |
295 | static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev); | |
296 | static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, | |
297 | struct amdgpu_cu_info *cu_info); | |
298 | static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); | |
299 | static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance); | |
635e7132 | 300 | static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); |
b1023571 KW |
301 | |
302 | static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) | |
303 | { | |
304 | switch (adev->asic_type) { | |
305 | case CHIP_VEGA10: | |
946a4d5b | 306 | soc15_program_register_sequence(adev, |
b1023571 | 307 | golden_settings_gc_9_0, |
c47b41a7 | 308 | ARRAY_SIZE(golden_settings_gc_9_0)); |
946a4d5b | 309 | soc15_program_register_sequence(adev, |
b1023571 | 310 | golden_settings_gc_9_0_vg10, |
c47b41a7 | 311 | ARRAY_SIZE(golden_settings_gc_9_0_vg10)); |
b1023571 | 312 | break; |
d5e8ef06 | 313 | case CHIP_VEGA12: |
62b35f9a HZ |
314 | soc15_program_register_sequence(adev, |
315 | golden_settings_gc_9_2_1, | |
316 | ARRAY_SIZE(golden_settings_gc_9_2_1)); | |
317 | soc15_program_register_sequence(adev, | |
318 | golden_settings_gc_9_2_1_vg12, | |
319 | ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); | |
d5e8ef06 | 320 | break; |
bb5368aa FX |
321 | case CHIP_VEGA20: |
322 | soc15_program_register_sequence(adev, | |
323 | golden_settings_gc_9_0, | |
324 | ARRAY_SIZE(golden_settings_gc_9_0)); | |
325 | soc15_program_register_sequence(adev, | |
326 | golden_settings_gc_9_0_vg20, | |
327 | ARRAY_SIZE(golden_settings_gc_9_0_vg20)); | |
328 | break; | |
a5fdb336 | 329 | case CHIP_RAVEN: |
28ab1229 FX |
330 | soc15_program_register_sequence(adev, golden_settings_gc_9_1, |
331 | ARRAY_SIZE(golden_settings_gc_9_1)); | |
332 | if (adev->rev_id >= 8) | |
333 | soc15_program_register_sequence(adev, | |
334 | golden_settings_gc_9_1_rv2, | |
335 | ARRAY_SIZE(golden_settings_gc_9_1_rv2)); | |
336 | else | |
337 | soc15_program_register_sequence(adev, | |
338 | golden_settings_gc_9_1_rv1, | |
339 | ARRAY_SIZE(golden_settings_gc_9_1_rv1)); | |
340 | break; | |
b1023571 KW |
341 | default: |
342 | break; | |
343 | } | |
f5eaffcc | 344 | |
946a4d5b | 345 | soc15_program_register_sequence(adev, golden_settings_gc_9_x_common, |
f5eaffcc | 346 | (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); |
b1023571 KW |
347 | } |
348 | ||
349 | static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) | |
350 | { | |
6a05148f | 351 | adev->gfx.scratch.num_reg = 8; |
b1023571 KW |
352 | adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); |
353 | adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; | |
354 | } | |
355 | ||
356 | static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel, | |
357 | bool wc, uint32_t reg, uint32_t val) | |
358 | { | |
359 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
360 | amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) | | |
361 | WRITE_DATA_DST_SEL(0) | | |
362 | (wc ? WR_CONFIRM : 0)); | |
363 | amdgpu_ring_write(ring, reg); | |
364 | amdgpu_ring_write(ring, 0); | |
365 | amdgpu_ring_write(ring, val); | |
366 | } | |
367 | ||
368 | static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, | |
369 | int mem_space, int opt, uint32_t addr0, | |
370 | uint32_t addr1, uint32_t ref, uint32_t mask, | |
371 | uint32_t inv) | |
372 | { | |
373 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | |
374 | amdgpu_ring_write(ring, | |
375 | /* memory (1) or register (0) */ | |
376 | (WAIT_REG_MEM_MEM_SPACE(mem_space) | | |
377 | WAIT_REG_MEM_OPERATION(opt) | /* wait */ | |
378 | WAIT_REG_MEM_FUNCTION(3) | /* equal */ | |
379 | WAIT_REG_MEM_ENGINE(eng_sel))); | |
380 | ||
381 | if (mem_space) | |
382 | BUG_ON(addr0 & 0x3); /* Dword align */ | |
383 | amdgpu_ring_write(ring, addr0); | |
384 | amdgpu_ring_write(ring, addr1); | |
385 | amdgpu_ring_write(ring, ref); | |
386 | amdgpu_ring_write(ring, mask); | |
387 | amdgpu_ring_write(ring, inv); /* poll interval */ | |
388 | } | |
389 | ||
390 | static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) | |
391 | { | |
392 | struct amdgpu_device *adev = ring->adev; | |
393 | uint32_t scratch; | |
394 | uint32_t tmp = 0; | |
395 | unsigned i; | |
396 | int r; | |
397 | ||
398 | r = amdgpu_gfx_scratch_get(adev, &scratch); | |
399 | if (r) { | |
400 | DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); | |
401 | return r; | |
402 | } | |
403 | WREG32(scratch, 0xCAFEDEAD); | |
404 | r = amdgpu_ring_alloc(ring, 3); | |
405 | if (r) { | |
406 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | |
407 | ring->idx, r); | |
408 | amdgpu_gfx_scratch_free(adev, scratch); | |
409 | return r; | |
410 | } | |
411 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | |
412 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | |
413 | amdgpu_ring_write(ring, 0xDEADBEEF); | |
414 | amdgpu_ring_commit(ring); | |
415 | ||
416 | for (i = 0; i < adev->usec_timeout; i++) { | |
417 | tmp = RREG32(scratch); | |
418 | if (tmp == 0xDEADBEEF) | |
419 | break; | |
420 | DRM_UDELAY(1); | |
421 | } | |
422 | if (i < adev->usec_timeout) { | |
9953b72f | 423 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", |
b1023571 KW |
424 | ring->idx, i); |
425 | } else { | |
426 | DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | |
427 | ring->idx, scratch, tmp); | |
428 | r = -EINVAL; | |
429 | } | |
430 | amdgpu_gfx_scratch_free(adev, scratch); | |
431 | return r; | |
432 | } | |
433 | ||
434 | static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |
435 | { | |
ed9324af ML |
436 | struct amdgpu_device *adev = ring->adev; |
437 | struct amdgpu_ib ib; | |
438 | struct dma_fence *f = NULL; | |
439 | ||
440 | unsigned index; | |
441 | uint64_t gpu_addr; | |
442 | uint32_t tmp; | |
443 | long r; | |
444 | ||
445 | r = amdgpu_device_wb_get(adev, &index); | |
446 | if (r) { | |
447 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | |
448 | return r; | |
449 | } | |
450 | ||
451 | gpu_addr = adev->wb.gpu_addr + (index * 4); | |
452 | adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); | |
453 | memset(&ib, 0, sizeof(ib)); | |
454 | r = amdgpu_ib_get(adev, NULL, 16, &ib); | |
455 | if (r) { | |
456 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | |
457 | goto err1; | |
458 | } | |
459 | ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); | |
460 | ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; | |
461 | ib.ptr[2] = lower_32_bits(gpu_addr); | |
462 | ib.ptr[3] = upper_32_bits(gpu_addr); | |
463 | ib.ptr[4] = 0xDEADBEEF; | |
464 | ib.length_dw = 5; | |
b1023571 | 465 | |
ed9324af ML |
466 | r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); |
467 | if (r) | |
468 | goto err2; | |
b1023571 | 469 | |
ed9324af ML |
470 | r = dma_fence_wait_timeout(f, false, timeout); |
471 | if (r == 0) { | |
472 | DRM_ERROR("amdgpu: IB test timed out.\n"); | |
473 | r = -ETIMEDOUT; | |
474 | goto err2; | |
475 | } else if (r < 0) { | |
476 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | |
477 | goto err2; | |
478 | } | |
479 | ||
480 | tmp = adev->wb.wb[index]; | |
481 | if (tmp == 0xDEADBEEF) { | |
482 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | |
483 | r = 0; | |
484 | } else { | |
485 | DRM_ERROR("ib test on ring %d failed\n", ring->idx); | |
486 | r = -EINVAL; | |
487 | } | |
b1023571 | 488 | |
b1023571 | 489 | err2: |
ed9324af ML |
490 | amdgpu_ib_free(adev, &ib, NULL); |
491 | dma_fence_put(f); | |
b1023571 | 492 | err1: |
ed9324af ML |
493 | amdgpu_device_wb_free(adev, index); |
494 | return r; | |
b1023571 KW |
495 | } |
496 | ||
c833d8aa ML |
497 | |
498 | static void gfx_v9_0_free_microcode(struct amdgpu_device *adev) | |
499 | { | |
500 | release_firmware(adev->gfx.pfp_fw); | |
501 | adev->gfx.pfp_fw = NULL; | |
502 | release_firmware(adev->gfx.me_fw); | |
503 | adev->gfx.me_fw = NULL; | |
504 | release_firmware(adev->gfx.ce_fw); | |
505 | adev->gfx.ce_fw = NULL; | |
506 | release_firmware(adev->gfx.rlc_fw); | |
507 | adev->gfx.rlc_fw = NULL; | |
508 | release_firmware(adev->gfx.mec_fw); | |
509 | adev->gfx.mec_fw = NULL; | |
510 | release_firmware(adev->gfx.mec2_fw); | |
511 | adev->gfx.mec2_fw = NULL; | |
512 | ||
513 | kfree(adev->gfx.rlc.register_list_format); | |
514 | } | |
515 | ||
621a6318 HR |
516 | static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev) |
517 | { | |
518 | const struct rlc_firmware_header_v2_1 *rlc_hdr; | |
519 | ||
520 | rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; | |
521 | adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); | |
522 | adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); | |
523 | adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); | |
524 | adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); | |
525 | adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); | |
526 | adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); | |
527 | adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); | |
528 | adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); | |
529 | adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); | |
530 | adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); | |
531 | adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); | |
532 | adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); | |
533 | adev->gfx.rlc.reg_list_format_direct_reg_list_length = | |
534 | le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); | |
535 | } | |
536 | ||
39b62541 ED |
537 | static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev) |
538 | { | |
539 | adev->gfx.me_fw_write_wait = false; | |
540 | adev->gfx.mec_fw_write_wait = false; | |
541 | ||
542 | switch (adev->asic_type) { | |
543 | case CHIP_VEGA10: | |
544 | if ((adev->gfx.me_fw_version >= 0x0000009c) && | |
545 | (adev->gfx.me_feature_version >= 42) && | |
546 | (adev->gfx.pfp_fw_version >= 0x000000b1) && | |
547 | (adev->gfx.pfp_feature_version >= 42)) | |
548 | adev->gfx.me_fw_write_wait = true; | |
549 | ||
550 | if ((adev->gfx.mec_fw_version >= 0x00000193) && | |
551 | (adev->gfx.mec_feature_version >= 42)) | |
552 | adev->gfx.mec_fw_write_wait = true; | |
553 | break; | |
554 | case CHIP_VEGA12: | |
555 | if ((adev->gfx.me_fw_version >= 0x0000009c) && | |
556 | (adev->gfx.me_feature_version >= 44) && | |
557 | (adev->gfx.pfp_fw_version >= 0x000000b2) && | |
558 | (adev->gfx.pfp_feature_version >= 44)) | |
559 | adev->gfx.me_fw_write_wait = true; | |
560 | ||
561 | if ((adev->gfx.mec_fw_version >= 0x00000196) && | |
562 | (adev->gfx.mec_feature_version >= 44)) | |
563 | adev->gfx.mec_fw_write_wait = true; | |
564 | break; | |
565 | case CHIP_VEGA20: | |
566 | if ((adev->gfx.me_fw_version >= 0x0000009c) && | |
567 | (adev->gfx.me_feature_version >= 44) && | |
568 | (adev->gfx.pfp_fw_version >= 0x000000b2) && | |
569 | (adev->gfx.pfp_feature_version >= 44)) | |
570 | adev->gfx.me_fw_write_wait = true; | |
571 | ||
572 | if ((adev->gfx.mec_fw_version >= 0x00000197) && | |
573 | (adev->gfx.mec_feature_version >= 44)) | |
574 | adev->gfx.mec_fw_write_wait = true; | |
575 | break; | |
576 | case CHIP_RAVEN: | |
577 | if ((adev->gfx.me_fw_version >= 0x0000009c) && | |
578 | (adev->gfx.me_feature_version >= 42) && | |
579 | (adev->gfx.pfp_fw_version >= 0x000000b1) && | |
580 | (adev->gfx.pfp_feature_version >= 42)) | |
581 | adev->gfx.me_fw_write_wait = true; | |
582 | ||
583 | if ((adev->gfx.mec_fw_version >= 0x00000192) && | |
584 | (adev->gfx.mec_feature_version >= 42)) | |
585 | adev->gfx.mec_fw_write_wait = true; | |
586 | break; | |
a00ead2b RZ |
587 | default: |
588 | break; | |
39b62541 ED |
589 | } |
590 | } | |
591 | ||
b1023571 KW |
592 | static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) |
593 | { | |
594 | const char *chip_name; | |
595 | char fw_name[30]; | |
596 | int err; | |
597 | struct amdgpu_firmware_info *info = NULL; | |
598 | const struct common_firmware_header *header = NULL; | |
599 | const struct gfx_firmware_header_v1_0 *cp_hdr; | |
a4d41ad0 HZ |
600 | const struct rlc_firmware_header_v2_0 *rlc_hdr; |
601 | unsigned int *tmp = NULL; | |
602 | unsigned int i = 0; | |
621a6318 HR |
603 | uint16_t version_major; |
604 | uint16_t version_minor; | |
b1023571 KW |
605 | |
606 | DRM_DEBUG("\n"); | |
607 | ||
608 | switch (adev->asic_type) { | |
609 | case CHIP_VEGA10: | |
610 | chip_name = "vega10"; | |
611 | break; | |
739ffd9b AD |
612 | case CHIP_VEGA12: |
613 | chip_name = "vega12"; | |
614 | break; | |
940328fe FX |
615 | case CHIP_VEGA20: |
616 | chip_name = "vega20"; | |
617 | break; | |
eaa85724 | 618 | case CHIP_RAVEN: |
cf4b60c6 FX |
619 | if (adev->rev_id >= 8) |
620 | chip_name = "raven2"; | |
741deade AD |
621 | else if (adev->pdev->device == 0x15d8) |
622 | chip_name = "picasso"; | |
cf4b60c6 FX |
623 | else |
624 | chip_name = "raven"; | |
eaa85724 | 625 | break; |
b1023571 KW |
626 | default: |
627 | BUG(); | |
628 | } | |
629 | ||
630 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); | |
631 | err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); | |
632 | if (err) | |
633 | goto out; | |
634 | err = amdgpu_ucode_validate(adev->gfx.pfp_fw); | |
635 | if (err) | |
636 | goto out; | |
637 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | |
638 | adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); | |
639 | adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); | |
640 | ||
641 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); | |
642 | err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); | |
643 | if (err) | |
644 | goto out; | |
645 | err = amdgpu_ucode_validate(adev->gfx.me_fw); | |
646 | if (err) | |
647 | goto out; | |
648 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | |
649 | adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); | |
650 | adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); | |
651 | ||
652 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); | |
653 | err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); | |
654 | if (err) | |
655 | goto out; | |
656 | err = amdgpu_ucode_validate(adev->gfx.ce_fw); | |
657 | if (err) | |
658 | goto out; | |
659 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | |
660 | adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); | |
661 | adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); | |
662 | ||
663 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); | |
664 | err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); | |
665 | if (err) | |
666 | goto out; | |
667 | err = amdgpu_ucode_validate(adev->gfx.rlc_fw); | |
a4d41ad0 | 668 | rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; |
621a6318 HR |
669 | |
670 | version_major = le16_to_cpu(rlc_hdr->header.header_version_major); | |
671 | version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); | |
672 | if (version_major == 2 && version_minor == 1) | |
673 | adev->gfx.rlc.is_rlc_v2_1 = true; | |
674 | ||
a4d41ad0 HZ |
675 | adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); |
676 | adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); | |
677 | adev->gfx.rlc.save_and_restore_offset = | |
678 | le32_to_cpu(rlc_hdr->save_and_restore_offset); | |
679 | adev->gfx.rlc.clear_state_descriptor_offset = | |
680 | le32_to_cpu(rlc_hdr->clear_state_descriptor_offset); | |
681 | adev->gfx.rlc.avail_scratch_ram_locations = | |
682 | le32_to_cpu(rlc_hdr->avail_scratch_ram_locations); | |
683 | adev->gfx.rlc.reg_restore_list_size = | |
684 | le32_to_cpu(rlc_hdr->reg_restore_list_size); | |
685 | adev->gfx.rlc.reg_list_format_start = | |
686 | le32_to_cpu(rlc_hdr->reg_list_format_start); | |
687 | adev->gfx.rlc.reg_list_format_separate_start = | |
688 | le32_to_cpu(rlc_hdr->reg_list_format_separate_start); | |
689 | adev->gfx.rlc.starting_offsets_start = | |
690 | le32_to_cpu(rlc_hdr->starting_offsets_start); | |
691 | adev->gfx.rlc.reg_list_format_size_bytes = | |
692 | le32_to_cpu(rlc_hdr->reg_list_format_size_bytes); | |
693 | adev->gfx.rlc.reg_list_size_bytes = | |
694 | le32_to_cpu(rlc_hdr->reg_list_size_bytes); | |
695 | adev->gfx.rlc.register_list_format = | |
696 | kmalloc(adev->gfx.rlc.reg_list_format_size_bytes + | |
697 | adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL); | |
698 | if (!adev->gfx.rlc.register_list_format) { | |
699 | err = -ENOMEM; | |
700 | goto out; | |
701 | } | |
702 | ||
703 | tmp = (unsigned int *)((uintptr_t)rlc_hdr + | |
704 | le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes)); | |
06668916 | 705 | for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++) |
a4d41ad0 HZ |
706 | adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]); |
707 | ||
708 | adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i; | |
709 | ||
710 | tmp = (unsigned int *)((uintptr_t)rlc_hdr + | |
711 | le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes)); | |
06668916 | 712 | for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++) |
a4d41ad0 | 713 | adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); |
b1023571 | 714 | |
621a6318 HR |
715 | if (adev->gfx.rlc.is_rlc_v2_1) |
716 | gfx_v9_0_init_rlc_ext_microcode(adev); | |
717 | ||
b1023571 KW |
718 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); |
719 | err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); | |
720 | if (err) | |
721 | goto out; | |
722 | err = amdgpu_ucode_validate(adev->gfx.mec_fw); | |
723 | if (err) | |
724 | goto out; | |
725 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | |
726 | adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); | |
727 | adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); | |
728 | ||
729 | ||
730 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); | |
731 | err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); | |
732 | if (!err) { | |
733 | err = amdgpu_ucode_validate(adev->gfx.mec2_fw); | |
734 | if (err) | |
735 | goto out; | |
736 | cp_hdr = (const struct gfx_firmware_header_v1_0 *) | |
737 | adev->gfx.mec2_fw->data; | |
738 | adev->gfx.mec2_fw_version = | |
739 | le32_to_cpu(cp_hdr->header.ucode_version); | |
740 | adev->gfx.mec2_feature_version = | |
741 | le32_to_cpu(cp_hdr->ucode_feature_version); | |
742 | } else { | |
743 | err = 0; | |
744 | adev->gfx.mec2_fw = NULL; | |
745 | } | |
746 | ||
747 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { | |
748 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; | |
749 | info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; | |
750 | info->fw = adev->gfx.pfp_fw; | |
751 | header = (const struct common_firmware_header *)info->fw->data; | |
752 | adev->firmware.fw_size += | |
753 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | |
754 | ||
755 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; | |
756 | info->ucode_id = AMDGPU_UCODE_ID_CP_ME; | |
757 | info->fw = adev->gfx.me_fw; | |
758 | header = (const struct common_firmware_header *)info->fw->data; | |
759 | adev->firmware.fw_size += | |
760 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | |
761 | ||
762 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; | |
763 | info->ucode_id = AMDGPU_UCODE_ID_CP_CE; | |
764 | info->fw = adev->gfx.ce_fw; | |
765 | header = (const struct common_firmware_header *)info->fw->data; | |
766 | adev->firmware.fw_size += | |
767 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | |
768 | ||
769 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; | |
770 | info->ucode_id = AMDGPU_UCODE_ID_RLC_G; | |
771 | info->fw = adev->gfx.rlc_fw; | |
772 | header = (const struct common_firmware_header *)info->fw->data; | |
773 | adev->firmware.fw_size += | |
774 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | |
775 | ||
b58b65cf EQ |
776 | if (adev->gfx.rlc.is_rlc_v2_1 && |
777 | adev->gfx.rlc.save_restore_list_cntl_size_bytes && | |
778 | adev->gfx.rlc.save_restore_list_gpm_size_bytes && | |
779 | adev->gfx.rlc.save_restore_list_srm_size_bytes) { | |
621a6318 HR |
780 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL]; |
781 | info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL; | |
782 | info->fw = adev->gfx.rlc_fw; | |
783 | adev->firmware.fw_size += | |
784 | ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE); | |
785 | ||
786 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; | |
787 | info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; | |
788 | info->fw = adev->gfx.rlc_fw; | |
789 | adev->firmware.fw_size += | |
790 | ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); | |
791 | ||
792 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; | |
793 | info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; | |
794 | info->fw = adev->gfx.rlc_fw; | |
795 | adev->firmware.fw_size += | |
796 | ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); | |
797 | } | |
798 | ||
b1023571 KW |
799 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; |
800 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; | |
801 | info->fw = adev->gfx.mec_fw; | |
802 | header = (const struct common_firmware_header *)info->fw->data; | |
803 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; | |
804 | adev->firmware.fw_size += | |
805 | ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); | |
806 | ||
807 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT]; | |
808 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT; | |
809 | info->fw = adev->gfx.mec_fw; | |
810 | adev->firmware.fw_size += | |
811 | ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); | |
812 | ||
813 | if (adev->gfx.mec2_fw) { | |
814 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; | |
815 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; | |
816 | info->fw = adev->gfx.mec2_fw; | |
817 | header = (const struct common_firmware_header *)info->fw->data; | |
818 | cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data; | |
819 | adev->firmware.fw_size += | |
820 | ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); | |
821 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; | |
822 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; | |
823 | info->fw = adev->gfx.mec2_fw; | |
824 | adev->firmware.fw_size += | |
825 | ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE); | |
826 | } | |
827 | ||
828 | } | |
829 | ||
830 | out: | |
39b62541 | 831 | gfx_v9_0_check_fw_write_wait(adev); |
b1023571 KW |
832 | if (err) { |
833 | dev_err(adev->dev, | |
834 | "gfx9: Failed to load firmware \"%s\"\n", | |
835 | fw_name); | |
836 | release_firmware(adev->gfx.pfp_fw); | |
837 | adev->gfx.pfp_fw = NULL; | |
838 | release_firmware(adev->gfx.me_fw); | |
839 | adev->gfx.me_fw = NULL; | |
840 | release_firmware(adev->gfx.ce_fw); | |
841 | adev->gfx.ce_fw = NULL; | |
842 | release_firmware(adev->gfx.rlc_fw); | |
843 | adev->gfx.rlc_fw = NULL; | |
844 | release_firmware(adev->gfx.mec_fw); | |
845 | adev->gfx.mec_fw = NULL; | |
846 | release_firmware(adev->gfx.mec2_fw); | |
847 | adev->gfx.mec2_fw = NULL; | |
848 | } | |
849 | return err; | |
850 | } | |
851 | ||
c9719c69 HZ |
852 | static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev) |
853 | { | |
854 | u32 count = 0; | |
855 | const struct cs_section_def *sect = NULL; | |
856 | const struct cs_extent_def *ext = NULL; | |
857 | ||
858 | /* begin clear state */ | |
859 | count += 2; | |
860 | /* context control state */ | |
861 | count += 3; | |
862 | ||
863 | for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { | |
864 | for (ext = sect->section; ext->extent != NULL; ++ext) { | |
865 | if (sect->id == SECT_CONTEXT) | |
866 | count += 2 + ext->reg_count; | |
867 | else | |
868 | return 0; | |
869 | } | |
870 | } | |
871 | ||
872 | /* end clear state */ | |
873 | count += 2; | |
874 | /* clear state */ | |
875 | count += 2; | |
876 | ||
877 | return count; | |
878 | } | |
879 | ||
880 | static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, | |
881 | volatile u32 *buffer) | |
882 | { | |
883 | u32 count = 0, i; | |
884 | const struct cs_section_def *sect = NULL; | |
885 | const struct cs_extent_def *ext = NULL; | |
886 | ||
887 | if (adev->gfx.rlc.cs_data == NULL) | |
888 | return; | |
889 | if (buffer == NULL) | |
890 | return; | |
891 | ||
892 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | |
893 | buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); | |
894 | ||
895 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1)); | |
896 | buffer[count++] = cpu_to_le32(0x80000000); | |
897 | buffer[count++] = cpu_to_le32(0x80000000); | |
898 | ||
899 | for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) { | |
900 | for (ext = sect->section; ext->extent != NULL; ++ext) { | |
901 | if (sect->id == SECT_CONTEXT) { | |
902 | buffer[count++] = | |
903 | cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count)); | |
904 | buffer[count++] = cpu_to_le32(ext->reg_index - | |
905 | PACKET3_SET_CONTEXT_REG_START); | |
906 | for (i = 0; i < ext->reg_count; i++) | |
907 | buffer[count++] = cpu_to_le32(ext->extent[i]); | |
908 | } else { | |
909 | return; | |
910 | } | |
911 | } | |
912 | } | |
913 | ||
914 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | |
915 | buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE); | |
916 | ||
917 | buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0)); | |
918 | buffer[count++] = cpu_to_le32(0); | |
919 | } | |
920 | ||
989b6823 EQ |
921 | static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) |
922 | { | |
923 | struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; | |
924 | uint32_t pg_always_on_cu_num = 2; | |
925 | uint32_t always_on_cu_num; | |
926 | uint32_t i, j, k; | |
927 | uint32_t mask, cu_bitmap, counter; | |
928 | ||
929 | if (adev->flags & AMD_IS_APU) | |
930 | always_on_cu_num = 4; | |
931 | else if (adev->asic_type == CHIP_VEGA12) | |
932 | always_on_cu_num = 8; | |
933 | else | |
934 | always_on_cu_num = 12; | |
935 | ||
936 | mutex_lock(&adev->grbm_idx_mutex); | |
937 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | |
938 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | |
939 | mask = 1; | |
940 | cu_bitmap = 0; | |
941 | counter = 0; | |
942 | gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); | |
943 | ||
944 | for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { | |
945 | if (cu_info->bitmap[i][j] & mask) { | |
946 | if (counter == pg_always_on_cu_num) | |
947 | WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap); | |
948 | if (counter < always_on_cu_num) | |
949 | cu_bitmap |= mask; | |
950 | else | |
951 | break; | |
952 | counter++; | |
953 | } | |
954 | mask <<= 1; | |
955 | } | |
956 | ||
957 | WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap); | |
958 | cu_info->ao_cu_bitmap[i][j] = cu_bitmap; | |
959 | } | |
960 | } | |
961 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
962 | mutex_unlock(&adev->grbm_idx_mutex); | |
963 | } | |
964 | ||
ba7bb665 HZ |
965 | static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) |
966 | { | |
e5475e16 | 967 | uint32_t data; |
ba7bb665 HZ |
968 | |
969 | /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */ | |
970 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F); | |
971 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7); | |
972 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077); | |
973 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16)); | |
974 | ||
975 | /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */ | |
976 | WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000); | |
977 | ||
978 | /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */ | |
979 | WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500); | |
980 | ||
981 | mutex_lock(&adev->grbm_idx_mutex); | |
982 | /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ | |
983 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
984 | WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); | |
985 | ||
986 | /* set mmRLC_LB_PARAMS = 0x003F_1006 */ | |
e5475e16 TSD |
987 | data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003); |
988 | data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010); | |
989 | data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F); | |
ba7bb665 HZ |
990 | WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data); |
991 | ||
992 | /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */ | |
993 | data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7); | |
994 | data &= 0x0000FFFF; | |
995 | data |= 0x00C00000; | |
996 | WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); | |
997 | ||
b989531b EQ |
998 | /* |
999 | * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven), | |
1000 | * programmed in gfx_v9_0_init_always_on_cu_mask() | |
1001 | */ | |
ba7bb665 HZ |
1002 | |
1003 | /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, | |
1004 | * but used for RLC_LB_CNTL configuration */ | |
1005 | data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK; | |
e5475e16 TSD |
1006 | data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09); |
1007 | data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); | |
ba7bb665 HZ |
1008 | WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); |
1009 | mutex_unlock(&adev->grbm_idx_mutex); | |
b989531b EQ |
1010 | |
1011 | gfx_v9_0_init_always_on_cu_mask(adev); | |
ba7bb665 HZ |
1012 | } |
1013 | ||
989b6823 EQ |
1014 | static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev) |
1015 | { | |
1016 | uint32_t data; | |
1017 | ||
1018 | /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */ | |
1019 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F); | |
1020 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8); | |
1021 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077); | |
1022 | WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16)); | |
1023 | ||
1024 | /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */ | |
1025 | WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000); | |
1026 | ||
1027 | /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */ | |
1028 | WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800); | |
1029 | ||
1030 | mutex_lock(&adev->grbm_idx_mutex); | |
1031 | /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ | |
1032 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
1033 | WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); | |
1034 | ||
1035 | /* set mmRLC_LB_PARAMS = 0x003F_1006 */ | |
1036 | data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003); | |
1037 | data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010); | |
1038 | data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F); | |
1039 | WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data); | |
1040 | ||
1041 | /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */ | |
1042 | data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7); | |
1043 | data &= 0x0000FFFF; | |
1044 | data |= 0x00C00000; | |
1045 | WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); | |
1046 | ||
1047 | /* | |
1048 | * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON), | |
1049 | * programmed in gfx_v9_0_init_always_on_cu_mask() | |
1050 | */ | |
1051 | ||
1052 | /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, | |
1053 | * but used for RLC_LB_CNTL configuration */ | |
1054 | data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK; | |
1055 | data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09); | |
1056 | data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); | |
1057 | WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); | |
1058 | mutex_unlock(&adev->grbm_idx_mutex); | |
1059 | ||
1060 | gfx_v9_0_init_always_on_cu_mask(adev); | |
1061 | } | |
1062 | ||
e8835e0e HZ |
1063 | static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) |
1064 | { | |
e5475e16 | 1065 | WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0); |
e8835e0e HZ |
1066 | } |
1067 | ||
c9719c69 HZ |
1068 | static void rv_init_cp_jump_table(struct amdgpu_device *adev) |
1069 | { | |
1070 | const __le32 *fw_data; | |
1071 | volatile u32 *dst_ptr; | |
1072 | int me, i, max_me = 5; | |
1073 | u32 bo_offset = 0; | |
1074 | u32 table_offset, table_size; | |
1075 | ||
1076 | /* write the cp table buffer */ | |
1077 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | |
1078 | for (me = 0; me < max_me; me++) { | |
1079 | if (me == 0) { | |
1080 | const struct gfx_firmware_header_v1_0 *hdr = | |
1081 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | |
1082 | fw_data = (const __le32 *) | |
1083 | (adev->gfx.ce_fw->data + | |
1084 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
1085 | table_offset = le32_to_cpu(hdr->jt_offset); | |
1086 | table_size = le32_to_cpu(hdr->jt_size); | |
1087 | } else if (me == 1) { | |
1088 | const struct gfx_firmware_header_v1_0 *hdr = | |
1089 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | |
1090 | fw_data = (const __le32 *) | |
1091 | (adev->gfx.pfp_fw->data + | |
1092 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
1093 | table_offset = le32_to_cpu(hdr->jt_offset); | |
1094 | table_size = le32_to_cpu(hdr->jt_size); | |
1095 | } else if (me == 2) { | |
1096 | const struct gfx_firmware_header_v1_0 *hdr = | |
1097 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | |
1098 | fw_data = (const __le32 *) | |
1099 | (adev->gfx.me_fw->data + | |
1100 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
1101 | table_offset = le32_to_cpu(hdr->jt_offset); | |
1102 | table_size = le32_to_cpu(hdr->jt_size); | |
1103 | } else if (me == 3) { | |
1104 | const struct gfx_firmware_header_v1_0 *hdr = | |
1105 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | |
1106 | fw_data = (const __le32 *) | |
1107 | (adev->gfx.mec_fw->data + | |
1108 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
1109 | table_offset = le32_to_cpu(hdr->jt_offset); | |
1110 | table_size = le32_to_cpu(hdr->jt_size); | |
1111 | } else if (me == 4) { | |
1112 | const struct gfx_firmware_header_v1_0 *hdr = | |
1113 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | |
1114 | fw_data = (const __le32 *) | |
1115 | (adev->gfx.mec2_fw->data + | |
1116 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
1117 | table_offset = le32_to_cpu(hdr->jt_offset); | |
1118 | table_size = le32_to_cpu(hdr->jt_size); | |
1119 | } | |
1120 | ||
1121 | for (i = 0; i < table_size; i ++) { | |
1122 | dst_ptr[bo_offset + i] = | |
1123 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | |
1124 | } | |
1125 | ||
1126 | bo_offset += table_size; | |
1127 | } | |
1128 | } | |
1129 | ||
1130 | static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev) | |
1131 | { | |
1132 | /* clear state block */ | |
1133 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, | |
1134 | &adev->gfx.rlc.clear_state_gpu_addr, | |
1135 | (void **)&adev->gfx.rlc.cs_ptr); | |
1136 | ||
1137 | /* jump table block */ | |
1138 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, | |
1139 | &adev->gfx.rlc.cp_table_gpu_addr, | |
1140 | (void **)&adev->gfx.rlc.cp_table_ptr); | |
1141 | } | |
1142 | ||
1143 | static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | |
1144 | { | |
1145 | volatile u32 *dst_ptr; | |
1146 | u32 dws; | |
1147 | const struct cs_section_def *cs_data; | |
1148 | int r; | |
1149 | ||
1150 | adev->gfx.rlc.cs_data = gfx9_cs_data; | |
1151 | ||
1152 | cs_data = adev->gfx.rlc.cs_data; | |
1153 | ||
1154 | if (cs_data) { | |
1155 | /* clear state block */ | |
1156 | adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); | |
a4a02777 CK |
1157 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
1158 | AMDGPU_GEM_DOMAIN_VRAM, | |
1159 | &adev->gfx.rlc.clear_state_obj, | |
1160 | &adev->gfx.rlc.clear_state_gpu_addr, | |
1161 | (void **)&adev->gfx.rlc.cs_ptr); | |
1162 | if (r) { | |
1163 | dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", | |
1164 | r); | |
1165 | gfx_v9_0_rlc_fini(adev); | |
1166 | return r; | |
c9719c69 HZ |
1167 | } |
1168 | /* set up the cs buffer */ | |
1169 | dst_ptr = adev->gfx.rlc.cs_ptr; | |
1170 | gfx_v9_0_get_csb_buffer(adev, dst_ptr); | |
1171 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | |
137dc4b9 | 1172 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); |
c9719c69 HZ |
1173 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); |
1174 | } | |
1175 | ||
741deade | 1176 | if (adev->asic_type == CHIP_RAVEN) { |
c9719c69 HZ |
1177 | /* TODO: double check the cp_table_size for RV */ |
1178 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | |
a4a02777 CK |
1179 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, |
1180 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | |
1181 | &adev->gfx.rlc.cp_table_obj, | |
1182 | &adev->gfx.rlc.cp_table_gpu_addr, | |
1183 | (void **)&adev->gfx.rlc.cp_table_ptr); | |
1184 | if (r) { | |
1185 | dev_err(adev->dev, | |
1186 | "(%d) failed to create cp table bo\n", r); | |
1187 | gfx_v9_0_rlc_fini(adev); | |
1188 | return r; | |
c9719c69 HZ |
1189 | } |
1190 | ||
1191 | rv_init_cp_jump_table(adev); | |
1192 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | |
1193 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | |
989b6823 | 1194 | } |
ba7bb665 | 1195 | |
989b6823 EQ |
1196 | switch (adev->asic_type) { |
1197 | case CHIP_RAVEN: | |
ba7bb665 | 1198 | gfx_v9_0_init_lbpw(adev); |
989b6823 EQ |
1199 | break; |
1200 | case CHIP_VEGA20: | |
1201 | gfx_v9_4_init_lbpw(adev); | |
1202 | break; | |
1203 | default: | |
1204 | break; | |
c9719c69 HZ |
1205 | } |
1206 | ||
1207 | return 0; | |
1208 | } | |
1209 | ||
137dc4b9 EQ |
1210 | static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev) |
1211 | { | |
1212 | int r; | |
1213 | ||
1214 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); | |
1215 | if (unlikely(r != 0)) | |
1216 | return r; | |
1217 | ||
1218 | r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, | |
1219 | AMDGPU_GEM_DOMAIN_VRAM); | |
1220 | if (!r) | |
1221 | adev->gfx.rlc.clear_state_gpu_addr = | |
1222 | amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj); | |
1223 | ||
1224 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | |
1225 | ||
1226 | return r; | |
1227 | } | |
1228 | ||
1229 | static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev) | |
1230 | { | |
1231 | int r; | |
1232 | ||
1233 | if (!adev->gfx.rlc.clear_state_obj) | |
1234 | return; | |
1235 | ||
1236 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); | |
1237 | if (likely(r == 0)) { | |
1238 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | |
1239 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | |
1240 | } | |
1241 | } | |
1242 | ||
b1023571 KW |
1243 | static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) |
1244 | { | |
078af1a3 CK |
1245 | amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); |
1246 | amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); | |
b1023571 KW |
1247 | } |
1248 | ||
b1023571 KW |
1249 | static int gfx_v9_0_mec_init(struct amdgpu_device *adev) |
1250 | { | |
1251 | int r; | |
1252 | u32 *hpd; | |
1253 | const __le32 *fw_data; | |
1254 | unsigned fw_size; | |
1255 | u32 *fw; | |
42794b27 | 1256 | size_t mec_hpd_size; |
b1023571 KW |
1257 | |
1258 | const struct gfx_firmware_header_v1_0 *mec_hdr; | |
1259 | ||
78c16834 AR |
1260 | bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); |
1261 | ||
78c16834 | 1262 | /* take ownership of the relevant compute queues */ |
41f6a99a | 1263 | amdgpu_gfx_compute_queue_acquire(adev); |
78c16834 | 1264 | mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; |
b1023571 | 1265 | |
a4a02777 CK |
1266 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
1267 | AMDGPU_GEM_DOMAIN_GTT, | |
1268 | &adev->gfx.mec.hpd_eop_obj, | |
1269 | &adev->gfx.mec.hpd_eop_gpu_addr, | |
1270 | (void **)&hpd); | |
b1023571 | 1271 | if (r) { |
a4a02777 | 1272 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); |
b1023571 KW |
1273 | gfx_v9_0_mec_fini(adev); |
1274 | return r; | |
1275 | } | |
1276 | ||
1277 | memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); | |
1278 | ||
1279 | amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); | |
1280 | amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); | |
1281 | ||
1282 | mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | |
1283 | ||
1284 | fw_data = (const __le32 *) | |
1285 | (adev->gfx.mec_fw->data + | |
1286 | le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); | |
1287 | fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; | |
1288 | ||
a4a02777 CK |
1289 | r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, |
1290 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, | |
1291 | &adev->gfx.mec.mec_fw_obj, | |
1292 | &adev->gfx.mec.mec_fw_gpu_addr, | |
1293 | (void **)&fw); | |
b1023571 | 1294 | if (r) { |
a4a02777 | 1295 | dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); |
b1023571 KW |
1296 | gfx_v9_0_mec_fini(adev); |
1297 | return r; | |
1298 | } | |
a4a02777 | 1299 | |
b1023571 KW |
1300 | memcpy(fw, fw_data, fw_size); |
1301 | ||
1302 | amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); | |
1303 | amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); | |
1304 | ||
b1023571 KW |
1305 | return 0; |
1306 | } | |
1307 | ||
1308 | static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address) | |
1309 | { | |
5e78835a | 1310 | WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, |
b1023571 KW |
1311 | (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | |
1312 | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | | |
1313 | (address << SQ_IND_INDEX__INDEX__SHIFT) | | |
1314 | (SQ_IND_INDEX__FORCE_READ_MASK)); | |
5e78835a | 1315 | return RREG32_SOC15(GC, 0, mmSQ_IND_DATA); |
b1023571 KW |
1316 | } |
1317 | ||
1318 | static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd, | |
1319 | uint32_t wave, uint32_t thread, | |
1320 | uint32_t regno, uint32_t num, uint32_t *out) | |
1321 | { | |
5e78835a | 1322 | WREG32_SOC15(GC, 0, mmSQ_IND_INDEX, |
b1023571 KW |
1323 | (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) | |
1324 | (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) | | |
1325 | (regno << SQ_IND_INDEX__INDEX__SHIFT) | | |
1326 | (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) | | |
1327 | (SQ_IND_INDEX__FORCE_READ_MASK) | | |
1328 | (SQ_IND_INDEX__AUTO_INCR_MASK)); | |
1329 | while (num--) | |
5e78835a | 1330 | *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA); |
b1023571 KW |
1331 | } |
1332 | ||
1333 | static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields) | |
1334 | { | |
1335 | /* type 1 wave data */ | |
1336 | dst[(*no_fields)++] = 1; | |
1337 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS); | |
1338 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO); | |
1339 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI); | |
1340 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO); | |
1341 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI); | |
1342 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID); | |
1343 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0); | |
1344 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1); | |
1345 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC); | |
1346 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC); | |
1347 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS); | |
1348 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS); | |
1349 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0); | |
1350 | dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0); | |
1351 | } | |
1352 | ||
1353 | static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, | |
1354 | uint32_t wave, uint32_t start, | |
1355 | uint32_t size, uint32_t *dst) | |
1356 | { | |
1357 | wave_read_regs( | |
1358 | adev, simd, wave, 0, | |
1359 | start + SQIND_WAVE_SGPRS_OFFSET, size, dst); | |
1360 | } | |
1361 | ||
822770ad NH |
1362 | static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, |
1363 | uint32_t wave, uint32_t thread, | |
1364 | uint32_t start, uint32_t size, | |
1365 | uint32_t *dst) | |
1366 | { | |
1367 | wave_read_regs( | |
1368 | adev, simd, wave, thread, | |
1369 | start + SQIND_WAVE_VGPRS_OFFSET, size, dst); | |
1370 | } | |
b1023571 | 1371 | |
f7a9ee81 AG |
1372 | static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev, |
1373 | u32 me, u32 pipe, u32 q) | |
1374 | { | |
1375 | soc15_grbm_select(adev, me, pipe, q, 0); | |
1376 | } | |
1377 | ||
b1023571 KW |
1378 | static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { |
1379 | .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, | |
1380 | .select_se_sh = &gfx_v9_0_select_se_sh, | |
1381 | .read_wave_data = &gfx_v9_0_read_wave_data, | |
1382 | .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, | |
822770ad | 1383 | .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, |
f7a9ee81 | 1384 | .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q |
b1023571 KW |
1385 | }; |
1386 | ||
3251c043 | 1387 | static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) |
b1023571 KW |
1388 | { |
1389 | u32 gb_addr_config; | |
3251c043 | 1390 | int err; |
b1023571 KW |
1391 | |
1392 | adev->gfx.funcs = &gfx_v9_0_gfx_funcs; | |
1393 | ||
1394 | switch (adev->asic_type) { | |
1395 | case CHIP_VEGA10: | |
b1023571 | 1396 | adev->gfx.config.max_hw_contexts = 8; |
b1023571 KW |
1397 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; |
1398 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | |
1399 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | |
1400 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; | |
1401 | gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN; | |
1402 | break; | |
e5c62edd AD |
1403 | case CHIP_VEGA12: |
1404 | adev->gfx.config.max_hw_contexts = 8; | |
1405 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | |
1406 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | |
1407 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | |
1408 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; | |
62b35f9a | 1409 | gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; |
e5c62edd AD |
1410 | DRM_INFO("fix gfx.config for vega12\n"); |
1411 | break; | |
d3adedb4 FX |
1412 | case CHIP_VEGA20: |
1413 | adev->gfx.config.max_hw_contexts = 8; | |
1414 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | |
1415 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | |
1416 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | |
1417 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; | |
1418 | gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); | |
1419 | gb_addr_config &= ~0xf3e777ff; | |
1420 | gb_addr_config |= 0x22014042; | |
3251c043 AD |
1421 | /* check vbios table if gpu info is not available */ |
1422 | err = amdgpu_atomfirmware_get_gfx_info(adev); | |
1423 | if (err) | |
1424 | return err; | |
d3adedb4 | 1425 | break; |
5cf7433d CZ |
1426 | case CHIP_RAVEN: |
1427 | adev->gfx.config.max_hw_contexts = 8; | |
1428 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | |
1429 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | |
1430 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | |
1431 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; | |
28ab1229 FX |
1432 | if (adev->rev_id >= 8) |
1433 | gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN; | |
1434 | else | |
1435 | gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; | |
5cf7433d | 1436 | break; |
b1023571 KW |
1437 | default: |
1438 | BUG(); | |
1439 | break; | |
1440 | } | |
1441 | ||
1442 | adev->gfx.config.gb_addr_config = gb_addr_config; | |
1443 | ||
1444 | adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << | |
1445 | REG_GET_FIELD( | |
1446 | adev->gfx.config.gb_addr_config, | |
1447 | GB_ADDR_CONFIG, | |
1448 | NUM_PIPES); | |
ad7d0ff3 AD |
1449 | |
1450 | adev->gfx.config.max_tile_pipes = | |
1451 | adev->gfx.config.gb_addr_config_fields.num_pipes; | |
1452 | ||
b1023571 KW |
1453 | adev->gfx.config.gb_addr_config_fields.num_banks = 1 << |
1454 | REG_GET_FIELD( | |
1455 | adev->gfx.config.gb_addr_config, | |
1456 | GB_ADDR_CONFIG, | |
1457 | NUM_BANKS); | |
1458 | adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 << | |
1459 | REG_GET_FIELD( | |
1460 | adev->gfx.config.gb_addr_config, | |
1461 | GB_ADDR_CONFIG, | |
1462 | MAX_COMPRESSED_FRAGS); | |
1463 | adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 << | |
1464 | REG_GET_FIELD( | |
1465 | adev->gfx.config.gb_addr_config, | |
1466 | GB_ADDR_CONFIG, | |
1467 | NUM_RB_PER_SE); | |
1468 | adev->gfx.config.gb_addr_config_fields.num_se = 1 << | |
1469 | REG_GET_FIELD( | |
1470 | adev->gfx.config.gb_addr_config, | |
1471 | GB_ADDR_CONFIG, | |
1472 | NUM_SHADER_ENGINES); | |
1473 | adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 + | |
1474 | REG_GET_FIELD( | |
1475 | adev->gfx.config.gb_addr_config, | |
1476 | GB_ADDR_CONFIG, | |
1477 | PIPE_INTERLEAVE_SIZE)); | |
3251c043 AD |
1478 | |
1479 | return 0; | |
b1023571 KW |
1480 | } |
1481 | ||
1482 | static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, | |
1483 | struct amdgpu_ngg_buf *ngg_buf, | |
1484 | int size_se, | |
1485 | int default_size_se) | |
1486 | { | |
1487 | int r; | |
1488 | ||
1489 | if (size_se < 0) { | |
1490 | dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se); | |
1491 | return -EINVAL; | |
1492 | } | |
1493 | size_se = size_se ? size_se : default_size_se; | |
1494 | ||
42ce2243 | 1495 | ngg_buf->size = size_se * adev->gfx.config.max_shader_engines; |
b1023571 KW |
1496 | r = amdgpu_bo_create_kernel(adev, ngg_buf->size, |
1497 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | |
1498 | &ngg_buf->bo, | |
1499 | &ngg_buf->gpu_addr, | |
1500 | NULL); | |
1501 | if (r) { | |
1502 | dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r); | |
1503 | return r; | |
1504 | } | |
1505 | ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo); | |
1506 | ||
1507 | return r; | |
1508 | } | |
1509 | ||
1510 | static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev) | |
1511 | { | |
1512 | int i; | |
1513 | ||
1514 | for (i = 0; i < NGG_BUF_MAX; i++) | |
1515 | amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo, | |
1516 | &adev->gfx.ngg.buf[i].gpu_addr, | |
1517 | NULL); | |
1518 | ||
1519 | memset(&adev->gfx.ngg.buf[0], 0, | |
1520 | sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX); | |
1521 | ||
1522 | adev->gfx.ngg.init = false; | |
1523 | ||
1524 | return 0; | |
1525 | } | |
1526 | ||
1527 | static int gfx_v9_0_ngg_init(struct amdgpu_device *adev) | |
1528 | { | |
1529 | int r; | |
1530 | ||
1531 | if (!amdgpu_ngg || adev->gfx.ngg.init == true) | |
1532 | return 0; | |
1533 | ||
1534 | /* GDS reserve memory: 64 bytes alignment */ | |
1535 | adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40); | |
1536 | adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size; | |
1537 | adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size; | |
d33bba4d JZ |
1538 | adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE); |
1539 | adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); | |
b1023571 KW |
1540 | |
1541 | /* Primitive Buffer */ | |
af8baf15 | 1542 | r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM], |
b1023571 KW |
1543 | amdgpu_prim_buf_per_se, |
1544 | 64 * 1024); | |
1545 | if (r) { | |
1546 | dev_err(adev->dev, "Failed to create Primitive Buffer\n"); | |
1547 | goto err; | |
1548 | } | |
1549 | ||
1550 | /* Position Buffer */ | |
af8baf15 | 1551 | r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS], |
b1023571 KW |
1552 | amdgpu_pos_buf_per_se, |
1553 | 256 * 1024); | |
1554 | if (r) { | |
1555 | dev_err(adev->dev, "Failed to create Position Buffer\n"); | |
1556 | goto err; | |
1557 | } | |
1558 | ||
1559 | /* Control Sideband */ | |
af8baf15 | 1560 | r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL], |
b1023571 KW |
1561 | amdgpu_cntl_sb_buf_per_se, |
1562 | 256); | |
1563 | if (r) { | |
1564 | dev_err(adev->dev, "Failed to create Control Sideband Buffer\n"); | |
1565 | goto err; | |
1566 | } | |
1567 | ||
1568 | /* Parameter Cache, not created by default */ | |
1569 | if (amdgpu_param_buf_per_se <= 0) | |
1570 | goto out; | |
1571 | ||
af8baf15 | 1572 | r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM], |
b1023571 KW |
1573 | amdgpu_param_buf_per_se, |
1574 | 512 * 1024); | |
1575 | if (r) { | |
1576 | dev_err(adev->dev, "Failed to create Parameter Cache\n"); | |
1577 | goto err; | |
1578 | } | |
1579 | ||
1580 | out: | |
1581 | adev->gfx.ngg.init = true; | |
1582 | return 0; | |
1583 | err: | |
1584 | gfx_v9_0_ngg_fini(adev); | |
1585 | return r; | |
1586 | } | |
1587 | ||
1588 | static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) | |
1589 | { | |
1590 | struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; | |
1591 | int r; | |
91629eff | 1592 | u32 data, base; |
b1023571 KW |
1593 | |
1594 | if (!amdgpu_ngg) | |
1595 | return 0; | |
1596 | ||
1597 | /* Program buffer size */ | |
91629eff TSD |
1598 | data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, |
1599 | adev->gfx.ngg.buf[NGG_PRIM].size >> 8); | |
1600 | data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, | |
1601 | adev->gfx.ngg.buf[NGG_POS].size >> 8); | |
5e78835a | 1602 | WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data); |
b1023571 | 1603 | |
91629eff TSD |
1604 | data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, |
1605 | adev->gfx.ngg.buf[NGG_CNTL].size >> 8); | |
1606 | data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, | |
1607 | adev->gfx.ngg.buf[NGG_PARAM].size >> 10); | |
5e78835a | 1608 | WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data); |
b1023571 KW |
1609 | |
1610 | /* Program buffer base address */ | |
af8baf15 | 1611 | base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); |
b1023571 | 1612 | data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base); |
5e78835a | 1613 | WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data); |
b1023571 | 1614 | |
af8baf15 | 1615 | base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); |
b1023571 | 1616 | data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base); |
5e78835a | 1617 | WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data); |
b1023571 | 1618 | |
af8baf15 | 1619 | base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); |
b1023571 | 1620 | data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base); |
5e78835a | 1621 | WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data); |
b1023571 | 1622 | |
af8baf15 | 1623 | base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); |
b1023571 | 1624 | data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base); |
5e78835a | 1625 | WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data); |
b1023571 | 1626 | |
af8baf15 | 1627 | base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); |
b1023571 | 1628 | data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base); |
5e78835a | 1629 | WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data); |
b1023571 | 1630 | |
af8baf15 | 1631 | base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); |
b1023571 | 1632 | data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base); |
5e78835a | 1633 | WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data); |
b1023571 KW |
1634 | |
1635 | /* Clear GDS reserved memory */ | |
1636 | r = amdgpu_ring_alloc(ring, 17); | |
1637 | if (r) { | |
1638 | DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n", | |
1639 | ring->idx, r); | |
1640 | return r; | |
1641 | } | |
1642 | ||
1643 | gfx_v9_0_write_data_to_reg(ring, 0, false, | |
946a4d5b | 1644 | SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), |
b1023571 | 1645 | (adev->gds.mem.total_size + |
77a2faa5 | 1646 | adev->gfx.ngg.gds_reserve_size)); |
b1023571 KW |
1647 | |
1648 | amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5)); | |
1649 | amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC | | |
d33bba4d | 1650 | PACKET3_DMA_DATA_DST_SEL(1) | |
b1023571 KW |
1651 | PACKET3_DMA_DATA_SRC_SEL(2))); |
1652 | amdgpu_ring_write(ring, 0); | |
1653 | amdgpu_ring_write(ring, 0); | |
1654 | amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr); | |
1655 | amdgpu_ring_write(ring, 0); | |
d33bba4d JZ |
1656 | amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT | |
1657 | adev->gfx.ngg.gds_reserve_size); | |
b1023571 KW |
1658 | |
1659 | gfx_v9_0_write_data_to_reg(ring, 0, false, | |
946a4d5b | 1660 | SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0); |
b1023571 KW |
1661 | |
1662 | amdgpu_ring_commit(ring); | |
1663 | ||
1664 | return 0; | |
1665 | } | |
1666 | ||
1361f455 AD |
1667 | static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, |
1668 | int mec, int pipe, int queue) | |
1669 | { | |
1670 | int r; | |
1671 | unsigned irq_type; | |
1672 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; | |
1673 | ||
1674 | ring = &adev->gfx.compute_ring[ring_id]; | |
1675 | ||
1676 | /* mec0 is me1 */ | |
1677 | ring->me = mec + 1; | |
1678 | ring->pipe = pipe; | |
1679 | ring->queue = queue; | |
1680 | ||
1681 | ring->ring_obj = NULL; | |
1682 | ring->use_doorbell = true; | |
7366af81 | 1683 | ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1; |
1361f455 AD |
1684 | ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr |
1685 | + (ring_id * GFX9_MEC_HPD_SIZE); | |
1686 | sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue); | |
1687 | ||
1688 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP | |
1689 | + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) | |
1690 | + ring->pipe; | |
1691 | ||
1692 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | |
1693 | r = amdgpu_ring_init(adev, ring, 1024, | |
1694 | &adev->gfx.eop_irq, irq_type); | |
1695 | if (r) | |
1696 | return r; | |
1697 | ||
1698 | ||
1699 | return 0; | |
1700 | } | |
1701 | ||
b1023571 KW |
1702 | static int gfx_v9_0_sw_init(void *handle) |
1703 | { | |
1361f455 | 1704 | int i, j, k, r, ring_id; |
b1023571 | 1705 | struct amdgpu_ring *ring; |
ac104e99 | 1706 | struct amdgpu_kiq *kiq; |
b1023571 KW |
1707 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1708 | ||
4853bbb6 AD |
1709 | switch (adev->asic_type) { |
1710 | case CHIP_VEGA10: | |
8b399477 | 1711 | case CHIP_VEGA12: |
61324ddc | 1712 | case CHIP_VEGA20: |
4853bbb6 AD |
1713 | case CHIP_RAVEN: |
1714 | adev->gfx.mec.num_mec = 2; | |
1715 | break; | |
1716 | default: | |
1717 | adev->gfx.mec.num_mec = 1; | |
1718 | break; | |
1719 | } | |
1720 | ||
1721 | adev->gfx.mec.num_pipe_per_mec = 4; | |
1722 | adev->gfx.mec.num_queue_per_pipe = 8; | |
1723 | ||
b1023571 | 1724 | /* EOP Event */ |
44a99b65 | 1725 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); |
b1023571 KW |
1726 | if (r) |
1727 | return r; | |
1728 | ||
1729 | /* Privileged reg */ | |
44a99b65 | 1730 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT, |
b1023571 KW |
1731 | &adev->gfx.priv_reg_irq); |
1732 | if (r) | |
1733 | return r; | |
1734 | ||
1735 | /* Privileged inst */ | |
44a99b65 | 1736 | r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT, |
b1023571 KW |
1737 | &adev->gfx.priv_inst_irq); |
1738 | if (r) | |
1739 | return r; | |
1740 | ||
1741 | adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; | |
1742 | ||
1743 | gfx_v9_0_scratch_init(adev); | |
1744 | ||
1745 | r = gfx_v9_0_init_microcode(adev); | |
1746 | if (r) { | |
1747 | DRM_ERROR("Failed to load gfx firmware!\n"); | |
1748 | return r; | |
1749 | } | |
1750 | ||
c9719c69 HZ |
1751 | r = gfx_v9_0_rlc_init(adev); |
1752 | if (r) { | |
1753 | DRM_ERROR("Failed to init rlc BOs!\n"); | |
1754 | return r; | |
1755 | } | |
1756 | ||
b1023571 KW |
1757 | r = gfx_v9_0_mec_init(adev); |
1758 | if (r) { | |
1759 | DRM_ERROR("Failed to init MEC BOs!\n"); | |
1760 | return r; | |
1761 | } | |
1762 | ||
1763 | /* set up the gfx ring */ | |
1764 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { | |
1765 | ring = &adev->gfx.gfx_ring[i]; | |
1766 | ring->ring_obj = NULL; | |
f6886c47 TSD |
1767 | if (!i) |
1768 | sprintf(ring->name, "gfx"); | |
1769 | else | |
1770 | sprintf(ring->name, "gfx_%d", i); | |
b1023571 KW |
1771 | ring->use_doorbell = true; |
1772 | ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1; | |
1773 | r = amdgpu_ring_init(adev, ring, 1024, | |
1774 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP); | |
1775 | if (r) | |
1776 | return r; | |
1777 | } | |
1778 | ||
1361f455 AD |
1779 | /* set up the compute queues - allocate horizontally across pipes */ |
1780 | ring_id = 0; | |
1781 | for (i = 0; i < adev->gfx.mec.num_mec; ++i) { | |
1782 | for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { | |
1783 | for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { | |
2db0cdbe | 1784 | if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j)) |
1361f455 AD |
1785 | continue; |
1786 | ||
1787 | r = gfx_v9_0_compute_ring_init(adev, | |
1788 | ring_id, | |
1789 | i, k, j); | |
1790 | if (r) | |
1791 | return r; | |
1792 | ||
1793 | ring_id++; | |
1794 | } | |
b1023571 | 1795 | } |
b1023571 KW |
1796 | } |
1797 | ||
71c37505 | 1798 | r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE); |
e30a5223 AD |
1799 | if (r) { |
1800 | DRM_ERROR("Failed to init KIQ BOs!\n"); | |
1801 | return r; | |
1802 | } | |
ac104e99 | 1803 | |
e30a5223 | 1804 | kiq = &adev->gfx.kiq; |
71c37505 | 1805 | r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq); |
e30a5223 AD |
1806 | if (r) |
1807 | return r; | |
464826d6 | 1808 | |
e30a5223 | 1809 | /* create MQD for all compute queues as wel as KIQ for SRIOV case */ |
ffe6d881 | 1810 | r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation)); |
e30a5223 AD |
1811 | if (r) |
1812 | return r; | |
ac104e99 | 1813 | |
b1023571 KW |
1814 | adev->gfx.ce_ram_size = 0x8000; |
1815 | ||
3251c043 AD |
1816 | r = gfx_v9_0_gpu_early_init(adev); |
1817 | if (r) | |
1818 | return r; | |
b1023571 KW |
1819 | |
1820 | r = gfx_v9_0_ngg_init(adev); | |
1821 | if (r) | |
1822 | return r; | |
1823 | ||
1824 | return 0; | |
1825 | } | |
1826 | ||
1827 | ||
1828 | static int gfx_v9_0_sw_fini(void *handle) | |
1829 | { | |
1830 | int i; | |
1831 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
1832 | ||
1833 | amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL); | |
1834 | amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL); | |
1835 | amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL); | |
1836 | ||
1837 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | |
1838 | amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); | |
1839 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | |
1840 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | |
1841 | ||
b9683c21 | 1842 | amdgpu_gfx_compute_mqd_sw_fini(adev); |
71c37505 AD |
1843 | amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); |
1844 | amdgpu_gfx_kiq_fini(adev); | |
ac104e99 | 1845 | |
b1023571 KW |
1846 | gfx_v9_0_mec_fini(adev); |
1847 | gfx_v9_0_ngg_fini(adev); | |
9862def9 ML |
1848 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, |
1849 | &adev->gfx.rlc.clear_state_gpu_addr, | |
1850 | (void **)&adev->gfx.rlc.cs_ptr); | |
741deade | 1851 | if (adev->asic_type == CHIP_RAVEN) { |
9862def9 ML |
1852 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, |
1853 | &adev->gfx.rlc.cp_table_gpu_addr, | |
1854 | (void **)&adev->gfx.rlc.cp_table_ptr); | |
1855 | } | |
c833d8aa | 1856 | gfx_v9_0_free_microcode(adev); |
b1023571 KW |
1857 | |
1858 | return 0; | |
1859 | } | |
1860 | ||
1861 | ||
1862 | static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev) | |
1863 | { | |
1864 | /* TODO */ | |
1865 | } | |
1866 | ||
1867 | static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) | |
1868 | { | |
be448a4d | 1869 | u32 data; |
b1023571 | 1870 | |
be448a4d NH |
1871 | if (instance == 0xffffffff) |
1872 | data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); | |
1873 | else | |
1874 | data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); | |
1875 | ||
1876 | if (se_num == 0xffffffff) | |
b1023571 | 1877 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); |
be448a4d | 1878 | else |
b1023571 | 1879 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); |
be448a4d NH |
1880 | |
1881 | if (sh_num == 0xffffffff) | |
1882 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); | |
1883 | else | |
b1023571 | 1884 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); |
be448a4d | 1885 | |
5e78835a | 1886 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); |
b1023571 KW |
1887 | } |
1888 | ||
b1023571 KW |
1889 | static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev) |
1890 | { | |
1891 | u32 data, mask; | |
1892 | ||
5e78835a TSD |
1893 | data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE); |
1894 | data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE); | |
b1023571 KW |
1895 | |
1896 | data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; | |
1897 | data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; | |
1898 | ||
378506a7 AD |
1899 | mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se / |
1900 | adev->gfx.config.max_sh_per_se); | |
b1023571 KW |
1901 | |
1902 | return (~data) & mask; | |
1903 | } | |
1904 | ||
1905 | static void gfx_v9_0_setup_rb(struct amdgpu_device *adev) | |
1906 | { | |
1907 | int i, j; | |
2572c24c | 1908 | u32 data; |
b1023571 KW |
1909 | u32 active_rbs = 0; |
1910 | u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / | |
1911 | adev->gfx.config.max_sh_per_se; | |
1912 | ||
1913 | mutex_lock(&adev->grbm_idx_mutex); | |
1914 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | |
1915 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | |
1916 | gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); | |
1917 | data = gfx_v9_0_get_rb_active_bitmap(adev); | |
1918 | active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * | |
1919 | rb_bitmap_width_per_sh); | |
1920 | } | |
1921 | } | |
1922 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
1923 | mutex_unlock(&adev->grbm_idx_mutex); | |
1924 | ||
1925 | adev->gfx.config.backend_enable_mask = active_rbs; | |
2572c24c | 1926 | adev->gfx.config.num_rbs = hweight32(active_rbs); |
b1023571 KW |
1927 | } |
1928 | ||
1929 | #define DEFAULT_SH_MEM_BASES (0x6000) | |
1930 | #define FIRST_COMPUTE_VMID (8) | |
1931 | #define LAST_COMPUTE_VMID (16) | |
1932 | static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) | |
1933 | { | |
1934 | int i; | |
1935 | uint32_t sh_mem_config; | |
1936 | uint32_t sh_mem_bases; | |
1937 | ||
1938 | /* | |
1939 | * Configure apertures: | |
1940 | * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) | |
1941 | * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) | |
1942 | * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) | |
1943 | */ | |
1944 | sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); | |
1945 | ||
1946 | sh_mem_config = SH_MEM_ADDRESS_MODE_64 | | |
1947 | SH_MEM_ALIGNMENT_MODE_UNALIGNED << | |
eaa05d52 | 1948 | SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; |
b1023571 KW |
1949 | |
1950 | mutex_lock(&adev->srbm_mutex); | |
1951 | for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { | |
1952 | soc15_grbm_select(adev, 0, 0, 0, i); | |
1953 | /* CP and shaders */ | |
5e78835a TSD |
1954 | WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); |
1955 | WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); | |
b1023571 KW |
1956 | } |
1957 | soc15_grbm_select(adev, 0, 0, 0, 0); | |
1958 | mutex_unlock(&adev->srbm_mutex); | |
1959 | } | |
1960 | ||
434e6df2 | 1961 | static void gfx_v9_0_constants_init(struct amdgpu_device *adev) |
b1023571 KW |
1962 | { |
1963 | u32 tmp; | |
1964 | int i; | |
1965 | ||
40f06773 | 1966 | WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff); |
b1023571 KW |
1967 | |
1968 | gfx_v9_0_tiling_mode_table_init(adev); | |
1969 | ||
1970 | gfx_v9_0_setup_rb(adev); | |
1971 | gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); | |
5eeae247 | 1972 | adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); |
b1023571 KW |
1973 | |
1974 | /* XXX SH_MEM regs */ | |
1975 | /* where to put LDS, scratch, GPUVM in FSA64 space */ | |
1976 | mutex_lock(&adev->srbm_mutex); | |
32b646b2 | 1977 | for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) { |
b1023571 KW |
1978 | soc15_grbm_select(adev, 0, 0, 0, i); |
1979 | /* CP and shaders */ | |
a7ea6548 AD |
1980 | if (i == 0) { |
1981 | tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, | |
1982 | SH_MEM_ALIGNMENT_MODE_UNALIGNED); | |
1983 | WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); | |
1984 | WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0); | |
1985 | } else { | |
1986 | tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, | |
1987 | SH_MEM_ALIGNMENT_MODE_UNALIGNED); | |
1988 | WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); | |
bfa8eea2 FC |
1989 | tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, |
1990 | (adev->gmc.private_aperture_start >> 48)); | |
1991 | tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, | |
1992 | (adev->gmc.shared_aperture_start >> 48)); | |
a7ea6548 AD |
1993 | WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp); |
1994 | } | |
b1023571 KW |
1995 | } |
1996 | soc15_grbm_select(adev, 0, 0, 0, 0); | |
1997 | ||
1998 | mutex_unlock(&adev->srbm_mutex); | |
1999 | ||
2000 | gfx_v9_0_init_compute_vmid(adev); | |
2001 | ||
2002 | mutex_lock(&adev->grbm_idx_mutex); | |
2003 | /* | |
2004 | * making sure that the following register writes will be broadcasted | |
2005 | * to all the shaders | |
2006 | */ | |
2007 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
2008 | ||
5e78835a | 2009 | WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE, |
b1023571 KW |
2010 | (adev->gfx.config.sc_prim_fifo_size_frontend << |
2011 | PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | | |
2012 | (adev->gfx.config.sc_prim_fifo_size_backend << | |
2013 | PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | | |
2014 | (adev->gfx.config.sc_hiz_tile_fifo_size << | |
2015 | PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | | |
2016 | (adev->gfx.config.sc_earlyz_tile_fifo_size << | |
2017 | PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); | |
2018 | mutex_unlock(&adev->grbm_idx_mutex); | |
2019 | ||
2020 | } | |
2021 | ||
2022 | static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) | |
2023 | { | |
2024 | u32 i, j, k; | |
2025 | u32 mask; | |
2026 | ||
2027 | mutex_lock(&adev->grbm_idx_mutex); | |
2028 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | |
2029 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | |
2030 | gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); | |
2031 | for (k = 0; k < adev->usec_timeout; k++) { | |
5e78835a | 2032 | if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0) |
b1023571 KW |
2033 | break; |
2034 | udelay(1); | |
2035 | } | |
1366b2d0 | 2036 | if (k == adev->usec_timeout) { |
2037 | gfx_v9_0_select_se_sh(adev, 0xffffffff, | |
2038 | 0xffffffff, 0xffffffff); | |
2039 | mutex_unlock(&adev->grbm_idx_mutex); | |
2040 | DRM_INFO("Timeout wait for RLC serdes %u,%u\n", | |
2041 | i, j); | |
2042 | return; | |
2043 | } | |
b1023571 KW |
2044 | } |
2045 | } | |
2046 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
2047 | mutex_unlock(&adev->grbm_idx_mutex); | |
2048 | ||
2049 | mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | | |
2050 | RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | | |
2051 | RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | | |
2052 | RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; | |
2053 | for (k = 0; k < adev->usec_timeout; k++) { | |
5e78835a | 2054 | if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) |
b1023571 KW |
2055 | break; |
2056 | udelay(1); | |
2057 | } | |
2058 | } | |
2059 | ||
2060 | static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, | |
2061 | bool enable) | |
2062 | { | |
5e78835a | 2063 | u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0); |
b1023571 | 2064 | |
b1023571 KW |
2065 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0); |
2066 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0); | |
2067 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0); | |
2068 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0); | |
2069 | ||
5e78835a | 2070 | WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); |
b1023571 KW |
2071 | } |
2072 | ||
6bce4667 HZ |
2073 | static void gfx_v9_0_init_csb(struct amdgpu_device *adev) |
2074 | { | |
2075 | /* csib */ | |
2076 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI), | |
2077 | adev->gfx.rlc.clear_state_gpu_addr >> 32); | |
2078 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO), | |
2079 | adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); | |
2080 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH), | |
2081 | adev->gfx.rlc.clear_state_size); | |
2082 | } | |
2083 | ||
727b888f | 2084 | static void gfx_v9_1_parse_ind_reg_list(int *register_list_format, |
6bce4667 HZ |
2085 | int indirect_offset, |
2086 | int list_size, | |
2087 | int *unique_indirect_regs, | |
cb5ed37f | 2088 | int unique_indirect_reg_count, |
6bce4667 | 2089 | int *indirect_start_offsets, |
cb5ed37f EQ |
2090 | int *indirect_start_offsets_count, |
2091 | int max_start_offsets_count) | |
6bce4667 HZ |
2092 | { |
2093 | int idx; | |
6bce4667 HZ |
2094 | |
2095 | for (; indirect_offset < list_size; indirect_offset++) { | |
cb5ed37f | 2096 | WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count); |
727b888f HR |
2097 | indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset; |
2098 | *indirect_start_offsets_count = *indirect_start_offsets_count + 1; | |
6bce4667 | 2099 | |
727b888f HR |
2100 | while (register_list_format[indirect_offset] != 0xFFFFFFFF) { |
2101 | indirect_offset += 2; | |
6bce4667 | 2102 | |
727b888f | 2103 | /* look for the matching indice */ |
cb5ed37f | 2104 | for (idx = 0; idx < unique_indirect_reg_count; idx++) { |
727b888f HR |
2105 | if (unique_indirect_regs[idx] == |
2106 | register_list_format[indirect_offset] || | |
2107 | !unique_indirect_regs[idx]) | |
2108 | break; | |
2109 | } | |
6bce4667 | 2110 | |
cb5ed37f | 2111 | BUG_ON(idx >= unique_indirect_reg_count); |
6bce4667 | 2112 | |
727b888f HR |
2113 | if (!unique_indirect_regs[idx]) |
2114 | unique_indirect_regs[idx] = register_list_format[indirect_offset]; | |
6bce4667 | 2115 | |
727b888f | 2116 | indirect_offset++; |
6bce4667 | 2117 | } |
6bce4667 HZ |
2118 | } |
2119 | } | |
2120 | ||
727b888f | 2121 | static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev) |
6bce4667 HZ |
2122 | { |
2123 | int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; | |
2124 | int unique_indirect_reg_count = 0; | |
2125 | ||
2126 | int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; | |
2127 | int indirect_start_offsets_count = 0; | |
2128 | ||
2129 | int list_size = 0; | |
727b888f | 2130 | int i = 0, j = 0; |
6bce4667 HZ |
2131 | u32 tmp = 0; |
2132 | ||
2133 | u32 *register_list_format = | |
2134 | kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL); | |
2135 | if (!register_list_format) | |
2136 | return -ENOMEM; | |
2137 | memcpy(register_list_format, adev->gfx.rlc.register_list_format, | |
2138 | adev->gfx.rlc.reg_list_format_size_bytes); | |
2139 | ||
2140 | /* setup unique_indirect_regs array and indirect_start_offsets array */ | |
727b888f HR |
2141 | unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs); |
2142 | gfx_v9_1_parse_ind_reg_list(register_list_format, | |
2143 | adev->gfx.rlc.reg_list_format_direct_reg_list_length, | |
2144 | adev->gfx.rlc.reg_list_format_size_bytes >> 2, | |
2145 | unique_indirect_regs, | |
cb5ed37f | 2146 | unique_indirect_reg_count, |
727b888f | 2147 | indirect_start_offsets, |
cb5ed37f EQ |
2148 | &indirect_start_offsets_count, |
2149 | ARRAY_SIZE(indirect_start_offsets)); | |
6bce4667 HZ |
2150 | |
2151 | /* enable auto inc in case it is disabled */ | |
2152 | tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); | |
2153 | tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; | |
2154 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp); | |
2155 | ||
2156 | /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */ | |
2157 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), | |
2158 | RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET); | |
2159 | for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) | |
2160 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA), | |
2161 | adev->gfx.rlc.register_restore[i]); | |
2162 | ||
6bce4667 HZ |
2163 | /* load indirect register */ |
2164 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), | |
2165 | adev->gfx.rlc.reg_list_format_start); | |
727b888f HR |
2166 | |
2167 | /* direct register portion */ | |
2168 | for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++) | |
6bce4667 HZ |
2169 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), |
2170 | register_list_format[i]); | |
2171 | ||
727b888f HR |
2172 | /* indirect register portion */ |
2173 | while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) { | |
2174 | if (register_list_format[i] == 0xFFFFFFFF) { | |
2175 | WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); | |
2176 | continue; | |
2177 | } | |
2178 | ||
2179 | WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); | |
2180 | WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); | |
2181 | ||
2182 | for (j = 0; j < unique_indirect_reg_count; j++) { | |
2183 | if (register_list_format[i] == unique_indirect_regs[j]) { | |
2184 | WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j); | |
2185 | break; | |
2186 | } | |
2187 | } | |
2188 | ||
2189 | BUG_ON(j >= unique_indirect_reg_count); | |
2190 | ||
2191 | i++; | |
2192 | } | |
2193 | ||
6bce4667 HZ |
2194 | /* set save/restore list size */ |
2195 | list_size = adev->gfx.rlc.reg_list_size_bytes >> 2; | |
2196 | list_size = list_size >> 1; | |
2197 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), | |
2198 | adev->gfx.rlc.reg_restore_list_size); | |
2199 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size); | |
2200 | ||
2201 | /* write the starting offsets to RLC scratch ram */ | |
2202 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), | |
2203 | adev->gfx.rlc.starting_offsets_start); | |
c1b24a14 | 2204 | for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++) |
6bce4667 | 2205 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), |
727b888f | 2206 | indirect_start_offsets[i]); |
6bce4667 HZ |
2207 | |
2208 | /* load unique indirect regs*/ | |
c1b24a14 | 2209 | for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) { |
727b888f HR |
2210 | if (unique_indirect_regs[i] != 0) { |
2211 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) | |
2212 | + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i], | |
2213 | unique_indirect_regs[i] & 0x3FFFF); | |
2214 | ||
2215 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) | |
2216 | + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i], | |
2217 | unique_indirect_regs[i] >> 20); | |
2218 | } | |
6bce4667 HZ |
2219 | } |
2220 | ||
2221 | kfree(register_list_format); | |
2222 | return 0; | |
2223 | } | |
2224 | ||
2225 | static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev) | |
2226 | { | |
0e5293d0 | 2227 | WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1); |
6bce4667 HZ |
2228 | } |
2229 | ||
91d3130a HZ |
2230 | static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev, |
2231 | bool enable) | |
2232 | { | |
2233 | uint32_t data = 0; | |
2234 | uint32_t default_data = 0; | |
2235 | ||
2236 | default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS)); | |
2237 | if (enable == true) { | |
2238 | /* enable GFXIP control over CGPG */ | |
2239 | data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; | |
2240 | if(default_data != data) | |
2241 | WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); | |
2242 | ||
2243 | /* update status */ | |
2244 | data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK; | |
2245 | data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT); | |
2246 | if(default_data != data) | |
2247 | WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); | |
2248 | } else { | |
2249 | /* restore GFXIP control over GCPG */ | |
2250 | data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK; | |
2251 | if(default_data != data) | |
2252 | WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data); | |
2253 | } | |
2254 | } | |
2255 | ||
2256 | static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev) | |
2257 | { | |
2258 | uint32_t data = 0; | |
2259 | ||
2260 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | | |
2261 | AMD_PG_SUPPORT_GFX_SMG | | |
2262 | AMD_PG_SUPPORT_GFX_DMG)) { | |
2263 | /* init IDLE_POLL_COUNT = 60 */ | |
2264 | data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL)); | |
2265 | data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; | |
2266 | data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); | |
2267 | WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data); | |
2268 | ||
2269 | /* init RLC PG Delay */ | |
2270 | data = 0; | |
2271 | data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT); | |
2272 | data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT); | |
2273 | data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT); | |
2274 | data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT); | |
2275 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data); | |
2276 | ||
2277 | data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2)); | |
2278 | data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK; | |
2279 | data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT); | |
2280 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data); | |
2281 | ||
2282 | data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3)); | |
2283 | data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK; | |
2284 | data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT); | |
2285 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data); | |
2286 | ||
2287 | data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL)); | |
2288 | data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK; | |
2289 | ||
2290 | /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */ | |
2291 | data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT); | |
2292 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data); | |
2293 | ||
2294 | pwr_10_0_gfxip_control_over_cgpg(adev, true); | |
2295 | } | |
2296 | } | |
2297 | ||
ed5ad1e4 HZ |
2298 | static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, |
2299 | bool enable) | |
2300 | { | |
2301 | uint32_t data = 0; | |
2302 | uint32_t default_data = 0; | |
2303 | ||
2304 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
e24c7f06 TSD |
2305 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2306 | SMU_CLK_SLOWDOWN_ON_PU_ENABLE, | |
2307 | enable ? 1 : 0); | |
2308 | if (default_data != data) | |
2309 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
ed5ad1e4 HZ |
2310 | } |
2311 | ||
2312 | static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, | |
2313 | bool enable) | |
2314 | { | |
2315 | uint32_t data = 0; | |
2316 | uint32_t default_data = 0; | |
2317 | ||
2318 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
b926fe8e TSD |
2319 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2320 | SMU_CLK_SLOWDOWN_ON_PD_ENABLE, | |
2321 | enable ? 1 : 0); | |
2322 | if(default_data != data) | |
2323 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
ed5ad1e4 HZ |
2324 | } |
2325 | ||
3a6cc477 HZ |
2326 | static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev, |
2327 | bool enable) | |
2328 | { | |
2329 | uint32_t data = 0; | |
2330 | uint32_t default_data = 0; | |
2331 | ||
2332 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
54cfe0fc TSD |
2333 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2334 | CP_PG_DISABLE, | |
2335 | enable ? 0 : 1); | |
2336 | if(default_data != data) | |
2337 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
3a6cc477 HZ |
2338 | } |
2339 | ||
197f95c8 HZ |
2340 | static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev, |
2341 | bool enable) | |
2342 | { | |
2343 | uint32_t data, default_data; | |
2344 | ||
2345 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
f55ee212 TSD |
2346 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2347 | GFX_POWER_GATING_ENABLE, | |
2348 | enable ? 1 : 0); | |
197f95c8 HZ |
2349 | if(default_data != data) |
2350 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
2351 | } | |
2352 | ||
2353 | static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev, | |
2354 | bool enable) | |
2355 | { | |
2356 | uint32_t data, default_data; | |
2357 | ||
2358 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
513f8133 TSD |
2359 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2360 | GFX_PIPELINE_PG_ENABLE, | |
2361 | enable ? 1 : 0); | |
197f95c8 HZ |
2362 | if(default_data != data) |
2363 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
2364 | ||
2365 | if (!enable) | |
2366 | /* read any GFX register to wake up GFX */ | |
2367 | data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL)); | |
2368 | } | |
2369 | ||
552c8f76 | 2370 | static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, |
2371 | bool enable) | |
18924c71 HZ |
2372 | { |
2373 | uint32_t data, default_data; | |
2374 | ||
2375 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
7915c8fd TSD |
2376 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2377 | STATIC_PER_CU_PG_ENABLE, | |
2378 | enable ? 1 : 0); | |
18924c71 HZ |
2379 | if(default_data != data) |
2380 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
2381 | } | |
2382 | ||
552c8f76 | 2383 | static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, |
18924c71 HZ |
2384 | bool enable) |
2385 | { | |
2386 | uint32_t data, default_data; | |
2387 | ||
2388 | default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL)); | |
e567fa69 TSD |
2389 | data = REG_SET_FIELD(data, RLC_PG_CNTL, |
2390 | DYN_PER_CU_PG_ENABLE, | |
2391 | enable ? 1 : 0); | |
18924c71 HZ |
2392 | if(default_data != data) |
2393 | WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data); | |
2394 | } | |
2395 | ||
6bce4667 HZ |
2396 | static void gfx_v9_0_init_pg(struct amdgpu_device *adev) |
2397 | { | |
af356b6d EQ |
2398 | gfx_v9_0_init_csb(adev); |
2399 | ||
b58b65cf EQ |
2400 | /* |
2401 | * Rlc save restore list is workable since v2_1. | |
2402 | * And it's needed by gfxoff feature. | |
2403 | */ | |
2404 | if (adev->gfx.rlc.is_rlc_v2_1) { | |
2405 | gfx_v9_1_init_rlc_save_restore_list(adev); | |
2406 | gfx_v9_0_enable_save_restore_machine(adev); | |
2407 | } | |
a5acf930 | 2408 | |
6bce4667 HZ |
2409 | if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | |
2410 | AMD_PG_SUPPORT_GFX_SMG | | |
2411 | AMD_PG_SUPPORT_GFX_DMG | | |
2412 | AMD_PG_SUPPORT_CP | | |
2413 | AMD_PG_SUPPORT_GDS | | |
2414 | AMD_PG_SUPPORT_RLC_SMU_HS)) { | |
a5acf930 HR |
2415 | WREG32(mmRLC_JUMP_TABLE_RESTORE, |
2416 | adev->gfx.rlc.cp_table_gpu_addr >> 8); | |
2417 | gfx_v9_0_init_gfx_power_gating(adev); | |
6bce4667 HZ |
2418 | } |
2419 | } | |
2420 | ||
b1023571 KW |
2421 | void gfx_v9_0_rlc_stop(struct amdgpu_device *adev) |
2422 | { | |
b08796ce | 2423 | WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0); |
b1023571 | 2424 | gfx_v9_0_enable_gui_idle_interrupt(adev, false); |
b1023571 KW |
2425 | gfx_v9_0_wait_for_rlc_serdes(adev); |
2426 | } | |
2427 | ||
2428 | static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev) | |
2429 | { | |
596c8e8b | 2430 | WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); |
b1023571 | 2431 | udelay(50); |
596c8e8b | 2432 | WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); |
b1023571 KW |
2433 | udelay(50); |
2434 | } | |
2435 | ||
2436 | static void gfx_v9_0_rlc_start(struct amdgpu_device *adev) | |
2437 | { | |
2438 | #ifdef AMDGPU_RLC_DEBUG_RETRY | |
2439 | u32 rlc_ucode_ver; | |
2440 | #endif | |
b1023571 | 2441 | |
342cda25 | 2442 | WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1); |
ad97d9de | 2443 | udelay(50); |
b1023571 KW |
2444 | |
2445 | /* carrizo do enable cp interrupt after cp inited */ | |
ad97d9de | 2446 | if (!(adev->flags & AMD_IS_APU)) { |
b1023571 | 2447 | gfx_v9_0_enable_gui_idle_interrupt(adev, true); |
ad97d9de | 2448 | udelay(50); |
2449 | } | |
b1023571 KW |
2450 | |
2451 | #ifdef AMDGPU_RLC_DEBUG_RETRY | |
2452 | /* RLC_GPM_GENERAL_6 : RLC Ucode version */ | |
5e78835a | 2453 | rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6); |
b1023571 KW |
2454 | if(rlc_ucode_ver == 0x108) { |
2455 | DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n", | |
2456 | rlc_ucode_ver, adev->gfx.rlc_fw_version); | |
2457 | /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles, | |
2458 | * default is 0x9C4 to create a 100us interval */ | |
5e78835a | 2459 | WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4); |
b1023571 | 2460 | /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr |
eaa05d52 | 2461 | * to disable the page fault retry interrupts, default is |
b1023571 | 2462 | * 0x100 (256) */ |
5e78835a | 2463 | WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100); |
b1023571 KW |
2464 | } |
2465 | #endif | |
2466 | } | |
2467 | ||
2468 | static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev) | |
2469 | { | |
2470 | const struct rlc_firmware_header_v2_0 *hdr; | |
2471 | const __le32 *fw_data; | |
2472 | unsigned i, fw_size; | |
2473 | ||
2474 | if (!adev->gfx.rlc_fw) | |
2475 | return -EINVAL; | |
2476 | ||
2477 | hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; | |
2478 | amdgpu_ucode_print_rlc_hdr(&hdr->header); | |
2479 | ||
2480 | fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + | |
2481 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | |
2482 | fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | |
2483 | ||
5e78835a | 2484 | WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, |
b1023571 KW |
2485 | RLCG_UCODE_LOADING_START_ADDRESS); |
2486 | for (i = 0; i < fw_size; i++) | |
5e78835a TSD |
2487 | WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); |
2488 | WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); | |
b1023571 KW |
2489 | |
2490 | return 0; | |
2491 | } | |
2492 | ||
2493 | static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) | |
2494 | { | |
2495 | int r; | |
2496 | ||
f840cc5f ML |
2497 | if (amdgpu_sriov_vf(adev)) { |
2498 | gfx_v9_0_init_csb(adev); | |
cfee05bc | 2499 | return 0; |
f840cc5f | 2500 | } |
cfee05bc | 2501 | |
b1023571 KW |
2502 | gfx_v9_0_rlc_stop(adev); |
2503 | ||
2504 | /* disable CG */ | |
5e78835a | 2505 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); |
b1023571 | 2506 | |
b1023571 KW |
2507 | gfx_v9_0_rlc_reset(adev); |
2508 | ||
6bce4667 HZ |
2509 | gfx_v9_0_init_pg(adev); |
2510 | ||
b1023571 KW |
2511 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
2512 | /* legacy rlc firmware loading */ | |
2513 | r = gfx_v9_0_rlc_load_microcode(adev); | |
2514 | if (r) | |
2515 | return r; | |
2516 | } | |
2517 | ||
989b6823 EQ |
2518 | if (adev->asic_type == CHIP_RAVEN || |
2519 | adev->asic_type == CHIP_VEGA20) { | |
e8835e0e HZ |
2520 | if (amdgpu_lbpw != 0) |
2521 | gfx_v9_0_enable_lbpw(adev, true); | |
2522 | else | |
2523 | gfx_v9_0_enable_lbpw(adev, false); | |
2524 | } | |
2525 | ||
b1023571 KW |
2526 | gfx_v9_0_rlc_start(adev); |
2527 | ||
2528 | return 0; | |
2529 | } | |
2530 | ||
2531 | static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | |
2532 | { | |
2533 | int i; | |
5e78835a | 2534 | u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); |
b1023571 | 2535 | |
ea64468e TSD |
2536 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1); |
2537 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1); | |
2538 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); | |
2539 | if (!enable) { | |
b1023571 KW |
2540 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
2541 | adev->gfx.gfx_ring[i].ready = false; | |
2542 | } | |
5e78835a | 2543 | WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); |
b1023571 KW |
2544 | udelay(50); |
2545 | } | |
2546 | ||
2547 | static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev) | |
2548 | { | |
2549 | const struct gfx_firmware_header_v1_0 *pfp_hdr; | |
2550 | const struct gfx_firmware_header_v1_0 *ce_hdr; | |
2551 | const struct gfx_firmware_header_v1_0 *me_hdr; | |
2552 | const __le32 *fw_data; | |
2553 | unsigned i, fw_size; | |
2554 | ||
2555 | if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) | |
2556 | return -EINVAL; | |
2557 | ||
2558 | pfp_hdr = (const struct gfx_firmware_header_v1_0 *) | |
2559 | adev->gfx.pfp_fw->data; | |
2560 | ce_hdr = (const struct gfx_firmware_header_v1_0 *) | |
2561 | adev->gfx.ce_fw->data; | |
2562 | me_hdr = (const struct gfx_firmware_header_v1_0 *) | |
2563 | adev->gfx.me_fw->data; | |
2564 | ||
2565 | amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); | |
2566 | amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); | |
2567 | amdgpu_ucode_print_gfx_hdr(&me_hdr->header); | |
2568 | ||
2569 | gfx_v9_0_cp_gfx_enable(adev, false); | |
2570 | ||
2571 | /* PFP */ | |
2572 | fw_data = (const __le32 *) | |
2573 | (adev->gfx.pfp_fw->data + | |
2574 | le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); | |
2575 | fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; | |
5e78835a | 2576 | WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0); |
b1023571 | 2577 | for (i = 0; i < fw_size; i++) |
5e78835a TSD |
2578 | WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); |
2579 | WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); | |
b1023571 KW |
2580 | |
2581 | /* CE */ | |
2582 | fw_data = (const __le32 *) | |
2583 | (adev->gfx.ce_fw->data + | |
2584 | le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); | |
2585 | fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; | |
5e78835a | 2586 | WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0); |
b1023571 | 2587 | for (i = 0; i < fw_size; i++) |
5e78835a TSD |
2588 | WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); |
2589 | WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); | |
b1023571 KW |
2590 | |
2591 | /* ME */ | |
2592 | fw_data = (const __le32 *) | |
2593 | (adev->gfx.me_fw->data + | |
2594 | le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); | |
2595 | fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; | |
5e78835a | 2596 | WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0); |
b1023571 | 2597 | for (i = 0; i < fw_size; i++) |
5e78835a TSD |
2598 | WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); |
2599 | WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); | |
b1023571 KW |
2600 | |
2601 | return 0; | |
2602 | } | |
2603 | ||
b1023571 KW |
2604 | static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev) |
2605 | { | |
2606 | struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; | |
2607 | const struct cs_section_def *sect = NULL; | |
2608 | const struct cs_extent_def *ext = NULL; | |
d5de797f | 2609 | int r, i, tmp; |
b1023571 KW |
2610 | |
2611 | /* init the CP */ | |
5e78835a TSD |
2612 | WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); |
2613 | WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1); | |
b1023571 KW |
2614 | |
2615 | gfx_v9_0_cp_gfx_enable(adev, true); | |
2616 | ||
d5de797f | 2617 | r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3); |
b1023571 KW |
2618 | if (r) { |
2619 | DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); | |
2620 | return r; | |
2621 | } | |
2622 | ||
2623 | amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | |
2624 | amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); | |
2625 | ||
2626 | amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); | |
2627 | amdgpu_ring_write(ring, 0x80000000); | |
2628 | amdgpu_ring_write(ring, 0x80000000); | |
2629 | ||
2630 | for (sect = gfx9_cs_data; sect->section != NULL; ++sect) { | |
2631 | for (ext = sect->section; ext->extent != NULL; ++ext) { | |
2632 | if (sect->id == SECT_CONTEXT) { | |
2633 | amdgpu_ring_write(ring, | |
2634 | PACKET3(PACKET3_SET_CONTEXT_REG, | |
2635 | ext->reg_count)); | |
2636 | amdgpu_ring_write(ring, | |
2637 | ext->reg_index - PACKET3_SET_CONTEXT_REG_START); | |
2638 | for (i = 0; i < ext->reg_count; i++) | |
2639 | amdgpu_ring_write(ring, ext->extent[i]); | |
2640 | } | |
2641 | } | |
2642 | } | |
2643 | ||
2644 | amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | |
2645 | amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); | |
2646 | ||
2647 | amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); | |
2648 | amdgpu_ring_write(ring, 0); | |
2649 | ||
2650 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); | |
2651 | amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); | |
2652 | amdgpu_ring_write(ring, 0x8000); | |
2653 | amdgpu_ring_write(ring, 0x8000); | |
2654 | ||
d5de797f KW |
2655 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG,1)); |
2656 | tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE | | |
2657 | (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START)); | |
2658 | amdgpu_ring_write(ring, tmp); | |
2659 | amdgpu_ring_write(ring, 0); | |
2660 | ||
b1023571 KW |
2661 | amdgpu_ring_commit(ring); |
2662 | ||
2663 | return 0; | |
2664 | } | |
2665 | ||
2666 | static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) | |
2667 | { | |
2668 | struct amdgpu_ring *ring; | |
2669 | u32 tmp; | |
2670 | u32 rb_bufsz; | |
3fc08b61 | 2671 | u64 rb_addr, rptr_addr, wptr_gpu_addr; |
b1023571 KW |
2672 | |
2673 | /* Set the write pointer delay */ | |
5e78835a | 2674 | WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0); |
b1023571 KW |
2675 | |
2676 | /* set the RB to use vmid 0 */ | |
5e78835a | 2677 | WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0); |
b1023571 KW |
2678 | |
2679 | /* Set ring buffer size */ | |
2680 | ring = &adev->gfx.gfx_ring[0]; | |
2681 | rb_bufsz = order_base_2(ring->ring_size / 8); | |
2682 | tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); | |
2683 | tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); | |
2684 | #ifdef __BIG_ENDIAN | |
2685 | tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1); | |
2686 | #endif | |
5e78835a | 2687 | WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp); |
b1023571 KW |
2688 | |
2689 | /* Initialize the ring buffer's write pointers */ | |
2690 | ring->wptr = 0; | |
5e78835a TSD |
2691 | WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); |
2692 | WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); | |
b1023571 KW |
2693 | |
2694 | /* set the wb address wether it's enabled or not */ | |
2695 | rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); | |
5e78835a TSD |
2696 | WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); |
2697 | WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK); | |
b1023571 | 2698 | |
3fc08b61 | 2699 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); |
5e78835a TSD |
2700 | WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr)); |
2701 | WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr)); | |
3fc08b61 | 2702 | |
b1023571 | 2703 | mdelay(1); |
5e78835a | 2704 | WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp); |
b1023571 KW |
2705 | |
2706 | rb_addr = ring->gpu_addr >> 8; | |
5e78835a TSD |
2707 | WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr); |
2708 | WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); | |
b1023571 | 2709 | |
5e78835a | 2710 | tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL); |
b1023571 KW |
2711 | if (ring->use_doorbell) { |
2712 | tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, | |
2713 | DOORBELL_OFFSET, ring->doorbell_index); | |
2714 | tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, | |
2715 | DOORBELL_EN, 1); | |
2716 | } else { | |
2717 | tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0); | |
2718 | } | |
5e78835a | 2719 | WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp); |
b1023571 KW |
2720 | |
2721 | tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, | |
2722 | DOORBELL_RANGE_LOWER, ring->doorbell_index); | |
5e78835a | 2723 | WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); |
b1023571 | 2724 | |
5e78835a | 2725 | WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER, |
b1023571 KW |
2726 | CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); |
2727 | ||
2728 | ||
2729 | /* start the ring */ | |
2730 | gfx_v9_0_cp_gfx_start(adev); | |
2731 | ring->ready = true; | |
2732 | ||
2733 | return 0; | |
2734 | } | |
2735 | ||
2736 | static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) | |
2737 | { | |
2738 | int i; | |
2739 | ||
2740 | if (enable) { | |
5e78835a | 2741 | WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0); |
b1023571 | 2742 | } else { |
5e78835a | 2743 | WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, |
b1023571 KW |
2744 | (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); |
2745 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | |
2746 | adev->gfx.compute_ring[i].ready = false; | |
ac104e99 | 2747 | adev->gfx.kiq.ring.ready = false; |
b1023571 KW |
2748 | } |
2749 | udelay(50); | |
2750 | } | |
2751 | ||
b1023571 KW |
2752 | static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev) |
2753 | { | |
2754 | const struct gfx_firmware_header_v1_0 *mec_hdr; | |
2755 | const __le32 *fw_data; | |
2756 | unsigned i; | |
2757 | u32 tmp; | |
2758 | ||
2759 | if (!adev->gfx.mec_fw) | |
2760 | return -EINVAL; | |
2761 | ||
2762 | gfx_v9_0_cp_compute_enable(adev, false); | |
2763 | ||
2764 | mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | |
2765 | amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); | |
2766 | ||
2767 | fw_data = (const __le32 *) | |
2768 | (adev->gfx.mec_fw->data + | |
2769 | le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); | |
2770 | tmp = 0; | |
2771 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); | |
2772 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); | |
5e78835a | 2773 | WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp); |
b1023571 | 2774 | |
5e78835a | 2775 | WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO, |
b1023571 | 2776 | adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000); |
5e78835a | 2777 | WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI, |
b1023571 | 2778 | upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr)); |
eaa05d52 | 2779 | |
b1023571 | 2780 | /* MEC1 */ |
5e78835a | 2781 | WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, |
b1023571 KW |
2782 | mec_hdr->jt_offset); |
2783 | for (i = 0; i < mec_hdr->jt_size; i++) | |
5e78835a | 2784 | WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA, |
b1023571 KW |
2785 | le32_to_cpup(fw_data + mec_hdr->jt_offset + i)); |
2786 | ||
5e78835a | 2787 | WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR, |
b1023571 KW |
2788 | adev->gfx.mec_fw_version); |
2789 | /* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ | |
2790 | ||
2791 | return 0; | |
2792 | } | |
2793 | ||
464826d6 XY |
2794 | /* KIQ functions */ |
2795 | static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring) | |
b1023571 | 2796 | { |
464826d6 XY |
2797 | uint32_t tmp; |
2798 | struct amdgpu_device *adev = ring->adev; | |
b1023571 | 2799 | |
464826d6 | 2800 | /* tell RLC which is KIQ queue */ |
5e78835a | 2801 | tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); |
464826d6 XY |
2802 | tmp &= 0xffffff00; |
2803 | tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); | |
5e78835a | 2804 | WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); |
464826d6 | 2805 | tmp |= 0x80; |
5e78835a | 2806 | WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp); |
464826d6 | 2807 | } |
b1023571 | 2808 | |
0f1dfd52 | 2809 | static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) |
464826d6 | 2810 | { |
bd3402ea | 2811 | struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; |
de65513a | 2812 | uint64_t queue_mask = 0; |
2fdde9fa | 2813 | int r, i; |
b1023571 | 2814 | |
de65513a AR |
2815 | for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) { |
2816 | if (!test_bit(i, adev->gfx.mec.queue_bitmap)) | |
2817 | continue; | |
b1023571 | 2818 | |
de65513a AR |
2819 | /* This situation may be hit in the future if a new HW |
2820 | * generation exposes more than 64 queues. If so, the | |
2821 | * definition of queue_mask needs updating */ | |
1d11ee89 | 2822 | if (WARN_ON(i >= (sizeof(queue_mask)*8))) { |
de65513a AR |
2823 | DRM_ERROR("Invalid KCQ enabled: %d\n", i); |
2824 | break; | |
b1023571 | 2825 | } |
b1023571 | 2826 | |
de65513a AR |
2827 | queue_mask |= (1ull << i); |
2828 | } | |
b1023571 | 2829 | |
841cf911 | 2830 | r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 8); |
2fdde9fa AD |
2831 | if (r) { |
2832 | DRM_ERROR("Failed to lock KIQ (%d).\n", r); | |
b1023571 | 2833 | return r; |
2fdde9fa | 2834 | } |
b1023571 | 2835 | |
0f1dfd52 AD |
2836 | /* set resources */ |
2837 | amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6)); | |
2838 | amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) | | |
2839 | PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */ | |
de65513a AR |
2840 | amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */ |
2841 | amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */ | |
0f1dfd52 AD |
2842 | amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */ |
2843 | amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */ | |
2844 | amdgpu_ring_write(kiq_ring, 0); /* oac mask */ | |
2845 | amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */ | |
bd3402ea AD |
2846 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
2847 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | |
2848 | uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); | |
2849 | uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | |
2850 | ||
2851 | amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); | |
2852 | /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ | |
2853 | amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ | |
2854 | PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ | |
2855 | PACKET3_MAP_QUEUES_VMID(0) | /* VMID */ | |
2856 | PACKET3_MAP_QUEUES_QUEUE(ring->queue) | | |
2857 | PACKET3_MAP_QUEUES_PIPE(ring->pipe) | | |
2858 | PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) | | |
2859 | PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ | |
f4534f06 | 2860 | PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ |
bd3402ea AD |
2861 | PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ |
2862 | PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ | |
2863 | amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); | |
2864 | amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); | |
2865 | amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); | |
2866 | amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); | |
2867 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); | |
2868 | } | |
b1023571 | 2869 | |
841cf911 RZ |
2870 | r = amdgpu_ring_test_ring(kiq_ring); |
2871 | if (r) { | |
2872 | DRM_ERROR("KCQ enable failed\n"); | |
2873 | kiq_ring->ready = false; | |
2fdde9fa | 2874 | } |
464826d6 | 2875 | |
2fdde9fa | 2876 | return r; |
464826d6 XY |
2877 | } |
2878 | ||
e322edc3 | 2879 | static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring) |
464826d6 | 2880 | { |
33fb8698 | 2881 | struct amdgpu_device *adev = ring->adev; |
e322edc3 | 2882 | struct v9_mqd *mqd = ring->mqd_ptr; |
464826d6 XY |
2883 | uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; |
2884 | uint32_t tmp; | |
2885 | ||
2886 | mqd->header = 0xC0310800; | |
2887 | mqd->compute_pipelinestat_enable = 0x00000001; | |
2888 | mqd->compute_static_thread_mgmt_se0 = 0xffffffff; | |
2889 | mqd->compute_static_thread_mgmt_se1 = 0xffffffff; | |
2890 | mqd->compute_static_thread_mgmt_se2 = 0xffffffff; | |
2891 | mqd->compute_static_thread_mgmt_se3 = 0xffffffff; | |
2892 | mqd->compute_misc_reserved = 0x00000003; | |
2893 | ||
ffe6d881 AD |
2894 | mqd->dynamic_cu_mask_addr_lo = |
2895 | lower_32_bits(ring->mqd_gpu_addr | |
2896 | + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); | |
2897 | mqd->dynamic_cu_mask_addr_hi = | |
2898 | upper_32_bits(ring->mqd_gpu_addr | |
2899 | + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); | |
2900 | ||
d72f2f46 | 2901 | eop_base_addr = ring->eop_gpu_addr >> 8; |
464826d6 XY |
2902 | mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; |
2903 | mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); | |
2904 | ||
2905 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ | |
5e78835a | 2906 | tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL); |
464826d6 | 2907 | tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, |
268cb4c7 | 2908 | (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); |
464826d6 XY |
2909 | |
2910 | mqd->cp_hqd_eop_control = tmp; | |
2911 | ||
2912 | /* enable doorbell? */ | |
5e78835a | 2913 | tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); |
464826d6 XY |
2914 | |
2915 | if (ring->use_doorbell) { | |
2916 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2917 | DOORBELL_OFFSET, ring->doorbell_index); | |
2918 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2919 | DOORBELL_EN, 1); | |
2920 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2921 | DOORBELL_SOURCE, 0); | |
2922 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2923 | DOORBELL_HIT, 0); | |
78888cff | 2924 | } else { |
464826d6 XY |
2925 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
2926 | DOORBELL_EN, 0); | |
78888cff | 2927 | } |
464826d6 XY |
2928 | |
2929 | mqd->cp_hqd_pq_doorbell_control = tmp; | |
2930 | ||
2931 | /* disable the queue if it's active */ | |
2932 | ring->wptr = 0; | |
2933 | mqd->cp_hqd_dequeue_request = 0; | |
2934 | mqd->cp_hqd_pq_rptr = 0; | |
2935 | mqd->cp_hqd_pq_wptr_lo = 0; | |
2936 | mqd->cp_hqd_pq_wptr_hi = 0; | |
2937 | ||
2938 | /* set the pointer to the MQD */ | |
33fb8698 AD |
2939 | mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; |
2940 | mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); | |
464826d6 XY |
2941 | |
2942 | /* set MQD vmid to 0 */ | |
5e78835a | 2943 | tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL); |
464826d6 XY |
2944 | tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); |
2945 | mqd->cp_mqd_control = tmp; | |
2946 | ||
2947 | /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ | |
2948 | hqd_gpu_addr = ring->gpu_addr >> 8; | |
2949 | mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; | |
2950 | mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); | |
2951 | ||
2952 | /* set up the HQD, this is similar to CP_RB0_CNTL */ | |
5e78835a | 2953 | tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL); |
464826d6 XY |
2954 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, |
2955 | (order_base_2(ring->ring_size / 4) - 1)); | |
2956 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, | |
2957 | ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); | |
2958 | #ifdef __BIG_ENDIAN | |
2959 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); | |
2960 | #endif | |
2961 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); | |
2962 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); | |
2963 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); | |
2964 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); | |
2965 | mqd->cp_hqd_pq_control = tmp; | |
2966 | ||
2967 | /* set the wb address whether it's enabled or not */ | |
2968 | wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); | |
2969 | mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; | |
2970 | mqd->cp_hqd_pq_rptr_report_addr_hi = | |
2971 | upper_32_bits(wb_gpu_addr) & 0xffff; | |
2972 | ||
2973 | /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ | |
2974 | wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | |
2975 | mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; | |
2976 | mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; | |
2977 | ||
2978 | tmp = 0; | |
2979 | /* enable the doorbell if requested */ | |
2980 | if (ring->use_doorbell) { | |
5e78835a | 2981 | tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL); |
464826d6 XY |
2982 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
2983 | DOORBELL_OFFSET, ring->doorbell_index); | |
2984 | ||
2985 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2986 | DOORBELL_EN, 1); | |
2987 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2988 | DOORBELL_SOURCE, 0); | |
2989 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | |
2990 | DOORBELL_HIT, 0); | |
2991 | } | |
2992 | ||
2993 | mqd->cp_hqd_pq_doorbell_control = tmp; | |
2994 | ||
2995 | /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ | |
2996 | ring->wptr = 0; | |
0274a9c5 | 2997 | mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR); |
464826d6 XY |
2998 | |
2999 | /* set the vmid for the queue */ | |
3000 | mqd->cp_hqd_vmid = 0; | |
3001 | ||
0274a9c5 | 3002 | tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE); |
464826d6 XY |
3003 | tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); |
3004 | mqd->cp_hqd_persistent_state = tmp; | |
3005 | ||
fca4ce69 AD |
3006 | /* set MIN_IB_AVAIL_SIZE */ |
3007 | tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL); | |
3008 | tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); | |
3009 | mqd->cp_hqd_ib_control = tmp; | |
3010 | ||
464826d6 XY |
3011 | /* activate the queue */ |
3012 | mqd->cp_hqd_active = 1; | |
3013 | ||
3014 | return 0; | |
3015 | } | |
3016 | ||
e322edc3 | 3017 | static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring) |
464826d6 | 3018 | { |
33fb8698 | 3019 | struct amdgpu_device *adev = ring->adev; |
e322edc3 | 3020 | struct v9_mqd *mqd = ring->mqd_ptr; |
464826d6 XY |
3021 | int j; |
3022 | ||
3023 | /* disable wptr polling */ | |
72edadd5 | 3024 | WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); |
464826d6 | 3025 | |
5e78835a | 3026 | WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, |
464826d6 | 3027 | mqd->cp_hqd_eop_base_addr_lo); |
5e78835a | 3028 | WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, |
464826d6 XY |
3029 | mqd->cp_hqd_eop_base_addr_hi); |
3030 | ||
3031 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ | |
5e78835a | 3032 | WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, |
464826d6 XY |
3033 | mqd->cp_hqd_eop_control); |
3034 | ||
3035 | /* enable doorbell? */ | |
5e78835a | 3036 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, |
464826d6 XY |
3037 | mqd->cp_hqd_pq_doorbell_control); |
3038 | ||
3039 | /* disable the queue if it's active */ | |
5e78835a TSD |
3040 | if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) { |
3041 | WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1); | |
464826d6 | 3042 | for (j = 0; j < adev->usec_timeout; j++) { |
5e78835a | 3043 | if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) |
464826d6 XY |
3044 | break; |
3045 | udelay(1); | |
3046 | } | |
5e78835a | 3047 | WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, |
464826d6 | 3048 | mqd->cp_hqd_dequeue_request); |
5e78835a | 3049 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, |
464826d6 | 3050 | mqd->cp_hqd_pq_rptr); |
5e78835a | 3051 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, |
464826d6 | 3052 | mqd->cp_hqd_pq_wptr_lo); |
5e78835a | 3053 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, |
464826d6 XY |
3054 | mqd->cp_hqd_pq_wptr_hi); |
3055 | } | |
3056 | ||
3057 | /* set the pointer to the MQD */ | |
5e78835a | 3058 | WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, |
464826d6 | 3059 | mqd->cp_mqd_base_addr_lo); |
5e78835a | 3060 | WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, |
464826d6 XY |
3061 | mqd->cp_mqd_base_addr_hi); |
3062 | ||
3063 | /* set MQD vmid to 0 */ | |
5e78835a | 3064 | WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, |
464826d6 XY |
3065 | mqd->cp_mqd_control); |
3066 | ||
3067 | /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ | |
5e78835a | 3068 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, |
464826d6 | 3069 | mqd->cp_hqd_pq_base_lo); |
5e78835a | 3070 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, |
464826d6 XY |
3071 | mqd->cp_hqd_pq_base_hi); |
3072 | ||
3073 | /* set up the HQD, this is similar to CP_RB0_CNTL */ | |
5e78835a | 3074 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, |
464826d6 XY |
3075 | mqd->cp_hqd_pq_control); |
3076 | ||
3077 | /* set the wb address whether it's enabled or not */ | |
5e78835a | 3078 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR, |
464826d6 | 3079 | mqd->cp_hqd_pq_rptr_report_addr_lo); |
5e78835a | 3080 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, |
464826d6 XY |
3081 | mqd->cp_hqd_pq_rptr_report_addr_hi); |
3082 | ||
3083 | /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ | |
5e78835a | 3084 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, |
464826d6 | 3085 | mqd->cp_hqd_pq_wptr_poll_addr_lo); |
5e78835a | 3086 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, |
464826d6 XY |
3087 | mqd->cp_hqd_pq_wptr_poll_addr_hi); |
3088 | ||
3089 | /* enable the doorbell if requested */ | |
3090 | if (ring->use_doorbell) { | |
5e78835a | 3091 | WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER, |
464826d6 | 3092 | (AMDGPU_DOORBELL64_KIQ *2) << 2); |
5e78835a | 3093 | WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER, |
464826d6 XY |
3094 | (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2); |
3095 | } | |
3096 | ||
5e78835a | 3097 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, |
464826d6 XY |
3098 | mqd->cp_hqd_pq_doorbell_control); |
3099 | ||
3100 | /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ | |
5e78835a | 3101 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, |
464826d6 | 3102 | mqd->cp_hqd_pq_wptr_lo); |
5e78835a | 3103 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, |
464826d6 XY |
3104 | mqd->cp_hqd_pq_wptr_hi); |
3105 | ||
3106 | /* set the vmid for the queue */ | |
5e78835a | 3107 | WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid); |
464826d6 | 3108 | |
5e78835a | 3109 | WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, |
464826d6 XY |
3110 | mqd->cp_hqd_persistent_state); |
3111 | ||
3112 | /* activate the queue */ | |
5e78835a | 3113 | WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, |
464826d6 XY |
3114 | mqd->cp_hqd_active); |
3115 | ||
72edadd5 TSD |
3116 | if (ring->use_doorbell) |
3117 | WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1); | |
464826d6 XY |
3118 | |
3119 | return 0; | |
3120 | } | |
3121 | ||
326aa996 AG |
3122 | static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring) |
3123 | { | |
3124 | struct amdgpu_device *adev = ring->adev; | |
3125 | int j; | |
3126 | ||
3127 | /* disable the queue if it's active */ | |
3128 | if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) { | |
3129 | ||
3130 | WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1); | |
3131 | ||
3132 | for (j = 0; j < adev->usec_timeout; j++) { | |
3133 | if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1)) | |
3134 | break; | |
3135 | udelay(1); | |
3136 | } | |
3137 | ||
f7a9ee81 | 3138 | if (j == AMDGPU_MAX_USEC_TIMEOUT) { |
326aa996 AG |
3139 | DRM_DEBUG("KIQ dequeue request failed.\n"); |
3140 | ||
f7a9ee81 | 3141 | /* Manual disable if dequeue request times out */ |
326aa996 AG |
3142 | WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0); |
3143 | } | |
3144 | ||
326aa996 AG |
3145 | WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, |
3146 | 0); | |
3147 | } | |
3148 | ||
3149 | WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0); | |
3150 | WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0); | |
3151 | WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0); | |
3152 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); | |
3153 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0); | |
3154 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0); | |
3155 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0); | |
3156 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0); | |
3157 | ||
3158 | return 0; | |
3159 | } | |
3160 | ||
e322edc3 | 3161 | static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring) |
464826d6 XY |
3162 | { |
3163 | struct amdgpu_device *adev = ring->adev; | |
e322edc3 | 3164 | struct v9_mqd *mqd = ring->mqd_ptr; |
464826d6 XY |
3165 | int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS; |
3166 | ||
898b7893 | 3167 | gfx_v9_0_kiq_setting(ring); |
464826d6 | 3168 | |
13a752e3 | 3169 | if (adev->in_gpu_reset) { /* for GPU_RESET case */ |
464826d6 | 3170 | /* reset MQD to a clean status */ |
0ef376ca | 3171 | if (adev->gfx.mec.mqd_backup[mqd_idx]) |
ffe6d881 | 3172 | memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); |
464826d6 XY |
3173 | |
3174 | /* reset ring buffer */ | |
3175 | ring->wptr = 0; | |
b98724db | 3176 | amdgpu_ring_clear_ring(ring); |
464826d6 | 3177 | |
898b7893 AD |
3178 | mutex_lock(&adev->srbm_mutex); |
3179 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); | |
3180 | gfx_v9_0_kiq_init_register(ring); | |
3181 | soc15_grbm_select(adev, 0, 0, 0, 0); | |
3182 | mutex_unlock(&adev->srbm_mutex); | |
464826d6 | 3183 | } else { |
ffe6d881 AD |
3184 | memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); |
3185 | ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; | |
3186 | ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; | |
ba0c19f5 AD |
3187 | mutex_lock(&adev->srbm_mutex); |
3188 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); | |
3189 | gfx_v9_0_mqd_init(ring); | |
3190 | gfx_v9_0_kiq_init_register(ring); | |
3191 | soc15_grbm_select(adev, 0, 0, 0, 0); | |
3192 | mutex_unlock(&adev->srbm_mutex); | |
3193 | ||
3194 | if (adev->gfx.mec.mqd_backup[mqd_idx]) | |
ffe6d881 | 3195 | memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); |
464826d6 XY |
3196 | } |
3197 | ||
0f1dfd52 | 3198 | return 0; |
898b7893 AD |
3199 | } |
3200 | ||
3201 | static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) | |
3202 | { | |
3203 | struct amdgpu_device *adev = ring->adev; | |
898b7893 AD |
3204 | struct v9_mqd *mqd = ring->mqd_ptr; |
3205 | int mqd_idx = ring - &adev->gfx.compute_ring[0]; | |
898b7893 | 3206 | |
44779b43 | 3207 | if (!adev->in_gpu_reset && !adev->in_suspend) { |
ffe6d881 AD |
3208 | memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); |
3209 | ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; | |
3210 | ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; | |
464826d6 XY |
3211 | mutex_lock(&adev->srbm_mutex); |
3212 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); | |
e322edc3 | 3213 | gfx_v9_0_mqd_init(ring); |
464826d6 XY |
3214 | soc15_grbm_select(adev, 0, 0, 0, 0); |
3215 | mutex_unlock(&adev->srbm_mutex); | |
3216 | ||
898b7893 | 3217 | if (adev->gfx.mec.mqd_backup[mqd_idx]) |
ffe6d881 | 3218 | memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); |
13a752e3 | 3219 | } else if (adev->in_gpu_reset) { /* for GPU_RESET case */ |
464826d6 | 3220 | /* reset MQD to a clean status */ |
898b7893 | 3221 | if (adev->gfx.mec.mqd_backup[mqd_idx]) |
ffe6d881 | 3222 | memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); |
464826d6 XY |
3223 | |
3224 | /* reset ring buffer */ | |
3225 | ring->wptr = 0; | |
898b7893 | 3226 | amdgpu_ring_clear_ring(ring); |
ba0c19f5 AD |
3227 | } else { |
3228 | amdgpu_ring_clear_ring(ring); | |
464826d6 XY |
3229 | } |
3230 | ||
464826d6 XY |
3231 | return 0; |
3232 | } | |
3233 | ||
3234 | static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) | |
3235 | { | |
a9a8a788 RZ |
3236 | struct amdgpu_ring *ring; |
3237 | int r; | |
464826d6 XY |
3238 | |
3239 | ring = &adev->gfx.kiq.ring; | |
e1d53aa8 AD |
3240 | |
3241 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | |
3242 | if (unlikely(r != 0)) | |
a9a8a788 | 3243 | return r; |
e1d53aa8 AD |
3244 | |
3245 | r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); | |
a9a8a788 RZ |
3246 | if (unlikely(r != 0)) |
3247 | return r; | |
3248 | ||
3249 | gfx_v9_0_kiq_init_queue(ring); | |
3250 | amdgpu_bo_kunmap(ring->mqd_obj); | |
3251 | ring->mqd_ptr = NULL; | |
e1d53aa8 | 3252 | amdgpu_bo_unreserve(ring->mqd_obj); |
a9a8a788 RZ |
3253 | ring->ready = true; |
3254 | return 0; | |
3255 | } | |
3256 | ||
3257 | static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev) | |
3258 | { | |
3259 | struct amdgpu_ring *ring = NULL; | |
3260 | int r = 0, i; | |
3261 | ||
3262 | gfx_v9_0_cp_compute_enable(adev, true); | |
464826d6 XY |
3263 | |
3264 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | |
3265 | ring = &adev->gfx.compute_ring[i]; | |
e1d53aa8 AD |
3266 | |
3267 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | |
3268 | if (unlikely(r != 0)) | |
3269 | goto done; | |
3270 | r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr); | |
3271 | if (!r) { | |
898b7893 | 3272 | r = gfx_v9_0_kcq_init_queue(ring); |
464826d6 XY |
3273 | amdgpu_bo_kunmap(ring->mqd_obj); |
3274 | ring->mqd_ptr = NULL; | |
464826d6 | 3275 | } |
e1d53aa8 AD |
3276 | amdgpu_bo_unreserve(ring->mqd_obj); |
3277 | if (r) | |
3278 | goto done; | |
464826d6 XY |
3279 | } |
3280 | ||
0f1dfd52 | 3281 | r = gfx_v9_0_kiq_kcq_enable(adev); |
e1d53aa8 AD |
3282 | done: |
3283 | return r; | |
464826d6 XY |
3284 | } |
3285 | ||
b1023571 KW |
3286 | static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) |
3287 | { | |
bd3402ea | 3288 | int r, i; |
b1023571 KW |
3289 | struct amdgpu_ring *ring; |
3290 | ||
3291 | if (!(adev->flags & AMD_IS_APU)) | |
3292 | gfx_v9_0_enable_gui_idle_interrupt(adev, false); | |
3293 | ||
3294 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { | |
3295 | /* legacy firmware loading */ | |
3296 | r = gfx_v9_0_cp_gfx_load_microcode(adev); | |
3297 | if (r) | |
3298 | return r; | |
3299 | ||
3300 | r = gfx_v9_0_cp_compute_load_microcode(adev); | |
3301 | if (r) | |
3302 | return r; | |
3303 | } | |
3304 | ||
a9a8a788 RZ |
3305 | r = gfx_v9_0_kiq_resume(adev); |
3306 | if (r) | |
3307 | return r; | |
3308 | ||
b1023571 KW |
3309 | r = gfx_v9_0_cp_gfx_resume(adev); |
3310 | if (r) | |
3311 | return r; | |
3312 | ||
a9a8a788 | 3313 | r = gfx_v9_0_kcq_resume(adev); |
b1023571 KW |
3314 | if (r) |
3315 | return r; | |
3316 | ||
3317 | ring = &adev->gfx.gfx_ring[0]; | |
3318 | r = amdgpu_ring_test_ring(ring); | |
3319 | if (r) { | |
3320 | ring->ready = false; | |
3321 | return r; | |
3322 | } | |
e30a5223 | 3323 | |
b1023571 KW |
3324 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
3325 | ring = &adev->gfx.compute_ring[i]; | |
3326 | ||
3327 | ring->ready = true; | |
3328 | r = amdgpu_ring_test_ring(ring); | |
3329 | if (r) | |
3330 | ring->ready = false; | |
3331 | } | |
3332 | ||
3333 | gfx_v9_0_enable_gui_idle_interrupt(adev, true); | |
3334 | ||
3335 | return 0; | |
3336 | } | |
3337 | ||
3338 | static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable) | |
3339 | { | |
3340 | gfx_v9_0_cp_gfx_enable(adev, enable); | |
3341 | gfx_v9_0_cp_compute_enable(adev, enable); | |
3342 | } | |
3343 | ||
3344 | static int gfx_v9_0_hw_init(void *handle) | |
3345 | { | |
3346 | int r; | |
3347 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3348 | ||
3349 | gfx_v9_0_init_golden_registers(adev); | |
3350 | ||
434e6df2 | 3351 | gfx_v9_0_constants_init(adev); |
b1023571 | 3352 | |
137dc4b9 EQ |
3353 | r = gfx_v9_0_csb_vram_pin(adev); |
3354 | if (r) | |
3355 | return r; | |
3356 | ||
b1023571 KW |
3357 | r = gfx_v9_0_rlc_resume(adev); |
3358 | if (r) | |
3359 | return r; | |
3360 | ||
3361 | r = gfx_v9_0_cp_resume(adev); | |
3362 | if (r) | |
3363 | return r; | |
3364 | ||
3365 | r = gfx_v9_0_ngg_en(adev); | |
3366 | if (r) | |
3367 | return r; | |
3368 | ||
3369 | return r; | |
3370 | } | |
3371 | ||
ffabea84 | 3372 | static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) |
85f95ad6 | 3373 | { |
ffabea84 RZ |
3374 | int r, i; |
3375 | struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; | |
85f95ad6 | 3376 | |
ffabea84 RZ |
3377 | r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings); |
3378 | if (r) | |
85f95ad6 | 3379 | DRM_ERROR("Failed to lock KIQ (%d).\n", r); |
85f95ad6 | 3380 | |
ffabea84 RZ |
3381 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
3382 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | |
3383 | ||
3384 | amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4)); | |
3385 | amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */ | |
85f95ad6 ML |
3386 | PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */ |
3387 | PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) | | |
3388 | PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) | | |
3389 | PACKET3_UNMAP_QUEUES_NUM_QUEUES(1)); | |
ffabea84 RZ |
3390 | amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index)); |
3391 | amdgpu_ring_write(kiq_ring, 0); | |
3392 | amdgpu_ring_write(kiq_ring, 0); | |
3393 | amdgpu_ring_write(kiq_ring, 0); | |
3394 | } | |
841cf911 RZ |
3395 | r = amdgpu_ring_test_ring(kiq_ring); |
3396 | if (r) | |
3397 | DRM_ERROR("KCQ disable failed\n"); | |
3398 | ||
85f95ad6 ML |
3399 | return r; |
3400 | } | |
3401 | ||
b1023571 KW |
3402 | static int gfx_v9_0_hw_fini(void *handle) |
3403 | { | |
3404 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3405 | ||
3406 | amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); | |
3407 | amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); | |
85f95ad6 ML |
3408 | |
3409 | /* disable KCQ to avoid CPC touch memory not valid anymore */ | |
ffabea84 | 3410 | gfx_v9_0_kcq_disable(adev); |
85f95ad6 | 3411 | |
464826d6 | 3412 | if (amdgpu_sriov_vf(adev)) { |
9f0178fb ML |
3413 | gfx_v9_0_cp_gfx_enable(adev, false); |
3414 | /* must disable polling for SRIOV when hw finished, otherwise | |
3415 | * CPC engine may still keep fetching WB address which is already | |
3416 | * invalid after sw finished and trigger DMAR reading error in | |
3417 | * hypervisor side. | |
3418 | */ | |
3419 | WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0); | |
464826d6 XY |
3420 | return 0; |
3421 | } | |
326aa996 AG |
3422 | |
3423 | /* Use deinitialize sequence from CAIL when unbinding device from driver, | |
3424 | * otherwise KIQ is hanging when binding back | |
3425 | */ | |
44779b43 | 3426 | if (!adev->in_gpu_reset && !adev->in_suspend) { |
326aa996 AG |
3427 | mutex_lock(&adev->srbm_mutex); |
3428 | soc15_grbm_select(adev, adev->gfx.kiq.ring.me, | |
3429 | adev->gfx.kiq.ring.pipe, | |
3430 | adev->gfx.kiq.ring.queue, 0); | |
3431 | gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring); | |
3432 | soc15_grbm_select(adev, 0, 0, 0, 0); | |
3433 | mutex_unlock(&adev->srbm_mutex); | |
3434 | } | |
3435 | ||
b1023571 KW |
3436 | gfx_v9_0_cp_enable(adev, false); |
3437 | gfx_v9_0_rlc_stop(adev); | |
b1023571 | 3438 | |
137dc4b9 EQ |
3439 | gfx_v9_0_csb_vram_unpin(adev); |
3440 | ||
b1023571 KW |
3441 | return 0; |
3442 | } | |
3443 | ||
3444 | static int gfx_v9_0_suspend(void *handle) | |
3445 | { | |
44779b43 | 3446 | return gfx_v9_0_hw_fini(handle); |
b1023571 KW |
3447 | } |
3448 | ||
3449 | static int gfx_v9_0_resume(void *handle) | |
3450 | { | |
44779b43 | 3451 | return gfx_v9_0_hw_init(handle); |
b1023571 KW |
3452 | } |
3453 | ||
3454 | static bool gfx_v9_0_is_idle(void *handle) | |
3455 | { | |
3456 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3457 | ||
5e78835a | 3458 | if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS), |
b1023571 KW |
3459 | GRBM_STATUS, GUI_ACTIVE)) |
3460 | return false; | |
3461 | else | |
3462 | return true; | |
3463 | } | |
3464 | ||
3465 | static int gfx_v9_0_wait_for_idle(void *handle) | |
3466 | { | |
3467 | unsigned i; | |
b1023571 KW |
3468 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3469 | ||
3470 | for (i = 0; i < adev->usec_timeout; i++) { | |
2b9bdfa7 | 3471 | if (gfx_v9_0_is_idle(handle)) |
b1023571 KW |
3472 | return 0; |
3473 | udelay(1); | |
3474 | } | |
3475 | return -ETIMEDOUT; | |
3476 | } | |
3477 | ||
b1023571 KW |
3478 | static int gfx_v9_0_soft_reset(void *handle) |
3479 | { | |
3480 | u32 grbm_soft_reset = 0; | |
3481 | u32 tmp; | |
3482 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3483 | ||
3484 | /* GRBM_STATUS */ | |
5e78835a | 3485 | tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS); |
b1023571 KW |
3486 | if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | |
3487 | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | | |
3488 | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | | |
3489 | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | | |
3490 | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | | |
3491 | GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { | |
3492 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
3493 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | |
3494 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
3495 | GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); | |
3496 | } | |
3497 | ||
3498 | if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { | |
3499 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
3500 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | |
3501 | } | |
3502 | ||
3503 | /* GRBM_STATUS2 */ | |
5e78835a | 3504 | tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2); |
b1023571 KW |
3505 | if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) |
3506 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | |
3507 | GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); | |
3508 | ||
3509 | ||
75bac5c6 | 3510 | if (grbm_soft_reset) { |
b1023571 KW |
3511 | /* stop the rlc */ |
3512 | gfx_v9_0_rlc_stop(adev); | |
3513 | ||
3514 | /* Disable GFX parsing/prefetching */ | |
3515 | gfx_v9_0_cp_gfx_enable(adev, false); | |
3516 | ||
3517 | /* Disable MEC parsing/prefetching */ | |
3518 | gfx_v9_0_cp_compute_enable(adev, false); | |
3519 | ||
3520 | if (grbm_soft_reset) { | |
5e78835a | 3521 | tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); |
b1023571 KW |
3522 | tmp |= grbm_soft_reset; |
3523 | dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); | |
5e78835a TSD |
3524 | WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); |
3525 | tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); | |
b1023571 KW |
3526 | |
3527 | udelay(50); | |
3528 | ||
3529 | tmp &= ~grbm_soft_reset; | |
5e78835a TSD |
3530 | WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp); |
3531 | tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET); | |
b1023571 KW |
3532 | } |
3533 | ||
3534 | /* Wait a little for things to settle down */ | |
3535 | udelay(50); | |
b1023571 KW |
3536 | } |
3537 | return 0; | |
3538 | } | |
3539 | ||
3540 | static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) | |
3541 | { | |
3542 | uint64_t clock; | |
3543 | ||
3544 | mutex_lock(&adev->gfx.gpu_clock_mutex); | |
5e78835a TSD |
3545 | WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); |
3546 | clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | | |
3547 | ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); | |
b1023571 KW |
3548 | mutex_unlock(&adev->gfx.gpu_clock_mutex); |
3549 | return clock; | |
3550 | } | |
3551 | ||
3552 | static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring, | |
3553 | uint32_t vmid, | |
3554 | uint32_t gds_base, uint32_t gds_size, | |
3555 | uint32_t gws_base, uint32_t gws_size, | |
3556 | uint32_t oa_base, uint32_t oa_size) | |
3557 | { | |
946a4d5b SL |
3558 | struct amdgpu_device *adev = ring->adev; |
3559 | ||
b1023571 KW |
3560 | /* GDS Base */ |
3561 | gfx_v9_0_write_data_to_reg(ring, 0, false, | |
946a4d5b | 3562 | SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid, |
b1023571 KW |
3563 | gds_base); |
3564 | ||
3565 | /* GDS Size */ | |
3566 | gfx_v9_0_write_data_to_reg(ring, 0, false, | |
946a4d5b | 3567 | SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid, |
b1023571 KW |
3568 | gds_size); |
3569 | ||
3570 | /* GWS */ | |
3571 | gfx_v9_0_write_data_to_reg(ring, 0, false, | |
946a4d5b | 3572 | SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid, |
b1023571 KW |
3573 | gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); |
3574 | ||
3575 | /* OA */ | |
3576 | gfx_v9_0_write_data_to_reg(ring, 0, false, | |
946a4d5b | 3577 | SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid, |
b1023571 KW |
3578 | (1 << (oa_size + oa_base)) - (1 << oa_base)); |
3579 | } | |
3580 | ||
3581 | static int gfx_v9_0_early_init(void *handle) | |
3582 | { | |
3583 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3584 | ||
3585 | adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS; | |
78c16834 | 3586 | adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS; |
b1023571 KW |
3587 | gfx_v9_0_set_ring_funcs(adev); |
3588 | gfx_v9_0_set_irq_funcs(adev); | |
3589 | gfx_v9_0_set_gds_init(adev); | |
3590 | gfx_v9_0_set_rlc_funcs(adev); | |
3591 | ||
3592 | return 0; | |
3593 | } | |
3594 | ||
3595 | static int gfx_v9_0_late_init(void *handle) | |
3596 | { | |
3597 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3598 | int r; | |
3599 | ||
3600 | r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); | |
3601 | if (r) | |
3602 | return r; | |
3603 | ||
3604 | r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); | |
3605 | if (r) | |
3606 | return r; | |
3607 | ||
3608 | return 0; | |
3609 | } | |
3610 | ||
3611 | static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | |
3612 | { | |
3613 | uint32_t rlc_setting, data; | |
3614 | unsigned i; | |
3615 | ||
3616 | if (adev->gfx.rlc.in_safe_mode) | |
3617 | return; | |
3618 | ||
3619 | /* if RLC is not enabled, do nothing */ | |
5e78835a | 3620 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); |
b1023571 KW |
3621 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) |
3622 | return; | |
3623 | ||
3624 | if (adev->cg_flags & | |
3625 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | |
3626 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | |
3627 | data = RLC_SAFE_MODE__CMD_MASK; | |
3628 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); | |
5e78835a | 3629 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); |
b1023571 KW |
3630 | |
3631 | /* wait for RLC_SAFE_MODE */ | |
3632 | for (i = 0; i < adev->usec_timeout; i++) { | |
226127a6 | 3633 | if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) |
b1023571 KW |
3634 | break; |
3635 | udelay(1); | |
3636 | } | |
3637 | adev->gfx.rlc.in_safe_mode = true; | |
3638 | } | |
3639 | } | |
3640 | ||
3641 | static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) | |
3642 | { | |
3643 | uint32_t rlc_setting, data; | |
3644 | ||
3645 | if (!adev->gfx.rlc.in_safe_mode) | |
3646 | return; | |
3647 | ||
3648 | /* if RLC is not enabled, do nothing */ | |
5e78835a | 3649 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); |
b1023571 KW |
3650 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) |
3651 | return; | |
3652 | ||
3653 | if (adev->cg_flags & | |
3654 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | |
3655 | /* | |
3656 | * Try to exit safe mode only if it is already in safe | |
3657 | * mode. | |
3658 | */ | |
3659 | data = RLC_SAFE_MODE__CMD_MASK; | |
5e78835a | 3660 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); |
b1023571 KW |
3661 | adev->gfx.rlc.in_safe_mode = false; |
3662 | } | |
3663 | } | |
3664 | ||
197f95c8 HZ |
3665 | static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, |
3666 | bool enable) | |
3667 | { | |
72408a41 | 3668 | gfx_v9_0_enter_rlc_safe_mode(adev); |
197f95c8 HZ |
3669 | |
3670 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { | |
3671 | gfx_v9_0_enable_gfx_cg_power_gating(adev, true); | |
3672 | if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) | |
3673 | gfx_v9_0_enable_gfx_pipeline_powergating(adev, true); | |
3674 | } else { | |
3675 | gfx_v9_0_enable_gfx_cg_power_gating(adev, false); | |
3676 | gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); | |
3677 | } | |
3678 | ||
72408a41 | 3679 | gfx_v9_0_exit_rlc_safe_mode(adev); |
197f95c8 HZ |
3680 | } |
3681 | ||
18924c71 HZ |
3682 | static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, |
3683 | bool enable) | |
3684 | { | |
3685 | /* TODO: double check if we need to perform under safe mode */ | |
3686 | /* gfx_v9_0_enter_rlc_safe_mode(adev); */ | |
3687 | ||
3688 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) | |
3689 | gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true); | |
3690 | else | |
3691 | gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false); | |
3692 | ||
3693 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) | |
3694 | gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true); | |
3695 | else | |
3696 | gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false); | |
3697 | ||
3698 | /* gfx_v9_0_exit_rlc_safe_mode(adev); */ | |
3699 | } | |
3700 | ||
b1023571 KW |
3701 | static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, |
3702 | bool enable) | |
3703 | { | |
3704 | uint32_t data, def; | |
3705 | ||
3706 | /* It is disabled by HW by default */ | |
3707 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { | |
3708 | /* 1 - RLC_CGTT_MGCG_OVERRIDE */ | |
5e78835a | 3709 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
c3693768 EQ |
3710 | |
3711 | if (adev->asic_type != CHIP_VEGA12) | |
3712 | data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; | |
3713 | ||
3714 | data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | | |
b1023571 KW |
3715 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | |
3716 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); | |
3717 | ||
3718 | /* only for Vega10 & Raven1 */ | |
3719 | data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; | |
3720 | ||
3721 | if (def != data) | |
5e78835a | 3722 | WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); |
b1023571 KW |
3723 | |
3724 | /* MGLS is a global flag to control all MGLS in GFX */ | |
3725 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { | |
3726 | /* 2 - RLC memory Light sleep */ | |
3727 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { | |
5e78835a | 3728 | def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); |
b1023571 KW |
3729 | data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; |
3730 | if (def != data) | |
5e78835a | 3731 | WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); |
b1023571 KW |
3732 | } |
3733 | /* 3 - CP memory Light sleep */ | |
3734 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { | |
5e78835a | 3735 | def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); |
b1023571 KW |
3736 | data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; |
3737 | if (def != data) | |
5e78835a | 3738 | WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); |
b1023571 KW |
3739 | } |
3740 | } | |
3741 | } else { | |
3742 | /* 1 - MGCG_OVERRIDE */ | |
5e78835a | 3743 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
c3693768 EQ |
3744 | |
3745 | if (adev->asic_type != CHIP_VEGA12) | |
3746 | data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK; | |
3747 | ||
3748 | data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | | |
b1023571 KW |
3749 | RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | |
3750 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | | |
3751 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); | |
c3693768 | 3752 | |
b1023571 | 3753 | if (def != data) |
5e78835a | 3754 | WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); |
b1023571 KW |
3755 | |
3756 | /* 2 - disable MGLS in RLC */ | |
5e78835a | 3757 | data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); |
b1023571 KW |
3758 | if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { |
3759 | data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; | |
5e78835a | 3760 | WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data); |
b1023571 KW |
3761 | } |
3762 | ||
3763 | /* 3 - disable MGLS in CP */ | |
5e78835a | 3764 | data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); |
b1023571 KW |
3765 | if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { |
3766 | data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; | |
5e78835a | 3767 | WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data); |
b1023571 KW |
3768 | } |
3769 | } | |
3770 | } | |
3771 | ||
3772 | static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, | |
3773 | bool enable) | |
3774 | { | |
3775 | uint32_t data, def; | |
3776 | ||
3777 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | |
3778 | ||
3779 | /* Enable 3D CGCG/CGLS */ | |
3780 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { | |
3781 | /* write cmd to clear cgcg/cgls ov */ | |
5e78835a | 3782 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
b1023571 KW |
3783 | /* unset CGCG override */ |
3784 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK; | |
3785 | /* update CGCG and CGLS override bits */ | |
3786 | if (def != data) | |
5e78835a | 3787 | WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); |
a5aedc2d EQ |
3788 | |
3789 | /* enable 3Dcgcg FSM(0x0000363f) */ | |
5e78835a | 3790 | def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); |
a5aedc2d EQ |
3791 | |
3792 | data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | | |
b1023571 KW |
3793 | RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK; |
3794 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) | |
3795 | data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) | | |
3796 | RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK; | |
3797 | if (def != data) | |
5e78835a | 3798 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); |
b1023571 KW |
3799 | |
3800 | /* set IDLE_POLL_COUNT(0x00900100) */ | |
5e78835a | 3801 | def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); |
b1023571 KW |
3802 | data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | |
3803 | (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); | |
3804 | if (def != data) | |
5e78835a | 3805 | WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); |
b1023571 KW |
3806 | } else { |
3807 | /* Disable CGCG/CGLS */ | |
5e78835a | 3808 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); |
b1023571 KW |
3809 | /* disable cgcg, cgls should be disabled */ |
3810 | data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK | | |
3811 | RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK); | |
3812 | /* disable cgcg and cgls in FSM */ | |
3813 | if (def != data) | |
5e78835a | 3814 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); |
b1023571 KW |
3815 | } |
3816 | ||
3817 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | |
3818 | } | |
3819 | ||
3820 | static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, | |
3821 | bool enable) | |
3822 | { | |
3823 | uint32_t def, data; | |
3824 | ||
3825 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | |
3826 | ||
3827 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { | |
5e78835a | 3828 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
b1023571 KW |
3829 | /* unset CGCG override */ |
3830 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; | |
3831 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) | |
3832 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; | |
3833 | else | |
3834 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; | |
3835 | /* update CGCG and CGLS override bits */ | |
3836 | if (def != data) | |
5e78835a | 3837 | WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); |
b1023571 | 3838 | |
a5aedc2d | 3839 | /* enable cgcg FSM(0x0000363F) */ |
5e78835a | 3840 | def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); |
a5aedc2d EQ |
3841 | |
3842 | data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | | |
b1023571 KW |
3843 | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; |
3844 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) | |
3845 | data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | | |
3846 | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; | |
3847 | if (def != data) | |
5e78835a | 3848 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); |
b1023571 KW |
3849 | |
3850 | /* set IDLE_POLL_COUNT(0x00900100) */ | |
5e78835a | 3851 | def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL); |
b1023571 KW |
3852 | data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | |
3853 | (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); | |
3854 | if (def != data) | |
5e78835a | 3855 | WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data); |
b1023571 | 3856 | } else { |
5e78835a | 3857 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); |
b1023571 KW |
3858 | /* reset CGCG/CGLS bits */ |
3859 | data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); | |
3860 | /* disable cgcg and cgls in FSM */ | |
3861 | if (def != data) | |
5e78835a | 3862 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); |
b1023571 KW |
3863 | } |
3864 | ||
3865 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | |
3866 | } | |
3867 | ||
3868 | static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, | |
3869 | bool enable) | |
3870 | { | |
3871 | if (enable) { | |
3872 | /* CGCG/CGLS should be enabled after MGCG/MGLS | |
3873 | * === MGCG + MGLS === | |
3874 | */ | |
3875 | gfx_v9_0_update_medium_grain_clock_gating(adev, enable); | |
3876 | /* === CGCG /CGLS for GFX 3D Only === */ | |
3877 | gfx_v9_0_update_3d_clock_gating(adev, enable); | |
3878 | /* === CGCG + CGLS === */ | |
3879 | gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); | |
3880 | } else { | |
3881 | /* CGCG/CGLS should be disabled before MGCG/MGLS | |
3882 | * === CGCG + CGLS === | |
3883 | */ | |
3884 | gfx_v9_0_update_coarse_grain_clock_gating(adev, enable); | |
3885 | /* === CGCG /CGLS for GFX 3D Only === */ | |
3886 | gfx_v9_0_update_3d_clock_gating(adev, enable); | |
3887 | /* === MGCG + MGLS === */ | |
3888 | gfx_v9_0_update_medium_grain_clock_gating(adev, enable); | |
3889 | } | |
3890 | return 0; | |
3891 | } | |
3892 | ||
3893 | static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { | |
3894 | .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, | |
3895 | .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode | |
3896 | }; | |
3897 | ||
3898 | static int gfx_v9_0_set_powergating_state(void *handle, | |
3899 | enum amd_powergating_state state) | |
3900 | { | |
5897c99e | 3901 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
197f95c8 | 3902 | bool enable = (state == AMD_PG_STATE_GATE) ? true : false; |
5897c99e HZ |
3903 | |
3904 | switch (adev->asic_type) { | |
3905 | case CHIP_RAVEN: | |
05df1f01 RZ |
3906 | if (!enable) { |
3907 | amdgpu_gfx_off_ctrl(adev, false); | |
3908 | cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); | |
3909 | } | |
5897c99e HZ |
3910 | if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { |
3911 | gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); | |
3912 | gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); | |
3913 | } else { | |
3914 | gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false); | |
3915 | gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false); | |
3916 | } | |
3917 | ||
3918 | if (adev->pg_flags & AMD_PG_SUPPORT_CP) | |
3919 | gfx_v9_0_enable_cp_power_gating(adev, true); | |
3920 | else | |
3921 | gfx_v9_0_enable_cp_power_gating(adev, false); | |
197f95c8 HZ |
3922 | |
3923 | /* update gfx cgpg state */ | |
3924 | gfx_v9_0_update_gfx_cg_power_gating(adev, enable); | |
18924c71 HZ |
3925 | |
3926 | /* update mgcg state */ | |
3927 | gfx_v9_0_update_gfx_mg_power_gating(adev, enable); | |
9134c6d7 | 3928 | |
05df1f01 RZ |
3929 | if (enable) |
3930 | amdgpu_gfx_off_ctrl(adev, true); | |
991a6b32 EQ |
3931 | break; |
3932 | case CHIP_VEGA12: | |
05df1f01 RZ |
3933 | if (!enable) { |
3934 | amdgpu_gfx_off_ctrl(adev, false); | |
3935 | cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work); | |
3936 | } else { | |
3937 | amdgpu_gfx_off_ctrl(adev, true); | |
3938 | } | |
5897c99e HZ |
3939 | break; |
3940 | default: | |
3941 | break; | |
3942 | } | |
3943 | ||
b1023571 KW |
3944 | return 0; |
3945 | } | |
3946 | ||
3947 | static int gfx_v9_0_set_clockgating_state(void *handle, | |
3948 | enum amd_clockgating_state state) | |
3949 | { | |
3950 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3951 | ||
fb82afab XY |
3952 | if (amdgpu_sriov_vf(adev)) |
3953 | return 0; | |
3954 | ||
b1023571 KW |
3955 | switch (adev->asic_type) { |
3956 | case CHIP_VEGA10: | |
23862464 | 3957 | case CHIP_VEGA12: |
28b576b2 | 3958 | case CHIP_VEGA20: |
a4dc61f5 | 3959 | case CHIP_RAVEN: |
b1023571 KW |
3960 | gfx_v9_0_update_gfx_clock_gating(adev, |
3961 | state == AMD_CG_STATE_GATE ? true : false); | |
3962 | break; | |
3963 | default: | |
3964 | break; | |
3965 | } | |
3966 | return 0; | |
3967 | } | |
3968 | ||
12ad27fa HR |
3969 | static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags) |
3970 | { | |
3971 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | |
3972 | int data; | |
3973 | ||
3974 | if (amdgpu_sriov_vf(adev)) | |
3975 | *flags = 0; | |
3976 | ||
3977 | /* AMD_CG_SUPPORT_GFX_MGCG */ | |
5e78835a | 3978 | data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
12ad27fa HR |
3979 | if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) |
3980 | *flags |= AMD_CG_SUPPORT_GFX_MGCG; | |
3981 | ||
3982 | /* AMD_CG_SUPPORT_GFX_CGCG */ | |
5e78835a | 3983 | data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); |
12ad27fa HR |
3984 | if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) |
3985 | *flags |= AMD_CG_SUPPORT_GFX_CGCG; | |
3986 | ||
3987 | /* AMD_CG_SUPPORT_GFX_CGLS */ | |
3988 | if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) | |
3989 | *flags |= AMD_CG_SUPPORT_GFX_CGLS; | |
3990 | ||
3991 | /* AMD_CG_SUPPORT_GFX_RLC_LS */ | |
5e78835a | 3992 | data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); |
12ad27fa HR |
3993 | if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) |
3994 | *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; | |
3995 | ||
3996 | /* AMD_CG_SUPPORT_GFX_CP_LS */ | |
5e78835a | 3997 | data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); |
12ad27fa HR |
3998 | if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) |
3999 | *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; | |
4000 | ||
4001 | /* AMD_CG_SUPPORT_GFX_3D_CGCG */ | |
5e78835a | 4002 | data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); |
12ad27fa HR |
4003 | if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) |
4004 | *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; | |
4005 | ||
4006 | /* AMD_CG_SUPPORT_GFX_3D_CGLS */ | |
4007 | if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK) | |
4008 | *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS; | |
4009 | } | |
4010 | ||
b1023571 KW |
4011 | static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) |
4012 | { | |
4013 | return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr*/ | |
4014 | } | |
4015 | ||
4016 | static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) | |
4017 | { | |
4018 | struct amdgpu_device *adev = ring->adev; | |
4019 | u64 wptr; | |
4020 | ||
4021 | /* XXX check if swapping is necessary on BE */ | |
4022 | if (ring->use_doorbell) { | |
4023 | wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]); | |
4024 | } else { | |
5e78835a TSD |
4025 | wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR); |
4026 | wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32; | |
b1023571 KW |
4027 | } |
4028 | ||
4029 | return wptr; | |
4030 | } | |
4031 | ||
4032 | static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) | |
4033 | { | |
4034 | struct amdgpu_device *adev = ring->adev; | |
4035 | ||
4036 | if (ring->use_doorbell) { | |
4037 | /* XXX check if swapping is necessary on BE */ | |
4038 | atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); | |
4039 | WDOORBELL64(ring->doorbell_index, ring->wptr); | |
4040 | } else { | |
5e78835a TSD |
4041 | WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr)); |
4042 | WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr)); | |
b1023571 KW |
4043 | } |
4044 | } | |
4045 | ||
4046 | static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | |
4047 | { | |
946a4d5b | 4048 | struct amdgpu_device *adev = ring->adev; |
b1023571 | 4049 | u32 ref_and_mask, reg_mem_engine; |
bf383fb6 | 4050 | const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; |
b1023571 KW |
4051 | |
4052 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { | |
4053 | switch (ring->me) { | |
4054 | case 1: | |
4055 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; | |
4056 | break; | |
4057 | case 2: | |
4058 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; | |
4059 | break; | |
4060 | default: | |
4061 | return; | |
4062 | } | |
4063 | reg_mem_engine = 0; | |
4064 | } else { | |
4065 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; | |
4066 | reg_mem_engine = 1; /* pfp */ | |
4067 | } | |
4068 | ||
4069 | gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, | |
946a4d5b SL |
4070 | adev->nbio_funcs->get_hdp_flush_req_offset(adev), |
4071 | adev->nbio_funcs->get_hdp_flush_done_offset(adev), | |
b1023571 KW |
4072 | ref_and_mask, ref_and_mask, 0x20); |
4073 | } | |
4074 | ||
b1023571 KW |
4075 | static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, |
4076 | struct amdgpu_ib *ib, | |
c4f46f22 | 4077 | unsigned vmid, bool ctx_switch) |
b1023571 | 4078 | { |
eaa05d52 | 4079 | u32 header, control = 0; |
b1023571 | 4080 | |
eaa05d52 ML |
4081 | if (ib->flags & AMDGPU_IB_FLAG_CE) |
4082 | header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); | |
4083 | else | |
4084 | header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); | |
b1023571 | 4085 | |
c4f46f22 | 4086 | control |= ib->length_dw | (vmid << 24); |
b1023571 | 4087 | |
635e7132 | 4088 | if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) { |
eaa05d52 | 4089 | control |= INDIRECT_BUFFER_PRE_ENB(1); |
9ccd52eb | 4090 | |
635e7132 ML |
4091 | if (!(ib->flags & AMDGPU_IB_FLAG_CE)) |
4092 | gfx_v9_0_ring_emit_de_meta(ring); | |
4093 | } | |
4094 | ||
eaa05d52 | 4095 | amdgpu_ring_write(ring, header); |
72408a41 | 4096 | BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ |
eaa05d52 | 4097 | amdgpu_ring_write(ring, |
b1023571 | 4098 | #ifdef __BIG_ENDIAN |
eaa05d52 | 4099 | (2 << 0) | |
b1023571 | 4100 | #endif |
eaa05d52 ML |
4101 | lower_32_bits(ib->gpu_addr)); |
4102 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | |
4103 | amdgpu_ring_write(ring, control); | |
b1023571 KW |
4104 | } |
4105 | ||
b1023571 KW |
4106 | static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, |
4107 | struct amdgpu_ib *ib, | |
c4f46f22 | 4108 | unsigned vmid, bool ctx_switch) |
b1023571 | 4109 | { |
c4f46f22 | 4110 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); |
b1023571 KW |
4111 | |
4112 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | |
4113 | BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ | |
4114 | amdgpu_ring_write(ring, | |
4115 | #ifdef __BIG_ENDIAN | |
4116 | (2 << 0) | | |
4117 | #endif | |
4118 | lower_32_bits(ib->gpu_addr)); | |
4119 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | |
4120 | amdgpu_ring_write(ring, control); | |
4121 | } | |
4122 | ||
4123 | static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, | |
4124 | u64 seq, unsigned flags) | |
4125 | { | |
4126 | bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; | |
4127 | bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; | |
d240cd9e | 4128 | bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; |
b1023571 KW |
4129 | |
4130 | /* RELEASE_MEM - flush caches, send int */ | |
4131 | amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); | |
d240cd9e MO |
4132 | amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | |
4133 | EOP_TC_NC_ACTION_EN) : | |
4134 | (EOP_TCL1_ACTION_EN | | |
4135 | EOP_TC_ACTION_EN | | |
4136 | EOP_TC_WB_ACTION_EN | | |
4137 | EOP_TC_MD_ACTION_EN)) | | |
b1023571 KW |
4138 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | |
4139 | EVENT_INDEX(5))); | |
4140 | amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); | |
4141 | ||
4142 | /* | |
4143 | * the address should be Qword aligned if 64bit write, Dword | |
4144 | * aligned if only send 32bit data low (discard data high) | |
4145 | */ | |
4146 | if (write64bit) | |
4147 | BUG_ON(addr & 0x7); | |
4148 | else | |
4149 | BUG_ON(addr & 0x3); | |
4150 | amdgpu_ring_write(ring, lower_32_bits(addr)); | |
4151 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
4152 | amdgpu_ring_write(ring, lower_32_bits(seq)); | |
4153 | amdgpu_ring_write(ring, upper_32_bits(seq)); | |
4154 | amdgpu_ring_write(ring, 0); | |
4155 | } | |
4156 | ||
4157 | static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) | |
4158 | { | |
4159 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); | |
4160 | uint32_t seq = ring->fence_drv.sync_seq; | |
4161 | uint64_t addr = ring->fence_drv.gpu_addr; | |
4162 | ||
4163 | gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0, | |
4164 | lower_32_bits(addr), upper_32_bits(addr), | |
4165 | seq, 0xffffffff, 4); | |
4166 | } | |
4167 | ||
4168 | static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |
c633c00b | 4169 | unsigned vmid, uint64_t pd_addr) |
b1023571 | 4170 | { |
c633c00b | 4171 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
b1023571 | 4172 | |
b1023571 | 4173 | /* compute doesn't have PFP */ |
9096d6e5 | 4174 | if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) { |
b1023571 KW |
4175 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ |
4176 | amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | |
4177 | amdgpu_ring_write(ring, 0x0); | |
b1023571 KW |
4178 | } |
4179 | } | |
4180 | ||
4181 | static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring) | |
4182 | { | |
4183 | return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ | |
4184 | } | |
4185 | ||
4186 | static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring) | |
4187 | { | |
4188 | u64 wptr; | |
4189 | ||
4190 | /* XXX check if swapping is necessary on BE */ | |
4191 | if (ring->use_doorbell) | |
4192 | wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]); | |
4193 | else | |
4194 | BUG(); | |
4195 | return wptr; | |
4196 | } | |
4197 | ||
761c77c1 AR |
4198 | static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring, |
4199 | bool acquire) | |
4200 | { | |
4201 | struct amdgpu_device *adev = ring->adev; | |
4202 | int pipe_num, tmp, reg; | |
4203 | int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1; | |
4204 | ||
4205 | pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe; | |
4206 | ||
4207 | /* first me only has 2 entries, GFX and HP3D */ | |
4208 | if (ring->me > 0) | |
4209 | pipe_num -= 2; | |
4210 | ||
4211 | reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num; | |
4212 | tmp = RREG32(reg); | |
4213 | tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent); | |
4214 | WREG32(reg, tmp); | |
4215 | } | |
4216 | ||
4217 | static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev, | |
4218 | struct amdgpu_ring *ring, | |
4219 | bool acquire) | |
4220 | { | |
4221 | int i, pipe; | |
4222 | bool reserve; | |
4223 | struct amdgpu_ring *iring; | |
4224 | ||
4225 | mutex_lock(&adev->gfx.pipe_reserve_mutex); | |
4226 | pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0); | |
4227 | if (acquire) | |
4228 | set_bit(pipe, adev->gfx.pipe_reserve_bitmap); | |
4229 | else | |
4230 | clear_bit(pipe, adev->gfx.pipe_reserve_bitmap); | |
4231 | ||
4232 | if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) { | |
4233 | /* Clear all reservations - everyone reacquires all resources */ | |
4234 | for (i = 0; i < adev->gfx.num_gfx_rings; ++i) | |
4235 | gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i], | |
4236 | true); | |
4237 | ||
4238 | for (i = 0; i < adev->gfx.num_compute_rings; ++i) | |
4239 | gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i], | |
4240 | true); | |
4241 | } else { | |
4242 | /* Lower all pipes without a current reservation */ | |
4243 | for (i = 0; i < adev->gfx.num_gfx_rings; ++i) { | |
4244 | iring = &adev->gfx.gfx_ring[i]; | |
4245 | pipe = amdgpu_gfx_queue_to_bit(adev, | |
4246 | iring->me, | |
4247 | iring->pipe, | |
4248 | 0); | |
4249 | reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); | |
4250 | gfx_v9_0_ring_set_pipe_percent(iring, reserve); | |
4251 | } | |
4252 | ||
4253 | for (i = 0; i < adev->gfx.num_compute_rings; ++i) { | |
4254 | iring = &adev->gfx.compute_ring[i]; | |
4255 | pipe = amdgpu_gfx_queue_to_bit(adev, | |
4256 | iring->me, | |
4257 | iring->pipe, | |
4258 | 0); | |
4259 | reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap); | |
4260 | gfx_v9_0_ring_set_pipe_percent(iring, reserve); | |
4261 | } | |
4262 | } | |
4263 | ||
4264 | mutex_unlock(&adev->gfx.pipe_reserve_mutex); | |
4265 | } | |
4266 | ||
4267 | static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev, | |
4268 | struct amdgpu_ring *ring, | |
4269 | bool acquire) | |
4270 | { | |
4271 | uint32_t pipe_priority = acquire ? 0x2 : 0x0; | |
4272 | uint32_t queue_priority = acquire ? 0xf : 0x0; | |
4273 | ||
4274 | mutex_lock(&adev->srbm_mutex); | |
4275 | soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0); | |
4276 | ||
4277 | WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority); | |
4278 | WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority); | |
4279 | ||
4280 | soc15_grbm_select(adev, 0, 0, 0, 0); | |
4281 | mutex_unlock(&adev->srbm_mutex); | |
4282 | } | |
4283 | ||
4284 | static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring, | |
4285 | enum drm_sched_priority priority) | |
4286 | { | |
4287 | struct amdgpu_device *adev = ring->adev; | |
4288 | bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW; | |
4289 | ||
4290 | if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE) | |
4291 | return; | |
4292 | ||
4293 | gfx_v9_0_hqd_set_priority(adev, ring, acquire); | |
4294 | gfx_v9_0_pipe_reserve_resources(adev, ring, acquire); | |
4295 | } | |
4296 | ||
b1023571 KW |
4297 | static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring) |
4298 | { | |
4299 | struct amdgpu_device *adev = ring->adev; | |
4300 | ||
4301 | /* XXX check if swapping is necessary on BE */ | |
4302 | if (ring->use_doorbell) { | |
4303 | atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr); | |
4304 | WDOORBELL64(ring->doorbell_index, ring->wptr); | |
4305 | } else{ | |
4306 | BUG(); /* only DOORBELL method supported on gfx9 now */ | |
4307 | } | |
4308 | } | |
4309 | ||
aa6faa44 XY |
4310 | static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, |
4311 | u64 seq, unsigned int flags) | |
4312 | { | |
cd29253f SL |
4313 | struct amdgpu_device *adev = ring->adev; |
4314 | ||
aa6faa44 XY |
4315 | /* we only allocate 32bit for each seq wb address */ |
4316 | BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); | |
4317 | ||
4318 | /* write fence seq to the "addr" */ | |
4319 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
4320 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
4321 | WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); | |
4322 | amdgpu_ring_write(ring, lower_32_bits(addr)); | |
4323 | amdgpu_ring_write(ring, upper_32_bits(addr)); | |
4324 | amdgpu_ring_write(ring, lower_32_bits(seq)); | |
4325 | ||
4326 | if (flags & AMDGPU_FENCE_FLAG_INT) { | |
4327 | /* set register to trigger INT */ | |
4328 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | |
4329 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | |
4330 | WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); | |
4331 | amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS)); | |
4332 | amdgpu_ring_write(ring, 0); | |
4333 | amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ | |
4334 | } | |
4335 | } | |
4336 | ||
b1023571 KW |
4337 | static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring) |
4338 | { | |
4339 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | |
4340 | amdgpu_ring_write(ring, 0); | |
4341 | } | |
4342 | ||
cca02cd3 XY |
4343 | static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring) |
4344 | { | |
d81a2209 | 4345 | struct v9_ce_ib_state ce_payload = {0}; |
cca02cd3 XY |
4346 | uint64_t csa_addr; |
4347 | int cnt; | |
4348 | ||
4349 | cnt = (sizeof(ce_payload) >> 2) + 4 - 2; | |
6f05c4e9 | 4350 | csa_addr = amdgpu_csa_vaddr(ring->adev); |
cca02cd3 XY |
4351 | |
4352 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); | |
4353 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | | |
4354 | WRITE_DATA_DST_SEL(8) | | |
4355 | WR_CONFIRM) | | |
4356 | WRITE_DATA_CACHE_POLICY(0)); | |
4357 | amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); | |
4358 | amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload))); | |
4359 | amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2); | |
4360 | } | |
4361 | ||
4362 | static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring) | |
4363 | { | |
d81a2209 | 4364 | struct v9_de_ib_state de_payload = {0}; |
cca02cd3 XY |
4365 | uint64_t csa_addr, gds_addr; |
4366 | int cnt; | |
4367 | ||
6f05c4e9 | 4368 | csa_addr = amdgpu_csa_vaddr(ring->adev); |
cca02cd3 XY |
4369 | gds_addr = csa_addr + 4096; |
4370 | de_payload.gds_backup_addrlo = lower_32_bits(gds_addr); | |
4371 | de_payload.gds_backup_addrhi = upper_32_bits(gds_addr); | |
4372 | ||
4373 | cnt = (sizeof(de_payload) >> 2) + 4 - 2; | |
4374 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt)); | |
4375 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | | |
4376 | WRITE_DATA_DST_SEL(8) | | |
4377 | WR_CONFIRM) | | |
4378 | WRITE_DATA_CACHE_POLICY(0)); | |
4379 | amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); | |
4380 | amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload))); | |
4381 | amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2); | |
4382 | } | |
4383 | ||
2ea6ab27 ML |
4384 | static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start) |
4385 | { | |
4386 | amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0)); | |
4387 | amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */ | |
4388 | } | |
4389 | ||
b1023571 KW |
4390 | static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) |
4391 | { | |
4392 | uint32_t dw2 = 0; | |
4393 | ||
cca02cd3 XY |
4394 | if (amdgpu_sriov_vf(ring->adev)) |
4395 | gfx_v9_0_ring_emit_ce_meta(ring); | |
4396 | ||
2ea6ab27 ML |
4397 | gfx_v9_0_ring_emit_tmz(ring, true); |
4398 | ||
b1023571 KW |
4399 | dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ |
4400 | if (flags & AMDGPU_HAVE_CTX_SWITCH) { | |
4401 | /* set load_global_config & load_global_uconfig */ | |
4402 | dw2 |= 0x8001; | |
4403 | /* set load_cs_sh_regs */ | |
4404 | dw2 |= 0x01000000; | |
4405 | /* set load_per_context_state & load_gfx_sh_regs for GFX */ | |
4406 | dw2 |= 0x10002; | |
4407 | ||
4408 | /* set load_ce_ram if preamble presented */ | |
4409 | if (AMDGPU_PREAMBLE_IB_PRESENT & flags) | |
4410 | dw2 |= 0x10000000; | |
4411 | } else { | |
4412 | /* still load_ce_ram if this is the first time preamble presented | |
4413 | * although there is no context switch happens. | |
4414 | */ | |
4415 | if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags) | |
4416 | dw2 |= 0x10000000; | |
4417 | } | |
4418 | ||
4419 | amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); | |
4420 | amdgpu_ring_write(ring, dw2); | |
4421 | amdgpu_ring_write(ring, 0); | |
4422 | } | |
4423 | ||
9a5e02b5 ML |
4424 | static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring) |
4425 | { | |
4426 | unsigned ret; | |
4427 | amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3)); | |
4428 | amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr)); | |
4429 | amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr)); | |
4430 | amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */ | |
4431 | ret = ring->wptr & ring->buf_mask; | |
4432 | amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */ | |
4433 | return ret; | |
4434 | } | |
4435 | ||
4436 | static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset) | |
4437 | { | |
4438 | unsigned cur; | |
4439 | BUG_ON(offset > ring->buf_mask); | |
4440 | BUG_ON(ring->ring[offset] != 0x55aa55aa); | |
4441 | ||
4442 | cur = (ring->wptr & ring->buf_mask) - 1; | |
4443 | if (likely(cur > offset)) | |
4444 | ring->ring[offset] = cur - offset; | |
4445 | else | |
4446 | ring->ring[offset] = (ring->ring_size>>2) - offset + cur; | |
4447 | } | |
4448 | ||
aa6faa44 XY |
4449 | static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) |
4450 | { | |
4451 | struct amdgpu_device *adev = ring->adev; | |
4452 | ||
4453 | amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); | |
4454 | amdgpu_ring_write(ring, 0 | /* src: register*/ | |
4455 | (5 << 8) | /* dst: memory */ | |
4456 | (1 << 20)); /* write confirm */ | |
4457 | amdgpu_ring_write(ring, reg); | |
4458 | amdgpu_ring_write(ring, 0); | |
4459 | amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + | |
4460 | adev->virt.reg_val_offs * 4)); | |
4461 | amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + | |
4462 | adev->virt.reg_val_offs * 4)); | |
4463 | } | |
4464 | ||
4465 | static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, | |
254e825b | 4466 | uint32_t val) |
aa6faa44 | 4467 | { |
254e825b CK |
4468 | uint32_t cmd = 0; |
4469 | ||
4470 | switch (ring->funcs->type) { | |
4471 | case AMDGPU_RING_TYPE_GFX: | |
4472 | cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; | |
4473 | break; | |
4474 | case AMDGPU_RING_TYPE_KIQ: | |
4475 | cmd = (1 << 16); /* no inc addr */ | |
4476 | break; | |
4477 | default: | |
4478 | cmd = WR_CONFIRM; | |
4479 | break; | |
4480 | } | |
aa6faa44 | 4481 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
254e825b | 4482 | amdgpu_ring_write(ring, cmd); |
aa6faa44 XY |
4483 | amdgpu_ring_write(ring, reg); |
4484 | amdgpu_ring_write(ring, 0); | |
4485 | amdgpu_ring_write(ring, val); | |
4486 | } | |
4487 | ||
230fcc34 CK |
4488 | static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, |
4489 | uint32_t val, uint32_t mask) | |
4490 | { | |
4491 | gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); | |
4492 | } | |
4493 | ||
10ed3c31 AD |
4494 | static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, |
4495 | uint32_t reg0, uint32_t reg1, | |
4496 | uint32_t ref, uint32_t mask) | |
4497 | { | |
4498 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); | |
39b62541 ED |
4499 | struct amdgpu_device *adev = ring->adev; |
4500 | bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ? | |
4501 | adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait; | |
10ed3c31 | 4502 | |
39b62541 | 4503 | if (fw_version_ok) |
58cd8fbc CK |
4504 | gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, |
4505 | ref, mask, 0x20); | |
4506 | else | |
4507 | amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, | |
4508 | ref, mask); | |
10ed3c31 AD |
4509 | } |
4510 | ||
80dbea47 CK |
4511 | static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) |
4512 | { | |
4513 | struct amdgpu_device *adev = ring->adev; | |
4514 | uint32_t value = 0; | |
4515 | ||
4516 | value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); | |
4517 | value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); | |
4518 | value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); | |
4519 | value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); | |
4520 | WREG32(mmSQ_CMD, value); | |
4521 | } | |
4522 | ||
b1023571 KW |
4523 | static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, |
4524 | enum amdgpu_interrupt_state state) | |
4525 | { | |
b1023571 KW |
4526 | switch (state) { |
4527 | case AMDGPU_IRQ_STATE_DISABLE: | |
b1023571 | 4528 | case AMDGPU_IRQ_STATE_ENABLE: |
9da2c652 TSD |
4529 | WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, |
4530 | TIME_STAMP_INT_ENABLE, | |
4531 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); | |
b1023571 KW |
4532 | break; |
4533 | default: | |
4534 | break; | |
4535 | } | |
4536 | } | |
4537 | ||
4538 | static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, | |
4539 | int me, int pipe, | |
4540 | enum amdgpu_interrupt_state state) | |
4541 | { | |
4542 | u32 mec_int_cntl, mec_int_cntl_reg; | |
4543 | ||
4544 | /* | |
d0c55cdf AD |
4545 | * amdgpu controls only the first MEC. That's why this function only |
4546 | * handles the setting of interrupts for this specific MEC. All other | |
b1023571 KW |
4547 | * pipes' interrupts are set by amdkfd. |
4548 | */ | |
4549 | ||
4550 | if (me == 1) { | |
4551 | switch (pipe) { | |
4552 | case 0: | |
4553 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); | |
4554 | break; | |
d0c55cdf AD |
4555 | case 1: |
4556 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL); | |
4557 | break; | |
4558 | case 2: | |
4559 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL); | |
4560 | break; | |
4561 | case 3: | |
4562 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL); | |
4563 | break; | |
b1023571 KW |
4564 | default: |
4565 | DRM_DEBUG("invalid pipe %d\n", pipe); | |
4566 | return; | |
4567 | } | |
4568 | } else { | |
4569 | DRM_DEBUG("invalid me %d\n", me); | |
4570 | return; | |
4571 | } | |
4572 | ||
4573 | switch (state) { | |
4574 | case AMDGPU_IRQ_STATE_DISABLE: | |
4575 | mec_int_cntl = RREG32(mec_int_cntl_reg); | |
4576 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, | |
4577 | TIME_STAMP_INT_ENABLE, 0); | |
4578 | WREG32(mec_int_cntl_reg, mec_int_cntl); | |
4579 | break; | |
4580 | case AMDGPU_IRQ_STATE_ENABLE: | |
4581 | mec_int_cntl = RREG32(mec_int_cntl_reg); | |
4582 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, | |
4583 | TIME_STAMP_INT_ENABLE, 1); | |
4584 | WREG32(mec_int_cntl_reg, mec_int_cntl); | |
4585 | break; | |
4586 | default: | |
4587 | break; | |
4588 | } | |
4589 | } | |
4590 | ||
4591 | static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev, | |
4592 | struct amdgpu_irq_src *source, | |
4593 | unsigned type, | |
4594 | enum amdgpu_interrupt_state state) | |
4595 | { | |
b1023571 KW |
4596 | switch (state) { |
4597 | case AMDGPU_IRQ_STATE_DISABLE: | |
b1023571 | 4598 | case AMDGPU_IRQ_STATE_ENABLE: |
8dd553e1 TSD |
4599 | WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, |
4600 | PRIV_REG_INT_ENABLE, | |
4601 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); | |
b1023571 KW |
4602 | break; |
4603 | default: | |
4604 | break; | |
4605 | } | |
4606 | ||
4607 | return 0; | |
4608 | } | |
4609 | ||
4610 | static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev, | |
4611 | struct amdgpu_irq_src *source, | |
4612 | unsigned type, | |
4613 | enum amdgpu_interrupt_state state) | |
4614 | { | |
b1023571 KW |
4615 | switch (state) { |
4616 | case AMDGPU_IRQ_STATE_DISABLE: | |
b1023571 | 4617 | case AMDGPU_IRQ_STATE_ENABLE: |
98709ca6 TSD |
4618 | WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0, |
4619 | PRIV_INSTR_INT_ENABLE, | |
4620 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); | |
b1023571 KW |
4621 | default: |
4622 | break; | |
4623 | } | |
4624 | ||
4625 | return 0; | |
4626 | } | |
4627 | ||
4628 | static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev, | |
4629 | struct amdgpu_irq_src *src, | |
4630 | unsigned type, | |
4631 | enum amdgpu_interrupt_state state) | |
4632 | { | |
4633 | switch (type) { | |
4634 | case AMDGPU_CP_IRQ_GFX_EOP: | |
4635 | gfx_v9_0_set_gfx_eop_interrupt_state(adev, state); | |
4636 | break; | |
4637 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: | |
4638 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state); | |
4639 | break; | |
4640 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: | |
4641 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state); | |
4642 | break; | |
4643 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: | |
4644 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state); | |
4645 | break; | |
4646 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: | |
4647 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state); | |
4648 | break; | |
4649 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: | |
4650 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state); | |
4651 | break; | |
4652 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: | |
4653 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state); | |
4654 | break; | |
4655 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: | |
4656 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state); | |
4657 | break; | |
4658 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: | |
4659 | gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state); | |
4660 | break; | |
4661 | default: | |
4662 | break; | |
4663 | } | |
4664 | return 0; | |
4665 | } | |
4666 | ||
4667 | static int gfx_v9_0_eop_irq(struct amdgpu_device *adev, | |
4668 | struct amdgpu_irq_src *source, | |
4669 | struct amdgpu_iv_entry *entry) | |
4670 | { | |
4671 | int i; | |
4672 | u8 me_id, pipe_id, queue_id; | |
4673 | struct amdgpu_ring *ring; | |
4674 | ||
4675 | DRM_DEBUG("IH: CP EOP\n"); | |
4676 | me_id = (entry->ring_id & 0x0c) >> 2; | |
4677 | pipe_id = (entry->ring_id & 0x03) >> 0; | |
4678 | queue_id = (entry->ring_id & 0x70) >> 4; | |
4679 | ||
4680 | switch (me_id) { | |
4681 | case 0: | |
4682 | amdgpu_fence_process(&adev->gfx.gfx_ring[0]); | |
4683 | break; | |
4684 | case 1: | |
4685 | case 2: | |
4686 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | |
4687 | ring = &adev->gfx.compute_ring[i]; | |
4688 | /* Per-queue interrupt is supported for MEC starting from VI. | |
4689 | * The interrupt can only be enabled/disabled per pipe instead of per queue. | |
4690 | */ | |
4691 | if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) | |
4692 | amdgpu_fence_process(ring); | |
4693 | } | |
4694 | break; | |
4695 | } | |
4696 | return 0; | |
4697 | } | |
4698 | ||
4699 | static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, | |
4700 | struct amdgpu_irq_src *source, | |
4701 | struct amdgpu_iv_entry *entry) | |
4702 | { | |
4703 | DRM_ERROR("Illegal register access in command stream\n"); | |
4704 | schedule_work(&adev->reset_work); | |
4705 | return 0; | |
4706 | } | |
4707 | ||
4708 | static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, | |
4709 | struct amdgpu_irq_src *source, | |
4710 | struct amdgpu_iv_entry *entry) | |
4711 | { | |
4712 | DRM_ERROR("Illegal instruction in command stream\n"); | |
4713 | schedule_work(&adev->reset_work); | |
4714 | return 0; | |
4715 | } | |
4716 | ||
fa04b6ba | 4717 | static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { |
b1023571 KW |
4718 | .name = "gfx_v9_0", |
4719 | .early_init = gfx_v9_0_early_init, | |
4720 | .late_init = gfx_v9_0_late_init, | |
4721 | .sw_init = gfx_v9_0_sw_init, | |
4722 | .sw_fini = gfx_v9_0_sw_fini, | |
4723 | .hw_init = gfx_v9_0_hw_init, | |
4724 | .hw_fini = gfx_v9_0_hw_fini, | |
4725 | .suspend = gfx_v9_0_suspend, | |
4726 | .resume = gfx_v9_0_resume, | |
4727 | .is_idle = gfx_v9_0_is_idle, | |
4728 | .wait_for_idle = gfx_v9_0_wait_for_idle, | |
4729 | .soft_reset = gfx_v9_0_soft_reset, | |
4730 | .set_clockgating_state = gfx_v9_0_set_clockgating_state, | |
4731 | .set_powergating_state = gfx_v9_0_set_powergating_state, | |
12ad27fa | 4732 | .get_clockgating_state = gfx_v9_0_get_clockgating_state, |
b1023571 KW |
4733 | }; |
4734 | ||
4735 | static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { | |
4736 | .type = AMDGPU_RING_TYPE_GFX, | |
4737 | .align_mask = 0xff, | |
4738 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | |
4739 | .support_64bit_ptrs = true, | |
0eeb68b3 | 4740 | .vmhub = AMDGPU_GFXHUB, |
b1023571 KW |
4741 | .get_rptr = gfx_v9_0_ring_get_rptr_gfx, |
4742 | .get_wptr = gfx_v9_0_ring_get_wptr_gfx, | |
4743 | .set_wptr = gfx_v9_0_ring_set_wptr_gfx, | |
e9d672b2 ML |
4744 | .emit_frame_size = /* totally 242 maximum if 16 IBs */ |
4745 | 5 + /* COND_EXEC */ | |
4746 | 7 + /* PIPELINE_SYNC */ | |
f732b6b3 CK |
4747 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + |
4748 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + | |
4749 | 2 + /* VM_FLUSH */ | |
e9d672b2 ML |
4750 | 8 + /* FENCE for VM_FLUSH */ |
4751 | 20 + /* GDS switch */ | |
4752 | 4 + /* double SWITCH_BUFFER, | |
4753 | the first COND_EXEC jump to the place just | |
4754 | prior to this double SWITCH_BUFFER */ | |
4755 | 5 + /* COND_EXEC */ | |
4756 | 7 + /* HDP_flush */ | |
4757 | 4 + /* VGT_flush */ | |
4758 | 14 + /* CE_META */ | |
4759 | 31 + /* DE_META */ | |
4760 | 3 + /* CNTX_CTRL */ | |
4761 | 5 + /* HDP_INVL */ | |
4762 | 8 + 8 + /* FENCE x2 */ | |
4763 | 2, /* SWITCH_BUFFER */ | |
b1023571 KW |
4764 | .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */ |
4765 | .emit_ib = gfx_v9_0_ring_emit_ib_gfx, | |
4766 | .emit_fence = gfx_v9_0_ring_emit_fence, | |
4767 | .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, | |
4768 | .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, | |
4769 | .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, | |
4770 | .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, | |
b1023571 KW |
4771 | .test_ring = gfx_v9_0_ring_test_ring, |
4772 | .test_ib = gfx_v9_0_ring_test_ib, | |
4773 | .insert_nop = amdgpu_ring_insert_nop, | |
4774 | .pad_ib = amdgpu_ring_generic_pad_ib, | |
4775 | .emit_switch_buffer = gfx_v9_ring_emit_sb, | |
4776 | .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl, | |
9a5e02b5 ML |
4777 | .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec, |
4778 | .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec, | |
3b4d68e9 | 4779 | .emit_tmz = gfx_v9_0_ring_emit_tmz, |
254e825b | 4780 | .emit_wreg = gfx_v9_0_ring_emit_wreg, |
230fcc34 | 4781 | .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, |
10ed3c31 | 4782 | .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, |
80dbea47 | 4783 | .soft_recovery = gfx_v9_0_ring_soft_recovery, |
b1023571 KW |
4784 | }; |
4785 | ||
4786 | static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { | |
4787 | .type = AMDGPU_RING_TYPE_COMPUTE, | |
4788 | .align_mask = 0xff, | |
4789 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | |
4790 | .support_64bit_ptrs = true, | |
0eeb68b3 | 4791 | .vmhub = AMDGPU_GFXHUB, |
b1023571 KW |
4792 | .get_rptr = gfx_v9_0_ring_get_rptr_compute, |
4793 | .get_wptr = gfx_v9_0_ring_get_wptr_compute, | |
4794 | .set_wptr = gfx_v9_0_ring_set_wptr_compute, | |
4795 | .emit_frame_size = | |
4796 | 20 + /* gfx_v9_0_ring_emit_gds_switch */ | |
4797 | 7 + /* gfx_v9_0_ring_emit_hdp_flush */ | |
2ee150cd | 4798 | 5 + /* hdp invalidate */ |
b1023571 | 4799 | 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ |
f732b6b3 CK |
4800 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + |
4801 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + | |
4802 | 2 + /* gfx_v9_0_ring_emit_vm_flush */ | |
b1023571 KW |
4803 | 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ |
4804 | .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ | |
4805 | .emit_ib = gfx_v9_0_ring_emit_ib_compute, | |
4806 | .emit_fence = gfx_v9_0_ring_emit_fence, | |
4807 | .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync, | |
4808 | .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush, | |
4809 | .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch, | |
4810 | .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush, | |
b1023571 KW |
4811 | .test_ring = gfx_v9_0_ring_test_ring, |
4812 | .test_ib = gfx_v9_0_ring_test_ib, | |
4813 | .insert_nop = amdgpu_ring_insert_nop, | |
4814 | .pad_ib = amdgpu_ring_generic_pad_ib, | |
761c77c1 | 4815 | .set_priority = gfx_v9_0_ring_set_priority_compute, |
254e825b | 4816 | .emit_wreg = gfx_v9_0_ring_emit_wreg, |
230fcc34 | 4817 | .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, |
10ed3c31 | 4818 | .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, |
b1023571 KW |
4819 | }; |
4820 | ||
aa6faa44 XY |
4821 | static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { |
4822 | .type = AMDGPU_RING_TYPE_KIQ, | |
4823 | .align_mask = 0xff, | |
4824 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), | |
4825 | .support_64bit_ptrs = true, | |
0eeb68b3 | 4826 | .vmhub = AMDGPU_GFXHUB, |
aa6faa44 XY |
4827 | .get_rptr = gfx_v9_0_ring_get_rptr_compute, |
4828 | .get_wptr = gfx_v9_0_ring_get_wptr_compute, | |
4829 | .set_wptr = gfx_v9_0_ring_set_wptr_compute, | |
4830 | .emit_frame_size = | |
4831 | 20 + /* gfx_v9_0_ring_emit_gds_switch */ | |
4832 | 7 + /* gfx_v9_0_ring_emit_hdp_flush */ | |
2ee150cd | 4833 | 5 + /* hdp invalidate */ |
aa6faa44 | 4834 | 7 + /* gfx_v9_0_ring_emit_pipeline_sync */ |
f732b6b3 CK |
4835 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + |
4836 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + | |
4837 | 2 + /* gfx_v9_0_ring_emit_vm_flush */ | |
aa6faa44 XY |
4838 | 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ |
4839 | .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ | |
4840 | .emit_ib = gfx_v9_0_ring_emit_ib_compute, | |
4841 | .emit_fence = gfx_v9_0_ring_emit_fence_kiq, | |
aa6faa44 XY |
4842 | .test_ring = gfx_v9_0_ring_test_ring, |
4843 | .test_ib = gfx_v9_0_ring_test_ib, | |
4844 | .insert_nop = amdgpu_ring_insert_nop, | |
4845 | .pad_ib = amdgpu_ring_generic_pad_ib, | |
4846 | .emit_rreg = gfx_v9_0_ring_emit_rreg, | |
4847 | .emit_wreg = gfx_v9_0_ring_emit_wreg, | |
230fcc34 | 4848 | .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, |
10ed3c31 | 4849 | .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, |
aa6faa44 | 4850 | }; |
b1023571 KW |
4851 | |
4852 | static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) | |
4853 | { | |
4854 | int i; | |
4855 | ||
aa6faa44 XY |
4856 | adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq; |
4857 | ||
b1023571 KW |
4858 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
4859 | adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx; | |
4860 | ||
4861 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | |
4862 | adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute; | |
4863 | } | |
4864 | ||
4865 | static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = { | |
4866 | .set = gfx_v9_0_set_eop_interrupt_state, | |
4867 | .process = gfx_v9_0_eop_irq, | |
4868 | }; | |
4869 | ||
4870 | static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = { | |
4871 | .set = gfx_v9_0_set_priv_reg_fault_state, | |
4872 | .process = gfx_v9_0_priv_reg_irq, | |
4873 | }; | |
4874 | ||
4875 | static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { | |
4876 | .set = gfx_v9_0_set_priv_inst_fault_state, | |
4877 | .process = gfx_v9_0_priv_inst_irq, | |
4878 | }; | |
4879 | ||
4880 | static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) | |
4881 | { | |
4882 | adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; | |
4883 | adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs; | |
4884 | ||
4885 | adev->gfx.priv_reg_irq.num_types = 1; | |
4886 | adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs; | |
4887 | ||
4888 | adev->gfx.priv_inst_irq.num_types = 1; | |
4889 | adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; | |
4890 | } | |
4891 | ||
4892 | static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) | |
4893 | { | |
4894 | switch (adev->asic_type) { | |
4895 | case CHIP_VEGA10: | |
8b399477 | 4896 | case CHIP_VEGA12: |
61324ddc | 4897 | case CHIP_VEGA20: |
a4dc61f5 | 4898 | case CHIP_RAVEN: |
b1023571 KW |
4899 | adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; |
4900 | break; | |
4901 | default: | |
4902 | break; | |
4903 | } | |
4904 | } | |
4905 | ||
4906 | static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) | |
4907 | { | |
4908 | /* init asci gds info */ | |
8bda1013 ED |
4909 | switch (adev->asic_type) { |
4910 | case CHIP_VEGA10: | |
4911 | case CHIP_VEGA12: | |
4912 | case CHIP_VEGA20: | |
4913 | adev->gds.mem.total_size = 0x10000; | |
4914 | break; | |
4915 | case CHIP_RAVEN: | |
4916 | adev->gds.mem.total_size = 0x1000; | |
4917 | break; | |
4918 | default: | |
4919 | adev->gds.mem.total_size = 0x10000; | |
4920 | break; | |
4921 | } | |
4922 | ||
b1023571 KW |
4923 | adev->gds.gws.total_size = 64; |
4924 | adev->gds.oa.total_size = 16; | |
4925 | ||
4926 | if (adev->gds.mem.total_size == 64 * 1024) { | |
4927 | adev->gds.mem.gfx_partition_size = 4096; | |
4928 | adev->gds.mem.cs_partition_size = 4096; | |
4929 | ||
4930 | adev->gds.gws.gfx_partition_size = 4; | |
4931 | adev->gds.gws.cs_partition_size = 4; | |
4932 | ||
4933 | adev->gds.oa.gfx_partition_size = 4; | |
4934 | adev->gds.oa.cs_partition_size = 1; | |
4935 | } else { | |
4936 | adev->gds.mem.gfx_partition_size = 1024; | |
4937 | adev->gds.mem.cs_partition_size = 1024; | |
4938 | ||
4939 | adev->gds.gws.gfx_partition_size = 16; | |
4940 | adev->gds.gws.cs_partition_size = 16; | |
4941 | ||
4942 | adev->gds.oa.gfx_partition_size = 4; | |
4943 | adev->gds.oa.cs_partition_size = 4; | |
4944 | } | |
4945 | } | |
4946 | ||
c94d38f0 NH |
4947 | static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, |
4948 | u32 bitmap) | |
4949 | { | |
4950 | u32 data; | |
4951 | ||
4952 | if (!bitmap) | |
4953 | return; | |
4954 | ||
4955 | data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; | |
4956 | data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; | |
4957 | ||
4958 | WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data); | |
4959 | } | |
4960 | ||
b1023571 KW |
4961 | static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev) |
4962 | { | |
4963 | u32 data, mask; | |
4964 | ||
5e78835a TSD |
4965 | data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG); |
4966 | data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG); | |
b1023571 KW |
4967 | |
4968 | data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; | |
4969 | data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; | |
4970 | ||
378506a7 | 4971 | mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh); |
b1023571 KW |
4972 | |
4973 | return (~data) & mask; | |
4974 | } | |
4975 | ||
4976 | static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, | |
4977 | struct amdgpu_cu_info *cu_info) | |
4978 | { | |
4979 | int i, j, k, counter, active_cu_number = 0; | |
4980 | u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; | |
c94d38f0 | 4981 | unsigned disable_masks[4 * 2]; |
b1023571 KW |
4982 | |
4983 | if (!adev || !cu_info) | |
4984 | return -EINVAL; | |
4985 | ||
c94d38f0 NH |
4986 | amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); |
4987 | ||
b1023571 KW |
4988 | mutex_lock(&adev->grbm_idx_mutex); |
4989 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | |
4990 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | |
4991 | mask = 1; | |
4992 | ao_bitmap = 0; | |
4993 | counter = 0; | |
4994 | gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); | |
c94d38f0 NH |
4995 | if (i < 4 && j < 2) |
4996 | gfx_v9_0_set_user_cu_inactive_bitmap( | |
4997 | adev, disable_masks[i * 2 + j]); | |
b1023571 KW |
4998 | bitmap = gfx_v9_0_get_cu_active_bitmap(adev); |
4999 | cu_info->bitmap[i][j] = bitmap; | |
5000 | ||
fe723cd3 | 5001 | for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { |
b1023571 | 5002 | if (bitmap & mask) { |
fe723cd3 | 5003 | if (counter < adev->gfx.config.max_cu_per_sh) |
b1023571 KW |
5004 | ao_bitmap |= mask; |
5005 | counter ++; | |
5006 | } | |
5007 | mask <<= 1; | |
5008 | } | |
5009 | active_cu_number += counter; | |
dbfe85ea FC |
5010 | if (i < 2 && j < 2) |
5011 | ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); | |
5012 | cu_info->ao_cu_bitmap[i][j] = ao_bitmap; | |
b1023571 KW |
5013 | } |
5014 | } | |
5015 | gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); | |
5016 | mutex_unlock(&adev->grbm_idx_mutex); | |
5017 | ||
5018 | cu_info->number = active_cu_number; | |
5019 | cu_info->ao_cu_mask = ao_cu_mask; | |
d5a114a6 | 5020 | cu_info->simd_per_cu = NUM_SIMD_PER_CU; |
b1023571 KW |
5021 | |
5022 | return 0; | |
5023 | } | |
5024 | ||
b1023571 KW |
5025 | const struct amdgpu_ip_block_version gfx_v9_0_ip_block = |
5026 | { | |
5027 | .type = AMD_IP_BLOCK_TYPE_GFX, | |
5028 | .major = 9, | |
5029 | .minor = 0, | |
5030 | .rev = 0, | |
5031 | .funcs = &gfx_v9_0_ip_funcs, | |
5032 | }; |