/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT    28
#define MC_SEQ_MISC0_GDDR5_MASK     0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE    5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

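/*
 * Preset power-profile table. Judging by the layout of struct
 * profile_mode_setting, each row reads as {bupdate_sclk, sclk_up_hyst,
 * sclk_down_hyst, sclk_activity, bupdate_mclk, mclk_up_hyst,
 * mclk_down_hyst, mclk_activity}.
 */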
static const struct profile_mode_setting smu7_profiling[6] =
			{{1, 0, 100, 30, 1, 0, 100, 10},
			 {1, 10, 0, 30, 0, 0, 0, 0},
			 {0, 0, 0, 0, 1, 10, 16, 31},
			 {1, 0, 11, 50, 1, 0, 100, 10},
			 {1, 0, 5, 30, 0, 0, 0, 0},
			 {0, 0, 0, 0, 0, 0, 0, 0},
			};

#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK            0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK         0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT          0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT       0x00000006

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
	DPM_EVENT_SRC_ANALOG = 0,
	DPM_EVENT_SRC_EXTERNAL = 1,
	DPM_EVENT_SRC_DIGITAL = 2,
	DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask);

static struct smu7_power_state *cast_phw_smu7_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				return NULL);

	return (const struct smu7_power_state *)hw_ps;
}

/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
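	/* MC_SEQ_IO_DEBUG index 0x9F appears to select the debug word that
	 * holds the MC firmware version. */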
	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

	hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

	return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
	uint32_t speedCntl = 0;

	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
	speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
			ixPCIE_LC_SPEED_CNTL);
	return((uint16_t)PHM_GET_FIELD(speedCntl,
			PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
	uint32_t link_width;

	/* mmPCIE_PORT_INDEX renamed to mmPCIE_INDEX */
	link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

	PP_ASSERT_WITH_CODE((7 >= link_width),
			"Invalid PCIe lane width!", return 0);

	return decode_pcie_lane_width(link_width);
}

/**
 * Enable the SMC voltage controller
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
	if (hwmgr->chip_id == CHIP_VEGAM) {
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
		PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
	}

	if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);

	return 0;
}

/**
 * Checks if we want to support voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
	const struct smu7_hwmgr *data =
			(const struct smu7_hwmgr *)(hwmgr->backend);

	return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * Enable voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
	/* enable voltage control */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

	return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
		struct phm_clock_voltage_dependency_table *voltage_dependency_table
		)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
			"Voltage Dependency Table empty.", return -EINVAL;);

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;
	voltage_table->count = voltage_dependency_table->count;

	for (i = 0; i < voltage_dependency_table->count; i++) {
		voltage_table->entries[i].value =
			voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}


/**
 * Create Voltage Tables.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	int result = 0;
	uint32_t tmp;

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve MVDD table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
					hwmgr->dyn_state.mvdd_dependency_on_mclk);

		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 MVDD table from dependency table.",
				return result;);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDCI table.",
				return result);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
		if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
					table_info->vdd_dep_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
					hwmgr->dyn_state.vddci_dependency_on_mclk);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDCI table from dependency table.",
				return result);
	}

	if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
		/* VDDGFX has only SVI2 voltage control */
		result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
					table_info->vddgfx_lookup_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
	}


	if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
		result = atomctrl_get_voltage_table_v3(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
				&data->vddc_voltage_table);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to retrieve VDDC table.", return result;);
	} else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

		if (hwmgr->pp_table_version == PP_TABLE_V0)
			result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
					hwmgr->dyn_state.vddc_dependency_on_mclk);
		else if (hwmgr->pp_table_version == PP_TABLE_V1)
			result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
				table_info->vddc_lookup_table);

		PP_ASSERT_WITH_CODE((0 == result),
			"Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
	}

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
	PP_ASSERT_WITH_CODE(
			(data->vddc_voltage_table.count <= tmp),
		"Too many voltage values for VDDC. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
						&(data->vddc_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
	PP_ASSERT_WITH_CODE(
			(data->vddgfx_voltage_table.count <= tmp),
		"Too many voltage values for VDDGFX. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
						&(data->vddgfx_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
	PP_ASSERT_WITH_CODE(
			(data->vddci_voltage_table.count <= tmp),
		"Too many voltage values for VDDCI. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->vddci_voltage_table)));

	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
	PP_ASSERT_WITH_CODE(
			(data->mvdd_voltage_table.count <= tmp),
		"Too many voltage values for MVDD. Trimming to fit state table.",
			phm_trim_voltage_table_to_fit_state_table(tmp,
					&(data->mvdd_voltage_table)));

	return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_static_screen_threshold_parameters(
							struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Set static screen threshold unit */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
			data->static_screen_threshold_unit);
	/* Set static screen threshold */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
			data->static_screen_threshold);

	return 0;
}

/**
 * Setup display gap for glitch free memory clock switching.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
	uint32_t display_gap =
			cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_DISPLAY_GAP_CNTL);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
					DISP_GAP, DISPLAY_GAP_IGNORE);

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
					DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			ixCG_DISPLAY_GAP_CNTL, display_gap);

	return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int i;

	/* Clear reset for voting clients before enabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					ixCG_FREQ_TRAN_VOTING_0 + i * 4,
					data->voting_rights_clients[i]);
	return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	int i;

	/* Reset voting clients before disabling DPM */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

	return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
		uint32_t arb_src, uint32_t arb_dest)
{
	uint32_t mc_arb_dram_timing;
	uint32_t mc_arb_dram_timing2;
	uint32_t burst_time;
	uint32_t mc_cg_config;

	switch (arb_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
		burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
		break;
	default:
		return -EINVAL;
	}

	switch (arb_dest) {
	case MC_CG_ARB_FREQ_F0:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
		break;
	case MC_CG_ARB_FREQ_F1:
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
		break;
	default:
		return -EINVAL;
	}

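	/*
	 * Setting the low four bits of MC_CG_CONFIG appears to latch the
	 * copied timings into the MC sequencer state machines before the
	 * active arbiter set is switched via MC_ARB_CG.CG_ARB_REQ below.
	 */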
	mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
	mc_cg_config |= 0x0000000F;
	cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
	PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

	return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}

/**
 * Initial switch from ARB F0->F1
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
	return smu7_copy_and_switch_arb_sets(hwmgr,
			MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
	uint32_t tmp;

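	/* Bits 15:8 of SMC_SCRATCH9 appear to track the currently active MC
	 * arbiter set; skip the copy/switch if F0 is already selected. */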
	tmp = (cgs_read_ind_register(hwmgr->device,
			CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
			0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return smu7_copy_and_switch_arb_sets(hwmgr,
			tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *pcie_table = NULL;

	uint32_t i, max_entry;
	uint32_t tmp;

	PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
			data->use_pcie_power_saving_levels), "No pcie performance levels!",
			return -EINVAL);

	if (table_info != NULL)
		pcie_table = table_info->pcie_table;

	if (data->use_pcie_performance_levels &&
			!data->use_pcie_power_saving_levels) {
		data->pcie_gen_power_saving = data->pcie_gen_performance;
		data->pcie_lane_power_saving = data->pcie_lane_performance;
	} else if (!data->use_pcie_performance_levels &&
			data->use_pcie_power_saving_levels) {
		data->pcie_gen_performance = data->pcie_gen_power_saving;
		data->pcie_lane_performance = data->pcie_lane_power_saving;
	}
	tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
	phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
					tmp,
					MAX_REGULAR_DPM_NUMBER);

	if (pcie_table != NULL) {
		/* max_entry is used to make sure we reserve one PCIE level
		 * for boot level (fix for A+A PSPP issue).
		 * If the PCIE table from the PPTable has a ULV entry plus
		 * 8 entries, then ignore the last entry.*/
		max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
		for (i = 1; i < max_entry; i++) {
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
					get_pcie_gen_support(data->pcie_gen_cap,
							pcie_table->entries[i].gen_speed),
					get_pcie_lane_support(data->pcie_lane_cap,
							pcie_table->entries[i].lane_width));
		}
		data->dpm_table.pcie_speed_table.count = max_entry - 1;
		smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
	} else {
		/* Hardcode Pcie Table */
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Min_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				get_pcie_lane_support(data->pcie_lane_cap,
						PP_Max_PCIELane));

		data->dpm_table.pcie_speed_table.count = 6;
	}
	/* Populate last level for boot PCIE level, but do not increment count. */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
			phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
				get_pcie_gen_support(data->pcie_gen_cap,
						PP_Max_PCIEGen),
				data->vbios_boot_state.pcie_lane_bootup_value);
	} else {
		phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
			data->dpm_table.pcie_speed_table.count,
			get_pcie_gen_support(data->pcie_gen_cap,
					PP_Min_PCIEGen),
			get_pcie_lane_support(data->pcie_lane_cap,
					PP_Max_PCIELane));
	}
	return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

	phm_reset_single_dpm_table(
			&data->dpm_table.sclk_table,
				smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_GRAPHICS),
					MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.mclk_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.vddc_table,
				smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_VDDC),
					MAX_REGULAR_DPM_NUMBER);
	phm_reset_single_dpm_table(
			&data->dpm_table.vddci_table,
			smum_get_mac_definition(hwmgr,
				SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

	phm_reset_single_dpm_table(
			&data->dpm_table.mvdd_table,
				smum_get_mac_definition(hwmgr,
					SMU_MAX_LEVELS_MVDD),
					MAX_REGULAR_DPM_NUMBER);
	return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
		hwmgr->dyn_state.vddc_dependency_on_mclk;
	struct phm_cac_leakage_table *std_voltage_table =
		hwmgr->dyn_state.cac_leakage_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
		"SCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
		"SCLK dependency table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
		"MCLK dependency table is empty. This table is mandatory", return -EINVAL);


	/* Initialize Sclk DPM table based on the allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;

	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
				allowed_vdd_sclk_table->entries[i].clk) {
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
				allowed_vdd_sclk_table->entries[i].clk;
			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.sclk_table.count++;
		}
	}

	PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
		"MCLK dependency table is missing. This table is mandatory", return -EINVAL);
	/* Initialize Mclk DPM table based on the allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
			allowed_vdd_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
				allowed_vdd_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
			data->dpm_table.mclk_table.count++;
		}
	}

	/* Initialize Vddc DPM table based on the allowed Vddc values. And populate corresponding std values. */
	for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
		data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
		data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
		/* param1 is for corresponding std voltage */
		data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
	}

	data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
	allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/* Initialize Vddci DPM table based on the allowed Mclk values */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.vddci_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
	}

	allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

	if (NULL != allowed_vdd_mclk_table) {
		/*
		 * Initialize MVDD DPM table based on the allowed Mclk
		 * values
		 */
		for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
			data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
			data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
		}
		data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
	}

	return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
			"SCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
			"SCLK dependency table count is 0.",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
			"MCLK dependency table is missing.",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table count is 0.",
			return -EINVAL);

	/* Initialize Sclk DPM table based on the allowed Sclk values */
	data->dpm_table.sclk_table.count = 0;
	for (i = 0; i < dep_sclk_table->count; i++) {
		if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
				dep_sclk_table->entries[i].clk) {

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
					dep_sclk_table->entries[i].clk;

			data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
					(i == 0) ? true : false;
			data->dpm_table.sclk_table.count++;
		}
	}
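	/* If the pptable provides no overdrive limit, fall back to the highest
	 * entry of the dependency table (likewise for memoryClock below). */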
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
	/* Initialize Mclk DPM table based on the allowed Mclk values */
	data->dpm_table.mclk_table.count = 0;
	for (i = 0; i < dep_mclk_table->count; i++) {
		if (i == 0 || data->dpm_table.mclk_table.dpm_levels
				[data->dpm_table.mclk_table.count - 1].value !=
						dep_mclk_table->entries[i].clk) {
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
							dep_mclk_table->entries[i].clk;
			data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
							(i == 0) ? true : false;
			data->dpm_table.mclk_table.count++;
		}
	}

	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
	return 0;
}

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
	struct phm_odn_performance_level *entries;

	if (table_info == NULL)
		return -EINVAL;

	dep_sclk_table = table_info->vdd_dep_on_sclk;
	dep_mclk_table = table_info->vdd_dep_on_mclk;

	odn_table->odn_core_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.sclk_table.count;
	entries = odn_table->odn_core_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_sclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

	odn_table->odn_memory_clock_dpm_levels.num_of_pl =
						data->golden_dpm_table.mclk_table.count;
	entries = odn_table->odn_memory_clock_dpm_levels.entries;
	for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
		entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
		entries[i].enabled = true;
		entries[i].vddc = dep_mclk_table->entries[i].vddc;
	}

	smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
		(struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

	return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t min_vddc = 0;
	uint32_t max_vddc = 0;

	if (!table_info)
		return;

	dep_sclk_table = table_info->vdd_dep_on_sclk;

	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

	if (min_vddc == 0 || min_vddc > 2000
		|| min_vddc > dep_sclk_table->entries[0].vddc)
		min_vddc = dep_sclk_table->entries[0].vddc;

	if (max_vddc == 0 || max_vddc > 2000
		|| max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
		max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

	data->odn_dpm_table.min_vddc = min_vddc;
	data->odn_dpm_table.max_vddc = max_vddc;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	smu7_reset_dpm_tables(hwmgr);

	if (hwmgr->pp_table_version == PP_TABLE_V1)
		smu7_setup_dpm_tables_v1(hwmgr);
	else if (hwmgr->pp_table_version == PP_TABLE_V0)
		smu7_setup_dpm_tables_v0(hwmgr);

	smu7_setup_default_pcie_table(hwmgr);

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct smu7_dpm_table));

	/* initialize ODN table */
	if (hwmgr->od_enabled) {
		if (data->odn_dpm_table.max_vddc) {
			smu7_check_dpm_table_updated(hwmgr);
		} else {
			smu7_setup_voltage_range_from_vbios(hwmgr);
			smu7_odn_initial_default_setting(hwmgr);
		}
	}
	return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot))
		return smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableVRHotGPIOInterrupt);

	return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);

	return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->ulv_supported)
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);

	return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
			PP_ASSERT_WITH_CODE(false,
					"Attempt to enable Master Deep Sleep switch failed!",
					return -EINVAL);
	} else {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep)) {
		if (smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MASTER_DeepSleep_OFF)) {
			PP_ASSERT_WITH_CODE(false,
					"Attempt to disable Master Deep Sleep switch failed!",
					return -EINVAL);
		}
	}

	return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t soft_register_value = 0;
	uint32_t handshake_disables_offset = data->soft_regs_start
				+ smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, HandshakeDisables);

	soft_register_value = cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC, handshake_disables_offset);
	soft_register_value |= smum_get_mac_definition(hwmgr,
					SMU_UVD_MCLK_HANDSHAKE_DISABLE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			handshake_disables_offset, soft_register_value);
	return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		if (hwmgr->chip_id == CHIP_VEGAM)
			smu7_disable_sclk_vce_handshake(hwmgr);

		PP_ASSERT_WITH_CODE(
		(0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
				"Failed to enable SCLK DPM during DPM Start Function!",
				return -EINVAL);
	}

	/* enable MCLK dpm */
	if (0 == data->mclk_dpm_key_disabled) {
		if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
			smu7_disable_handshake_uvd(hwmgr);

		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_MCLKDPM_Enable)),
				"Failed to enable MCLK DPM during DPM Start Function!",
				return -EINVAL);

		/* CHIP_VEGAM is a chip_id, not a chip_family, so compare chip_id */
		if (hwmgr->chip_id != CHIP_VEGAM)
			PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);

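		/*
		 * Raw SMC indirect writes: the 0xc0400dxx addresses used on CI
		 * have no ixLCAC_* defines in this driver, but they are
		 * presumably the CI equivalents of the ixLCAC_MC0/MC1/CPL_CNTL
		 * programming done in the else branch.
		 */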
		if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
			udelay(10);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
		} else {
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
			udelay(10);
			if (hwmgr->chip_id == CHIP_VEGAM) {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
			} else {
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
				cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
			}
			cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
		}
	}

	return 0;
}

static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
						VoltageChangeTimeout), 0x1000);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

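	/* Register 0x1488 has no symbolic define in this driver; on CI its
	 * low bit is cleared here before SCLK/MCLK DPM is started. */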
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}

static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable SCLK dpm */
	if (!data->sclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable SCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
	}

	/* disable MCLK dpm */
	if (!data->mclk_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to disable MCLK DPM when DPM is disabled",
				return 0);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
	}

	return 0;
}

static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* disable general power management */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 0);
	/* disable sclk deep sleep */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 0);

	/* disable PCIE dpm */
	if (!data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Disable) == 0),
				"Failed to disable pcie DPM during DPM Stop Function!",
				return -EINVAL);
	}

	smu7_disable_sclk_mclk_dpm(hwmgr);

	PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
			"Trying to disable voltage DPM when DPM is disabled",
			return 0);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);

	return 0;
}

static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
{
	bool protection;
	enum DPM_EVENT_SRC src;

	switch (sources) {
	default:
		pr_err("Unknown throttling event sources.");
		/* fall through */
	case 0:
		protection = false;
		/* src is unused */
		break;
	case (1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << PHM_AutoThrottleSource_External):
		protection = true;
		src = DPM_EVENT_SRC_EXTERNAL;
		break;
	case (1 << PHM_AutoThrottleSource_External) |
			(1 << PHM_AutoThrottleSource_Thermal):
		protection = true;
		src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
		break;
	}
	/* Order matters - don't enable thermal protection for the wrong source. */
	if (protection) {
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
				DPM_EVENT_SRC, src);
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS,
				!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ThermalController));
	} else
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
				THERMAL_PROTECTION_DIS, 1);
}

static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (!(data->active_auto_throttle_sources & (1 << source))) {
		data->active_auto_throttle_sources |= 1 << source;
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
		PHM_AutoThrottleSource source)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (data->active_auto_throttle_sources & (1 << source)) {
		data->active_auto_throttle_sources &= ~(1 << source);
		smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
	}
	return 0;
}

static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
{
	return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
}

static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->pcie_performance_request = true;

	return 0;
}

f8a4c11b | 1324 | static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
1325 | { |
1326 | int tmp_result = 0; | |
1327 | int result = 0; | |
1328 | ||
599a7e9f RZ |
1329 | if (smu7_voltage_control(hwmgr)) { |
1330 | tmp_result = smu7_enable_voltage_control(hwmgr); | |
1331 | PP_ASSERT_WITH_CODE(tmp_result == 0, | |
1332 | "Failed to enable voltage control!", | |
1333 | result = tmp_result); | |
1334 | ||
1335 | tmp_result = smu7_construct_voltage_tables(hwmgr); | |
1336 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3d3c4f1b | 1337 | "Failed to construct voltage tables!", |
599a7e9f RZ |
1338 | result = tmp_result); |
1339 | } | |
1340 | smum_initialize_mc_reg_table(hwmgr); | |
1341 | ||
1342 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1343 | PHM_PlatformCaps_EngineSpreadSpectrumSupport)) | |
1344 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
1345 | GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1); | |
1346 | ||
1347 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1348 | PHM_PlatformCaps_ThermalController)) | |
1349 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
1350 | GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0); | |
1351 | ||
1352 | tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr); | |
1353 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1354 | "Failed to program static screen threshold parameters!", | |
1355 | result = tmp_result); | |
1356 | ||
1357 | tmp_result = smu7_enable_display_gap(hwmgr); | |
1358 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1359 | "Failed to enable display gap!", result = tmp_result); | |
1360 | ||
1361 | tmp_result = smu7_program_voting_clients(hwmgr); | |
1362 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1363 | "Failed to program voting clients!", result = tmp_result); | |
1364 | ||
1365 | tmp_result = smum_process_firmware_header(hwmgr); | |
1366 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1367 | "Failed to process firmware header!", result = tmp_result); | |
1368 | ||
0c24e7ef EH |
1369 | if (hwmgr->chip_id != CHIP_VEGAM) { |
1370 | tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); | |
1371 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1372 | "Failed to initialize switch from ArbF0 to F1!", | |
1373 | result = tmp_result); | |
1374 | } | |
599a7e9f RZ |
1375 | |
1376 | result = smu7_setup_default_dpm_tables(hwmgr); | |
1377 | PP_ASSERT_WITH_CODE(0 == result, | |
1378 | "Failed to setup default DPM tables!", return result); | |
1379 | ||
1380 | tmp_result = smum_init_smc_table(hwmgr); | |
1381 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1382 | "Failed to initialize SMC table!", result = tmp_result); | |
1383 | ||
1384 | tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr); | |
1385 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1386 | "Failed to enable VR hot GPIO interrupt!", result = tmp_result); | |
1387 | ||
d3f8c0ab | 1388 | smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay); |
599a7e9f RZ |
1389 | |
1390 | tmp_result = smu7_enable_sclk_control(hwmgr); | |
1391 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1392 | "Failed to enable SCLK control!", result = tmp_result); | |
1393 | ||
1394 | tmp_result = smu7_enable_smc_voltage_controller(hwmgr); | |
1395 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1396 | "Failed to enable SMC voltage controller!", result = tmp_result); | |
1397 | ||
1398 | tmp_result = smu7_enable_ulv(hwmgr); | |
1399 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1400 | "Failed to enable ULV!", result = tmp_result); | |
1401 | ||
1402 | tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr); | |
1403 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1404 | "Failed to enable deep sleep master switch!", result = tmp_result); | |
1405 | ||
1406 | tmp_result = smu7_enable_didt_config(hwmgr); | |
1407 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1408 | "Failed to enable DIDT config!", result = tmp_result); | |
1409 | ||
1410 | tmp_result = smu7_start_dpm(hwmgr); | |
1411 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1412 | "Failed to start DPM!", result = tmp_result); | |
1413 | ||
1414 | tmp_result = smu7_enable_smc_cac(hwmgr); | |
1415 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1416 | "Failed to enable SMC CAC!", result = tmp_result); | |
1417 | ||
1418 | tmp_result = smu7_enable_power_containment(hwmgr); | |
1419 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1420 | "Failed to enable power containment!", result = tmp_result); | |
1421 | ||
1422 | tmp_result = smu7_power_control_set_level(hwmgr); | |
1423 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1424 | "Failed to power control set level!", result = tmp_result); | |
1425 | ||
1426 | tmp_result = smu7_enable_thermal_auto_throttle(hwmgr); | |
1427 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1428 | "Failed to enable thermal auto throttle!", result = tmp_result); | |
1429 | ||
1430 | tmp_result = smu7_pcie_performance_request(hwmgr); | |
1431 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
1432 | "PCIe performance request failed!", result = tmp_result); | |
1433 | ||
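	/*
	 * Descriptive note: "result" holds the first failure from the steps
	 * above, but it is not returned here; bring-up continues (and reports
	 * success) even if an individual DPM feature failed to enable.
	 */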
1434 | return 0; | |
1435 | } | |
1436 | ||
3c9d1fde RZ |
1437 | static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable) |
1438 | { | |
116af450 | 1439 | if (!hwmgr->avfs_supported) |
3c9d1fde RZ |
1440 | return 0; |
1441 | ||
1442 | if (enable) { | |
1443 | if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, | |
1444 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { | |
1445 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( | |
1446 | hwmgr, PPSMC_MSG_EnableAvfs), | |
1447 | "Failed to enable AVFS!", | |
1448 | return -EINVAL); | |
1449 | } | |
1450 | } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, | |
1451 | CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) { | |
1452 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc( | |
1453 | hwmgr, PPSMC_MSG_DisableAvfs), | |
1454 | "Failed to disable AVFS!", | |
1455 | return -EINVAL); | |
1456 | } | |
1457 | ||
1458 | return 0; | |
1459 | } | |
1460 | ||
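/*
 * Descriptive note: re-evaluate AVFS after an overdrive change. A VDDC
 * override leaves AVFS disabled (presumably because the fused voltage
 * curve no longer applies), an SCLK-only change toggles AVFS off and back
 * on so the SMC recomputes it for the new frequencies, and otherwise AVFS
 * is simply kept enabled.
 */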
1461 | static int smu7_update_avfs(struct pp_hwmgr *hwmgr) | |
1462 | { | |
3c9d1fde RZ |
1463 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
1464 | ||
116af450 | 1465 | if (!hwmgr->avfs_supported) |
3c9d1fde RZ |
1466 | return 0; |
1467 | ||
1468 | if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { | |
1469 | smu7_avfs_control(hwmgr, false); | |
1470 | } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { | |
1471 | smu7_avfs_control(hwmgr, false); | |
1472 | smu7_avfs_control(hwmgr, true); | |
1473 | } else { | |
1474 | smu7_avfs_control(hwmgr, true); | |
1475 | } | |
1476 | ||
1477 | return 0; | |
1478 | } | |
1479 | ||
599a7e9f RZ |
1480 | int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr) |
1481 | { | |
1482 | int tmp_result, result = 0; | |
1483 | ||
599a7e9f RZ |
1484 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1485 | PHM_PlatformCaps_ThermalController)) | |
1486 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
1487 | GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); | |
1488 | ||
1489 | tmp_result = smu7_disable_power_containment(hwmgr); | |
1490 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1491 | "Failed to disable power containment!", result = tmp_result); | |
1492 | ||
1493 | tmp_result = smu7_disable_smc_cac(hwmgr); | |
1494 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1495 | "Failed to disable SMC CAC!", result = tmp_result); | |
1496 | ||
7f61bed0 RZ |
1497 | tmp_result = smu7_disable_didt_config(hwmgr); |
1498 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1499 | "Failed to disable DIDT!", result = tmp_result); | |
1500 | ||
599a7e9f RZ |
1501 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, |
1502 | CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); | |
1503 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
1504 | GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); | |
1505 | ||
1506 | tmp_result = smu7_disable_thermal_auto_throttle(hwmgr); | |
1507 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1508 | "Failed to disable thermal auto throttle!", result = tmp_result); | |
1509 | ||
35011d39 EH |
1510 | tmp_result = smu7_avfs_control(hwmgr, false); |
1511 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1512 | "Failed to disable AVFS!", result = tmp_result); | |
f28a9b65 | 1513 | |
599a7e9f RZ |
1514 | tmp_result = smu7_stop_dpm(hwmgr); |
1515 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1516 | "Failed to stop DPM!", result = tmp_result); | |
1517 | ||
1518 | tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr); | |
1519 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1520 | "Failed to disable deep sleep master switch!", result = tmp_result); | |
1521 | ||
1522 | tmp_result = smu7_disable_ulv(hwmgr); | |
1523 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1524 | "Failed to disable ULV!", result = tmp_result); | |
1525 | ||
1526 | tmp_result = smu7_clear_voting_clients(hwmgr); | |
1527 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1528 | "Failed to clear voting clients!", result = tmp_result); | |
1529 | ||
1530 | tmp_result = smu7_reset_to_default(hwmgr); | |
1531 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1532 | "Failed to reset to default!", result = tmp_result); | |
1533 | ||
1534 | tmp_result = smu7_force_switch_to_arbf0(hwmgr); | |
1535 | PP_ASSERT_WITH_CODE((tmp_result == 0), | |
1536 | "Failed to force switch to ArbF0!", result = tmp_result); | |
1537 | ||
1538 | return result; | |
1539 | } | |
1540 | ||
1541 | int smu7_reset_asic_tasks(struct pp_hwmgr *hwmgr) | |
1542 | { | |
1543 | ||
1544 | return 0; | |
1545 | } | |
1546 | ||
1547 | static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) | |
1548 | { | |
1549 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1550 | struct phm_ppt_v1_information *table_info = | |
1551 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
ada6770e | 1552 | struct amdgpu_device *adev = hwmgr->adev; |
599a7e9f RZ |
1553 | |
1554 | data->dll_default_on = false; | |
1555 | data->mclk_dpm0_activity_target = 0xa; | |
599a7e9f RZ |
1556 | data->vddc_vddgfx_delta = 300; |
1557 | data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT; | |
1558 | data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT; | |
0596df6b RZ |
1559 | data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0; |
1560 | data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1; | |
1561 | data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2; | |
1562 | data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3; | |
1563 | data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4; | |
1564 | data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5; | |
1565 | data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6; | |
1566 | data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7; | |
599a7e9f RZ |
1567 | |
1568 | data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; | |
1569 | data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; | |
1570 | data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true; | |
1571 | /* need to set voltage control types before EVV patching */ | |
1572 | data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE; | |
1573 | data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE; | |
1574 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE; | |
1575 | data->enable_tdc_limit_feature = true; | |
1576 | data->enable_pkg_pwr_tracking_feature = true; | |
1577 | data->force_pcie_gen = PP_PCIEGenInvalid; | |
1578 | data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; | |
c7429b3a RZ |
1579 | data->current_profile_setting.bupdate_sclk = 1; |
1580 | data->current_profile_setting.sclk_up_hyst = 0; | |
1581 | data->current_profile_setting.sclk_down_hyst = 100; | |
1582 | data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT; | |
9861023c | 1583 | data->current_profile_setting.bupdate_mclk = 1; |
c7429b3a RZ |
1584 | data->current_profile_setting.mclk_up_hyst = 0; |
1585 | data->current_profile_setting.mclk_down_hyst = 100; | |
1586 | data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT; | |
052fe96d RZ |
1587 | hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; |
1588 | hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; | |
1589 | hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; | |
599a7e9f | 1590 | |
b3b03052 | 1591 | if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) { |
187368a5 RZ |
1592 | uint8_t tmp1, tmp2; |
1593 | uint16_t tmp3 = 0; | |
1594 | atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, | |
1595 | &tmp3); | |
1596 | tmp3 = (tmp3 >> 5) & 0x3; | |
1597 | data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; | |
86457c3b RZ |
1598 | } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { |
1599 | data->vddc_phase_shed_control = 1; | |
1600 | } else { | |
1601 | data->vddc_phase_shed_control = 0; | |
1602 | } | |
1603 | ||
1604 | if (hwmgr->chip_id == CHIP_HAWAII) { | |
1605 | data->thermal_temp_setting.temperature_low = 94500; | |
1606 | data->thermal_temp_setting.temperature_high = 95000; | |
1607 | data->thermal_temp_setting.temperature_shutdown = 104000; | |
1608 | } else { | |
1609 | data->thermal_temp_setting.temperature_low = 99500; | |
1610 | data->thermal_temp_setting.temperature_high = 100000; | |
1611 | data->thermal_temp_setting.temperature_shutdown = 104000; | |
187368a5 RZ |
1612 | } |
1613 | ||
599a7e9f | 1614 | data->fast_watermark_threshold = 100; |
e71b7ae6 | 1615 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
599a7e9f RZ |
1616 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
1617 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
86457c3b RZ |
1618 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
1619 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT)) | |
1620 | data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; | |
599a7e9f RZ |
1621 | |
1622 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1623 | PHM_PlatformCaps_ControlVDDGFX)) { | |
e71b7ae6 | 1624 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
599a7e9f RZ |
1625 | VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) { |
1626 | data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1627 | } | |
1628 | } | |
1629 | ||
1630 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1631 | PHM_PlatformCaps_EnableMVDDControl)) { | |
e71b7ae6 | 1632 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
599a7e9f RZ |
1633 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) |
1634 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; | |
e71b7ae6 | 1635 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
599a7e9f RZ |
1636 | VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) |
1637 | data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1638 | } | |
1639 | ||
86457c3b | 1640 | if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) |
599a7e9f RZ |
1641 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, |
1642 | PHM_PlatformCaps_ControlVDDGFX); | |
599a7e9f RZ |
1643 | |
1644 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1645 | PHM_PlatformCaps_ControlVDDCI)) { | |
e71b7ae6 | 1646 | if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
599a7e9f RZ |
1647 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) |
1648 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO; | |
e71b7ae6 | 1649 | else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr, |
599a7e9f RZ |
1650 | VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2)) |
1651 | data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2; | |
1652 | } | |
1653 | ||
1654 | if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE) | |
1655 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | |
1656 | PHM_PlatformCaps_EnableMVDDControl); | |
1657 | ||
1658 | if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) | |
1659 | phm_cap_unset(hwmgr->platform_descriptor.platformCaps, | |
1660 | PHM_PlatformCaps_ControlVDDCI); | |
1661 | ||
53b963b6 | 1662 | if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) |
599a7e9f RZ |
1663 | && (table_info->cac_dtp_table->usClockStretchAmount != 0)) |
1664 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
1665 | PHM_PlatformCaps_ClockStretcher); | |
1666 | ||
1667 | data->pcie_gen_performance.max = PP_PCIEGen1; | |
1668 | data->pcie_gen_performance.min = PP_PCIEGen3; | |
1669 | data->pcie_gen_power_saving.max = PP_PCIEGen1; | |
1670 | data->pcie_gen_power_saving.min = PP_PCIEGen3; | |
1671 | data->pcie_lane_performance.max = 0; | |
1672 | data->pcie_lane_performance.min = 16; | |
1673 | data->pcie_lane_power_saving.max = 0; | |
1674 | data->pcie_lane_power_saving.min = 16; | |
97f40ef0 | 1675 | |
ada6770e RZ |
1676 | |
1677 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD) | |
1678 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
1679 | PHM_PlatformCaps_UVDPowerGating); | |
1680 | if (adev->pg_flags & AMD_PG_SUPPORT_VCE) | |
1681 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
1682 | PHM_PlatformCaps_VCEPowerGating); | |
599a7e9f RZ |
1683 | } |
1684 | ||
1685 | /** | |
1686 | * Get Leakage VDDC based on leakage ID. | |
1687 | * | |
1688 | * @param hwmgr the address of the powerplay hardware manager. | |
1689 | * @return 0 on success, -EINVAL if an invalid EVV voltage is read. | |
1690 | */ | |
1691 | static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) | |
1692 | { | |
1693 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1694 | uint16_t vv_id; | |
1695 | uint16_t vddc = 0; | |
1696 | uint16_t vddgfx = 0; | |
1697 | uint16_t i, j; | |
1698 | uint32_t sclk = 0; | |
1699 | struct phm_ppt_v1_information *table_info = | |
1700 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
1701 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; | |
1702 | ||
1703 | ||
599a7e9f RZ |
1704 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { |
1705 | vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; | |
1706 | ||
1707 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
0f12f73c AD |
1708 | if ((hwmgr->pp_table_version == PP_TABLE_V1) |
1709 | && !phm_get_sclk_for_voltage_evv(hwmgr, | |
599a7e9f RZ |
1710 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { |
1711 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1712 | PHM_PlatformCaps_ClockStretcher)) { | |
0f12f73c AD |
1713 | sclk_table = table_info->vdd_dep_on_sclk; |
1714 | ||
599a7e9f RZ |
1715 | for (j = 1; j < sclk_table->count; j++) { |
1716 | if (sclk_table->entries[j].clk == sclk && | |
1717 | sclk_table->entries[j].cks_enable == 0) { | |
1718 | sclk += 5000; | |
1719 | break; | |
1720 | } | |
1721 | } | |
1722 | } | |
1723 | if (0 == atomctrl_get_voltage_evv_on_sclk | |
1724 | (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk, | |
1725 | vv_id, &vddgfx)) { | |
1726 | /* need to make sure vddgfx is less than 2V, or else it could burn the ASIC. */ | |
1727 | PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL); | |
1728 | ||
1729 | /* the voltage should not be zero nor equal to leakage ID */ | |
1730 | if (vddgfx != 0 && vddgfx != vv_id) { | |
1731 | data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx; | |
1732 | data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id; | |
1733 | data->vddcgfx_leakage.count++; | |
1734 | } | |
1735 | } else { | |
b5c11b8e | 1736 | pr_info("Error retrieving EVV voltage value!\n"); |
599a7e9f RZ |
1737 | } |
1738 | } | |
1739 | } else { | |
599a7e9f RZ |
1740 | if ((hwmgr->pp_table_version == PP_TABLE_V0) |
1741 | || !phm_get_sclk_for_voltage_evv(hwmgr, | |
1742 | table_info->vddc_lookup_table, vv_id, &sclk)) { | |
1743 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
1744 | PHM_PlatformCaps_ClockStretcher)) { | |
0f12f73c AD |
1745 | if (table_info == NULL) |
1746 | return -EINVAL; | |
1747 | sclk_table = table_info->vdd_dep_on_sclk; | |
1748 | ||
599a7e9f RZ |
1749 | for (j = 1; j < sclk_table->count; j++) { |
1750 | if (sclk_table->entries[j].clk == sclk && | |
1751 | sclk_table->entries[j].cks_enable == 0) { | |
1752 | sclk += 5000; | |
1753 | break; | |
1754 | } | |
1755 | } | |
1756 | } | |
1757 | ||
1758 | if (phm_get_voltage_evv_on_sclk(hwmgr, | |
1759 | VOLTAGE_TYPE_VDDC, | |
1760 | sclk, vv_id, &vddc) == 0) { | |
1761 | if (vddc >= 2000 || vddc == 0) | |
1762 | return -EINVAL; | |
1763 | } else { | |
89c67699 | 1764 | pr_debug("failed to retrieve EVV voltage!\n"); |
599a7e9f RZ |
1765 | continue; |
1766 | } | |
1767 | ||
1768 | /* the voltage should not be zero nor equal to leakage ID */ | |
1769 | if (vddc != 0 && vddc != vv_id) { | |
1770 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc); | |
1771 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; | |
1772 | data->vddc_leakage.count++; | |
1773 | } | |
1774 | } | |
1775 | } | |
1776 | } | |
1777 | ||
1778 | return 0; | |
1779 | } | |
1780 | ||
1781 | /** | |
1782 | * Change virtual leakage voltage to actual value. | |
1783 | * | |
1784 | * @param hwmgr the address of the powerplay hardware manager. | |
1785 | * @param voltage pointer to the voltage to be patched | |
1786 | * @param leakage_table pointer to the leakage table | |
1787 | */ | |
1788 | static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr, | |
1789 | uint16_t *voltage, struct smu7_leakage_voltage *leakage_table) | |
1790 | { | |
1791 | uint32_t index; | |
1792 | ||
1793 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ | |
1794 | for (index = 0; index < leakage_table->count; index++) { | |
1795 | /* if this voltage matches a leakage voltage ID */ | |
1796 | /* patch with actual leakage voltage */ | |
1797 | if (leakage_table->leakage_id[index] == *voltage) { | |
1798 | *voltage = leakage_table->actual_voltage[index]; | |
1799 | break; | |
1800 | } | |
1801 | } | |
1802 | ||
1803 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) | |
b5c11b8e | 1804 | pr_err("Voltage value looks like a leakage ID but it is not patched\n"); |
599a7e9f RZ |
1805 | } |
1806 | ||
1807 | /** | |
1808 | * Patch voltage lookup table by EVV leakages. | |
1809 | * | |
1810 | * @param hwmgr the address of the powerplay hardware manager. | |
1811 | * @param lookup_table pointer to the voltage lookup table | |
1812 | * @param leakage_table pointer to the leakage table | |
1813 | * @return always 0 | |
1814 | */ | |
1815 | static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, | |
1816 | phm_ppt_v1_voltage_lookup_table *lookup_table, | |
1817 | struct smu7_leakage_voltage *leakage_table) | |
1818 | { | |
1819 | uint32_t i; | |
1820 | ||
1821 | for (i = 0; i < lookup_table->count; i++) | |
1822 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, | |
1823 | &lookup_table->entries[i].us_vdd, leakage_table); | |
1824 | ||
1825 | return 0; | |
1826 | } | |
1827 | ||
1828 | static int smu7_patch_clock_voltage_limits_with_vddc_leakage( | |
1829 | struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table, | |
1830 | uint16_t *vddc) | |
1831 | { | |
1832 | struct phm_ppt_v1_information *table_info = | |
1833 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1834 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); | |
1835 | hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = | |
1836 | table_info->max_clock_voltage_on_dc.vddc; | |
1837 | return 0; | |
1838 | } | |
1839 | ||
1840 | static int smu7_patch_voltage_dependency_tables_with_lookup_table( | |
1841 | struct pp_hwmgr *hwmgr) | |
1842 | { | |
1843 | uint8_t entry_id; | |
1844 | uint8_t voltage_id; | |
1845 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1846 | struct phm_ppt_v1_information *table_info = | |
1847 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1848 | ||
1849 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | |
1850 | table_info->vdd_dep_on_sclk; | |
1851 | struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = | |
1852 | table_info->vdd_dep_on_mclk; | |
1853 | struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = | |
1854 | table_info->mm_dep_table; | |
1855 | ||
1856 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1857 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { | |
1858 | voltage_id = sclk_table->entries[entry_id].vddInd; | |
1859 | sclk_table->entries[entry_id].vddgfx = | |
1860 | table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd; | |
1861 | } | |
1862 | } else { | |
1863 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { | |
1864 | voltage_id = sclk_table->entries[entry_id].vddInd; | |
1865 | sclk_table->entries[entry_id].vddc = | |
1866 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; | |
1867 | } | |
1868 | } | |
1869 | ||
1870 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { | |
1871 | voltage_id = mclk_table->entries[entry_id].vddInd; | |
1872 | mclk_table->entries[entry_id].vddc = | |
1873 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; | |
1874 | } | |
1875 | ||
1876 | for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { | |
1877 | voltage_id = mm_table->entries[entry_id].vddcInd; | |
1878 | mm_table->entries[entry_id].vddc = | |
1879 | table_info->vddc_lookup_table->entries[voltage_id].us_vdd; | |
1880 | } | |
1881 | ||
1882 | return 0; | |
1883 | ||
1884 | } | |
1885 | ||
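/*
 * Descriptive note: insert a calculated record into a voltage lookup
 * table. A record whose us_vdd already exists is overwritten in place (or
 * skipped entirely if the existing entry was itself calculated), so
 * duplicates never appear; count only grows on a true append.
 */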
1886 | static int phm_add_voltage(struct pp_hwmgr *hwmgr, | |
1887 | phm_ppt_v1_voltage_lookup_table *look_up_table, | |
1888 | phm_ppt_v1_voltage_lookup_record *record) | |
1889 | { | |
1890 | uint32_t i; | |
1891 | ||
1892 | PP_ASSERT_WITH_CODE((NULL != look_up_table), | |
1893 | "Lookup Table is missing.", return -EINVAL); | |
1894 | PP_ASSERT_WITH_CODE((0 != look_up_table->count), | |
1895 | "Lookup Table is empty.", return -EINVAL); | |
1896 | ||
d3f8c0ab | 1897 | i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX); |
599a7e9f RZ |
1898 | PP_ASSERT_WITH_CODE((i >= look_up_table->count), |
1899 | "Lookup Table is full.", return -EINVAL); | |
1900 | ||
1901 | /* This is to avoid entering duplicate calculated records. */ | |
1902 | for (i = 0; i < look_up_table->count; i++) { | |
1903 | if (look_up_table->entries[i].us_vdd == record->us_vdd) { | |
1904 | if (look_up_table->entries[i].us_calculated == 1) | |
1905 | return 0; | |
1906 | break; | |
1907 | } | |
1908 | } | |
1909 | ||
1910 | look_up_table->entries[i].us_calculated = 1; | |
1911 | look_up_table->entries[i].us_vdd = record->us_vdd; | |
1912 | look_up_table->entries[i].us_cac_low = record->us_cac_low; | |
1913 | look_up_table->entries[i].us_cac_mid = record->us_cac_mid; | |
1914 | look_up_table->entries[i].us_cac_high = record->us_cac_high; | |
1915 | /* Only increment the count when we're appending, not replacing duplicate entry. */ | |
1916 | if (i == look_up_table->count) | |
1917 | look_up_table->count++; | |
1918 | ||
1919 | return 0; | |
1920 | } | |
1921 | ||
1922 | ||
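/*
 * Descriptive note: vdd_offset is a signed 16-bit value stored in an
 * unsigned field. When bit 15 is set, the code below subtracts 0xFFFF to
 * recover the negative offset (e.g. an offset of 0xFFFB yields
 * vddgfx - 4). The resulting absolute voltages are mirrored into the
 * lookup tables via phm_add_voltage.
 */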
1923 | static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr) | |
1924 | { | |
1925 | uint8_t entry_id; | |
1926 | struct phm_ppt_v1_voltage_lookup_record v_record; | |
1927 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1928 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1929 | ||
1930 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk; | |
1931 | phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk; | |
1932 | ||
1933 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1934 | for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) { | |
1935 | if (sclk_table->entries[entry_id].vdd_offset & (1 << 15)) | |
1936 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + | |
1937 | sclk_table->entries[entry_id].vdd_offset - 0xFFFF; | |
1938 | else | |
1939 | v_record.us_vdd = sclk_table->entries[entry_id].vddgfx + | |
1940 | sclk_table->entries[entry_id].vdd_offset; | |
1941 | ||
1942 | sclk_table->entries[entry_id].vddc = | |
1943 | v_record.us_cac_low = v_record.us_cac_mid = | |
1944 | v_record.us_cac_high = v_record.us_vdd; | |
1945 | ||
1946 | phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record); | |
1947 | } | |
1948 | ||
1949 | for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { | |
1950 | if (mclk_table->entries[entry_id].vdd_offset & (1 << 15)) | |
1951 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + | |
1952 | mclk_table->entries[entry_id].vdd_offset - 0xFFFF; | |
1953 | else | |
1954 | v_record.us_vdd = mclk_table->entries[entry_id].vddc + | |
1955 | mclk_table->entries[entry_id].vdd_offset; | |
1956 | ||
1957 | mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low = | |
1958 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; | |
1959 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); | |
1960 | } | |
1961 | } | |
1962 | return 0; | |
1963 | } | |
1964 | ||
1965 | static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr) | |
1966 | { | |
1967 | uint8_t entry_id; | |
1968 | struct phm_ppt_v1_voltage_lookup_record v_record; | |
1969 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
1970 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
1971 | phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table; | |
1972 | ||
1973 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
1974 | for (entry_id = 0; entry_id < mm_table->count; entry_id++) { | |
1975 | if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15)) | |
1976 | v_record.us_vdd = mm_table->entries[entry_id].vddc + | |
1977 | mm_table->entries[entry_id].vddgfx_offset - 0xFFFF; | |
1978 | else | |
1979 | v_record.us_vdd = mm_table->entries[entry_id].vddc + | |
1980 | mm_table->entries[entry_id].vddgfx_offset; | |
1981 | ||
1982 | /* Add the calculated VDDGFX to the VDDGFX lookup table */ | |
1983 | mm_table->entries[entry_id].vddgfx = v_record.us_cac_low = | |
1984 | v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd; | |
1985 | phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record); | |
1986 | } | |
1987 | } | |
1988 | return 0; | |
1989 | } | |
1990 | ||
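/*
 * Descriptive note: sort a voltage lookup table in ascending us_vdd order
 * using a simple insertion sort; O(n^2) is acceptable since these tables
 * are small (bounded by the SMU_MAX_LEVELS_* definitions).
 */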
1991 | static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, | |
1992 | struct phm_ppt_v1_voltage_lookup_table *lookup_table) | |
1993 | { | |
1994 | uint32_t table_size, i, j; | |
1995 | struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; | |
1996 | table_size = lookup_table->count; | |
1997 | ||
1998 | PP_ASSERT_WITH_CODE(0 != lookup_table->count, | |
1999 | "Lookup table is empty", return -EINVAL); | |
2000 | ||
2001 | /* Sorting voltages */ | |
2002 | for (i = 0; i < table_size - 1; i++) { | |
2003 | for (j = i + 1; j > 0; j--) { | |
2004 | if (lookup_table->entries[j].us_vdd < | |
2005 | lookup_table->entries[j - 1].us_vdd) { | |
2006 | tmp_voltage_lookup_record = lookup_table->entries[j - 1]; | |
2007 | lookup_table->entries[j - 1] = lookup_table->entries[j]; | |
2008 | lookup_table->entries[j] = tmp_voltage_lookup_record; | |
2009 | } | |
2010 | } | |
2011 | } | |
2012 | ||
2013 | return 0; | |
2014 | } | |
2015 | ||
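/*
 * Descriptive note: fix up the v1 powerplay tables in dependency order:
 * patch leakage IDs in the lookup tables, propagate the patched voltages
 * into the clock dependency tables, derive the split-rail (SVI2 VDDGFX)
 * entries, and finally re-sort both lookup tables by voltage.
 */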
2016 | static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr) | |
2017 | { | |
2018 | int result = 0; | |
2019 | int tmp_result; | |
2020 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2021 | struct phm_ppt_v1_information *table_info = | |
2022 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2023 | ||
2024 | if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { | |
2025 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, | |
2026 | table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage)); | |
2027 | if (tmp_result != 0) | |
2028 | result = tmp_result; | |
2029 | ||
2030 | smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, | |
2031 | &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage)); | |
2032 | } else { | |
2033 | ||
2034 | tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr, | |
2035 | table_info->vddc_lookup_table, &(data->vddc_leakage)); | |
2036 | if (tmp_result) | |
2037 | result = tmp_result; | |
2038 | ||
2039 | tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, | |
2040 | &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); | |
2041 | if (tmp_result) | |
2042 | result = tmp_result; | |
2043 | } | |
2044 | ||
2045 | tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr); | |
2046 | if (tmp_result) | |
2047 | result = tmp_result; | |
2048 | ||
2049 | tmp_result = smu7_calc_voltage_dependency_tables(hwmgr); | |
2050 | if (tmp_result) | |
2051 | result = tmp_result; | |
2052 | ||
2053 | tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr); | |
2054 | if (tmp_result) | |
2055 | result = tmp_result; | |
2056 | ||
2057 | tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table); | |
2058 | if (tmp_result) | |
2059 | result = tmp_result; | |
2060 | ||
2061 | tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); | |
2062 | if (tmp_result) | |
2063 | result = tmp_result; | |
2064 | ||
2065 | return result; | |
2066 | } | |
2067 | ||
2068 | static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr) | |
2069 | { | |
2070 | struct phm_ppt_v1_information *table_info = | |
2071 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2072 | ||
2073 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = | |
2074 | table_info->vdd_dep_on_sclk; | |
2075 | struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = | |
2076 | table_info->vdd_dep_on_mclk; | |
2077 | ||
2078 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, | |
2079 | "VDD dependency on SCLK table is missing.", | |
2080 | return -EINVAL); | |
2081 | PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, | |
2082 | "VDD dependency on SCLK table has to have at least one entry.", | |
2083 | return -EINVAL); | |
2084 | ||
2085 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, | |
2086 | "VDD dependency on MCLK table is missing.", | |
2087 | return -EINVAL); | |
2088 | PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, | |
2089 | "VDD dependency on MCLK table has to have at least one entry.", | |
2090 | return -EINVAL); | |
2091 | ||
2092 | table_info->max_clock_voltage_on_ac.sclk = | |
2093 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; | |
2094 | table_info->max_clock_voltage_on_ac.mclk = | |
2095 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; | |
2096 | table_info->max_clock_voltage_on_ac.vddc = | |
2097 | allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; | |
2098 | table_info->max_clock_voltage_on_ac.vddci = | |
2099 | allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; | |
2100 | ||
2101 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk; | |
2102 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk; | |
2103 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc; | |
2104 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci; | |
2105 | ||
2106 | return 0; | |
2107 | } | |
2108 | ||
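/*
 * Descriptive note: board-specific workaround. On certain Polaris10
 * (rev 0xC7) boards, if the highest MCLK state references a VDDC entry
 * below 1000 mV, repoint it at the first non-leakage lookup entry of at
 * least 1000 mV so the top memory clock gets sufficient voltage.
 */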
f8a4c11b | 2109 | static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
2110 | { |
2111 | struct phm_ppt_v1_information *table_info = | |
2112 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2113 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; | |
2114 | struct phm_ppt_v1_voltage_lookup_table *lookup_table; | |
2115 | uint32_t i; | |
2116 | uint32_t hw_revision, sub_vendor_id, sub_sys_id; | |
ada6770e | 2117 | struct amdgpu_device *adev = hwmgr->adev; |
599a7e9f RZ |
2118 | |
2119 | if (table_info != NULL) { | |
2120 | dep_mclk_table = table_info->vdd_dep_on_mclk; | |
2121 | lookup_table = table_info->vddc_lookup_table; | |
2122 | } else | |
2123 | return 0; | |
2124 | ||
ada6770e RZ |
2125 | hw_revision = adev->pdev->revision; |
2126 | sub_sys_id = adev->pdev->subsystem_device; | |
2127 | sub_vendor_id = adev->pdev->subsystem_vendor; | |
599a7e9f RZ |
2128 | |
2129 | if (hwmgr->chip_id == CHIP_POLARIS10 && hw_revision == 0xC7 && | |
2130 | ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) || | |
2131 | (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) || | |
2132 | (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) { | |
2133 | if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000) | |
2134 | return 0; | |
2135 | ||
2136 | for (i = 0; i < lookup_table->count; i++) { | |
2137 | if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) { | |
2138 | dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i; | |
2139 | return 0; | |
2140 | } | |
2141 | } | |
2142 | } | |
2143 | return 0; | |
2144 | } | |
2145 | ||
2146 | static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) | |
2147 | { | |
2148 | struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; | |
2149 | uint32_t temp_reg; | |
2150 | struct phm_ppt_v1_information *table_info = | |
2151 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2152 | ||
2153 | ||
2154 | if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { | |
2155 | temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); | |
2156 | switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) { | |
2157 | case 0: | |
2158 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1); | |
2159 | break; | |
2160 | case 1: | |
2161 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2); | |
2162 | break; | |
2163 | case 2: | |
2164 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1); | |
2165 | break; | |
2166 | case 3: | |
2167 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1); | |
2168 | break; | |
2169 | case 4: | |
2170 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); | |
2171 | break; | |
2172 | default: | |
599a7e9f RZ |
2173 | break; |
2174 | } | |
2175 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); | |
2176 | } | |
2177 | ||
2178 | if (table_info == NULL) | |
2179 | return 0; | |
2180 | ||
2181 | if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 && | |
2182 | hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) { | |
2183 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit = | |
2184 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; | |
2185 | ||
2186 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit = | |
2187 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; | |
2188 | ||
2189 | hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1; | |
2190 | ||
2191 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100; | |
2192 | ||
2193 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit = | |
2194 | (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit; | |
2195 | ||
2196 | hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1; | |
2197 | ||
2198 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ? | |
2199 | (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0; | |
2200 | ||
2201 | table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp; | |
2202 | table_info->cac_dtp_table->usOperatingTempStep = 1; | |
2203 | table_info->cac_dtp_table->usOperatingTempHyst = 1; | |
2204 | ||
2205 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM = | |
2206 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM; | |
2207 | ||
2208 | hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = | |
2209 | hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM; | |
2210 | ||
2211 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit = | |
2212 | table_info->cac_dtp_table->usOperatingTempMinLimit; | |
2213 | ||
2214 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit = | |
2215 | table_info->cac_dtp_table->usOperatingTempMaxLimit; | |
2216 | ||
2217 | hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp = | |
2218 | table_info->cac_dtp_table->usDefaultTargetOperatingTemp; | |
2219 | ||
2220 | hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep = | |
2221 | table_info->cac_dtp_table->usOperatingTempStep; | |
2222 | ||
2223 | hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp = | |
2224 | table_info->cac_dtp_table->usTargetOperatingTemp; | |
cf54d6d9 RZ |
2225 | if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK) |
2226 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | |
2227 | PHM_PlatformCaps_ODFuzzyFanControlSupport); | |
599a7e9f RZ |
2228 | } |
2229 | ||
2230 | return 0; | |
2231 | } | |
2232 | ||
2233 | /** | |
2234 | * Change virtual leakage voltage to actual value. | |
2235 | * | |
2236 | * @param hwmgr the address of the powerplay hardware manager. | |
2237 | * @param voltage pointer to the voltage to be patched | |
2238 | * @param leakage_table pointer to the leakage table | |
2239 | */ | |
2240 | static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr, | |
2241 | uint32_t *voltage, struct smu7_leakage_voltage *leakage_table) | |
2242 | { | |
2243 | uint32_t index; | |
2244 | ||
2245 | /* search for leakage voltage ID 0xff01 ~ 0xff08 */ | |
2246 | for (index = 0; index < leakage_table->count; index++) { | |
2247 | /* if this voltage matches a leakage voltage ID */ | |
2248 | /* patch with actual leakage voltage */ | |
2249 | if (leakage_table->leakage_id[index] == *voltage) { | |
2250 | *voltage = leakage_table->actual_voltage[index]; | |
2251 | break; | |
2252 | } | |
2253 | } | |
2254 | ||
2255 | if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) | |
b5c11b8e | 2256 | pr_err("Voltage value looks like a leakage ID but it is not patched\n"); |
599a7e9f RZ |
2257 | } |
2258 | ||
2259 | ||
2260 | static int smu7_patch_vddc(struct pp_hwmgr *hwmgr, | |
2261 | struct phm_clock_voltage_dependency_table *tab) | |
2262 | { | |
2263 | uint16_t i; | |
2264 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2265 | ||
2266 | if (tab) | |
2267 | for (i = 0; i < tab->count; i++) | |
2268 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2269 | &data->vddc_leakage); | |
2270 | ||
2271 | return 0; | |
2272 | } | |
2273 | ||
2274 | static int smu7_patch_vddci(struct pp_hwmgr *hwmgr, | |
2275 | struct phm_clock_voltage_dependency_table *tab) | |
2276 | { | |
2277 | uint16_t i; | |
2278 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2279 | ||
2280 | if (tab) | |
2281 | for (i = 0; i < tab->count; i++) | |
2282 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2283 | &data->vddci_leakage); | |
2284 | ||
2285 | return 0; | |
2286 | } | |
2287 | ||
2288 | static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr, | |
2289 | struct phm_vce_clock_voltage_dependency_table *tab) | |
2290 | { | |
2291 | uint16_t i; | |
2292 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2293 | ||
2294 | if (tab) | |
2295 | for (i = 0; i < tab->count; i++) | |
2296 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2297 | &data->vddc_leakage); | |
2298 | ||
2299 | return 0; | |
2300 | } | |
2301 | ||
2302 | ||
2303 | static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr, | |
2304 | struct phm_uvd_clock_voltage_dependency_table *tab) | |
2305 | { | |
2306 | uint16_t i; | |
2307 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2308 | ||
2309 | if (tab) | |
2310 | for (i = 0; i < tab->count; i++) | |
2311 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2312 | &data->vddc_leakage); | |
2313 | ||
2314 | return 0; | |
2315 | } | |
2316 | ||
2317 | static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr, | |
2318 | struct phm_phase_shedding_limits_table *tab) | |
2319 | { | |
2320 | uint16_t i; | |
2321 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2322 | ||
2323 | if (tab) | |
2324 | for (i = 0; i < tab->count; i++) | |
2325 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage, | |
2326 | &data->vddc_leakage); | |
2327 | ||
2328 | return 0; | |
2329 | } | |
2330 | ||
2331 | static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr, | |
2332 | struct phm_samu_clock_voltage_dependency_table *tab) | |
2333 | { | |
2334 | uint16_t i; | |
2335 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2336 | ||
2337 | if (tab) | |
2338 | for (i = 0; i < tab->count; i++) | |
2339 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2340 | &data->vddc_leakage); | |
2341 | ||
2342 | return 0; | |
2343 | } | |
2344 | ||
2345 | static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, | |
2346 | struct phm_acp_clock_voltage_dependency_table *tab) | |
2347 | { | |
2348 | uint16_t i; | |
2349 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2350 | ||
2351 | if (tab) | |
2352 | for (i = 0; i < tab->count; i++) | |
2353 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v, | |
2354 | &data->vddc_leakage); | |
2355 | ||
2356 | return 0; | |
2357 | } | |
2358 | ||
2359 | static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, | |
77f7f71f | 2360 | struct phm_clock_and_voltage_limits *tab) |
599a7e9f | 2361 | { |
77f7f71f | 2362 | uint32_t vddc, vddci; |
599a7e9f RZ |
2363 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
2364 | ||
2365 | if (tab) { | |
a29d1260 | 2366 | vddc = tab->vddc; |
77f7f71f AD |
2367 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, |
2368 | &data->vddc_leakage); | |
2369 | tab->vddc = vddc; | |
a29d1260 | 2370 | vddci = tab->vddci; |
77f7f71f AD |
2371 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, |
2372 | &data->vddci_leakage); | |
2373 | tab->vddci = vddci; | |
599a7e9f RZ |
2374 | } |
2375 | ||
2376 | return 0; | |
2377 | } | |
2378 | ||
2379 | static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab) | |
2380 | { | |
2381 | uint32_t i; | |
2382 | uint32_t vddc; | |
2383 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2384 | ||
2385 | if (tab) { | |
2386 | for (i = 0; i < tab->count; i++) { | |
2387 | vddc = (uint32_t)(tab->entries[i].Vddc); | |
2388 | smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage); | |
2389 | tab->entries[i].Vddc = (uint16_t)vddc; | |
2390 | } | |
2391 | } | |
2392 | ||
2393 | return 0; | |
2394 | } | |
2395 | ||
2396 | static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr) | |
2397 | { | |
2398 | int tmp; | |
2399 | ||
2400 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk); | |
2401 | if (tmp) | |
2402 | return -EINVAL; | |
2403 | ||
2404 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk); | |
2405 | if (tmp) | |
2406 | return -EINVAL; | |
2407 | ||
2408 | tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); | |
2409 | if (tmp) | |
2410 | return -EINVAL; | |
2411 | ||
2412 | tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); | |
2413 | if (tmp) | |
2414 | return -EINVAL; | |
2415 | ||
2416 | tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table); | |
2417 | if (tmp) | |
2418 | return -EINVAL; | |
2419 | ||
2420 | tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table); | |
2421 | if (tmp) | |
2422 | return -EINVAL; | |
2423 | ||
2424 | tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table); | |
2425 | if (tmp) | |
2426 | return -EINVAL; | |
2427 | ||
2428 | tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table); | |
2429 | if (tmp) | |
2430 | return -EINVAL; | |
2431 | ||
2432 | tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table); | |
2433 | if (tmp) | |
2434 | return -EINVAL; | |
2435 | ||
2436 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac); | |
2437 | if (tmp) | |
2438 | return -EINVAL; | |
2439 | ||
2440 | tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc); | |
2441 | if (tmp) | |
2442 | return -EINVAL; | |
2443 | ||
2444 | tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table); | |
2445 | if (tmp) | |
2446 | return -EINVAL; | |
2447 | ||
2448 | return 0; | |
2449 | } | |
2450 | ||
2451 | ||
2452 | static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr) | |
2453 | { | |
2454 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2455 | ||
2456 | struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk; | |
2457 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk; | |
2458 | struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk; | |
2459 | ||
2460 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL, | |
1446413f JL |
2461 | "VDDC dependency on SCLK table is missing. This table is mandatory", |
2462 | return -EINVAL); | |
599a7e9f | 2463 | PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1, |
1446413f JL |
2464 | "VDDC dependency on SCLK table has to have at least one entry. This table is mandatory", | |
2465 | return -EINVAL); | |
599a7e9f RZ |
2466 | |
2467 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL, | |
1446413f JL |
2468 | "VDDC dependency on MCLK table is missing. This table is mandatory", |
2469 | return -EINVAL); | |
599a7e9f | 2470 | PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1, |
1446413f JL |
2471 | "VDDC dependency on MCLK table has to have at least one entry. This table is mandatory", | |
2472 | return -EINVAL); | |
599a7e9f RZ |
2473 | |
2474 | data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v; | |
2475 | data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; | |
2476 | ||
2477 | hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = | |
2478 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; | |
2479 | hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = | |
2480 | allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk; | |
2481 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = | |
2482 | allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; | |
2483 | ||
2484 | if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) { | |
2485 | data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v; | |
2486 | data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; | |
2487 | } | |
2488 | ||
86457c3b | 2489 | if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1) |
599a7e9f RZ |
2490 | hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v; |
2491 | ||
2492 | return 0; | |
2493 | } | |
2494 | ||
a0aa7046 RZ |
2495 | static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) |
2496 | { | |
ebe02de2 HJ |
2497 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); |
2498 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; | |
ebe02de2 HJ |
2499 | kfree(hwmgr->backend); |
2500 | hwmgr->backend = NULL; | |
a0aa7046 RZ |
2501 | |
2502 | return 0; | |
2503 | } | |
2504 | ||
86457c3b RZ |
2505 | static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr) |
2506 | { | |
2507 | uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id; | |
2508 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2509 | int i; | |
2510 | ||
2511 | if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) { | |
2512 | for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { | |
2513 | virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; | |
2514 | if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci, | |
2515 | virtual_voltage_id, | |
2516 | efuse_voltage_id) == 0) { | |
2517 | if (vddc != 0 && vddc != virtual_voltage_id) { | |
2518 | data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc; | |
2519 | data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id; | |
2520 | data->vddc_leakage.count++; | |
2521 | } | |
2522 | if (vddci != 0 && vddci != virtual_voltage_id) { | |
2523 | data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci; | |
2524 | data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id; | |
2525 | data->vddci_leakage.count++; | |
2526 | } | |
2527 | } | |
2528 | } | |
2529 | } | |
2530 | return 0; | |
2531 | } | |
2532 | ||
f8a4c11b | 2533 | static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
2534 | { |
2535 | struct smu7_hwmgr *data; | |
86457c3b | 2536 | int result = 0; |
599a7e9f RZ |
2537 | |
2538 | data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL); | |
2539 | if (data == NULL) | |
2540 | return -ENOMEM; | |
2541 | ||
2542 | hwmgr->backend = data; | |
599a7e9f RZ |
2543 | smu7_patch_voltage_workaround(hwmgr); |
2544 | smu7_init_dpm_defaults(hwmgr); | |
2545 | ||
2546 | /* Get leakage voltage based on leakage ID. */ | |
86457c3b RZ |
2547 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
2548 | PHM_PlatformCaps_EVV)) { | |
2549 | result = smu7_get_evv_voltages(hwmgr); | |
2550 | if (result) { | |
2551 | pr_info("Get EVV voltage failed. Aborting driver load!\n"); | |
2552 | return -EINVAL; | |
2553 | } | |
2554 | } else { | |
2555 | smu7_get_elb_voltages(hwmgr); | |
599a7e9f RZ |
2556 | } |
2557 | ||
2558 | if (hwmgr->pp_table_version == PP_TABLE_V1) { | |
2559 | smu7_complete_dependency_tables(hwmgr); | |
2560 | smu7_set_private_data_based_on_pptable_v1(hwmgr); | |
2561 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { | |
2562 | smu7_patch_dependency_tables_with_leakage(hwmgr); | |
2563 | smu7_set_private_data_based_on_pptable_v0(hwmgr); | |
2564 | } | |
2565 | ||
2566 | /* Initialize Dynamic State Adjustment Rule Settings */ | |
2567 | result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); | |
2568 | ||
2569 | if (0 == result) { | |
ada6770e | 2570 | struct amdgpu_device *adev = hwmgr->adev; |
599a7e9f RZ |
2571 | |
2572 | data->is_tlu_enabled = false; | |
2573 | ||
2574 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = | |
2575 | SMU7_MAX_HARDWARE_POWERLEVELS; | |
2576 | hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; | |
2577 | hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; | |
2578 | ||
ada6770e | 2579 | data->pcie_gen_cap = adev->pm.pcie_gen_mask; |
599a7e9f RZ |
2580 | if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) |
2581 | data->pcie_spc_cap = 20; | |
ada6770e | 2582 | data->pcie_lane_cap = adev->pm.pcie_mlw_mask; |
599a7e9f RZ |
2583 | |
2584 | hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ | |
2585 | /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz (500 in the table's 10 kHz units). */ | |
2586 | hwmgr->platform_descriptor.clockStep.engineClock = 500; | |
2587 | hwmgr->platform_descriptor.clockStep.memoryClock = 500; | |
2588 | smu7_thermal_parameter_init(hwmgr); | |
2589 | } else { | |
2590 | /* Ignore the return value here; we are cleaning up after a failure. */ | |
a0aa7046 | 2591 | smu7_hwmgr_backend_fini(hwmgr); |
599a7e9f RZ |
2592 | } |
2593 | ||
2594 | return 0; | |
2595 | } | |
2596 | ||
2597 | static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr) | |
2598 | { | |
2599 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2600 | uint32_t level, tmp; | |
2601 | ||
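	/*
	 * Descriptive note: for each DPM type, "while (tmp >>= 1) level++"
	 * computes the index of the highest set bit in the enable mask
	 * (fls(mask) - 1), i.e. the highest enabled DPM level; a mask of
	 * 0b0110, for example, forces level 2.
	 */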
2602 | if (!data->pcie_dpm_key_disabled) { | |
2603 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { | |
2604 | level = 0; | |
2605 | tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask; | |
2606 | while (tmp >>= 1) | |
2607 | level++; | |
2608 | ||
2609 | if (level) | |
d3f8c0ab | 2610 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2611 | PPSMC_MSG_PCIeDPM_ForceLevel, level); |
2612 | } | |
2613 | } | |
2614 | ||
2615 | if (!data->sclk_dpm_key_disabled) { | |
2616 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { | |
2617 | level = 0; | |
2618 | tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; | |
2619 | while (tmp >>= 1) | |
2620 | level++; | |
2621 | ||
2622 | if (level) | |
d3f8c0ab | 2623 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2624 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
2625 | (1 << level)); | |
2626 | } | |
2627 | } | |
2628 | ||
2629 | if (!data->mclk_dpm_key_disabled) { | |
2630 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { | |
2631 | level = 0; | |
2632 | tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask; | |
2633 | while (tmp >>= 1) | |
2634 | level++; | |
2635 | ||
2636 | if (level) | |
d3f8c0ab | 2637 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2638 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
2639 | (1 << level)); | |
2640 | } | |
2641 | } | |
2642 | ||
2643 | return 0; | |
2644 | } | |
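/*
 * Illustrative sketch (not driver code): the masked loops above compute the
 * index of the highest set bit in a DPM enable mask, i.e. the highest
 * enabled level that can be forced. A self-contained equivalent:
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t highest_enabled_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask >>= 1)	/* shift until the top set bit is consumed */
		level++;

	return level;	/* e.g. mask 0x0b (levels 0, 1, 3 enabled) -> 3 */
}
#endif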
2645 | ||
2646 | static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) | |
2647 | { | |
2648 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2649 | ||
2650 | if (hwmgr->pp_table_version == PP_TABLE_V1) | |
2651 | phm_apply_dal_min_voltage_request(hwmgr); | |
2652 | /* TODO: for v0 Iceland and CI */ | |
2653 | ||
2654 | if (!data->sclk_dpm_key_disabled) { | |
2655 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) | |
d3f8c0ab | 2656 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2657 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
2658 | data->dpm_level_enable_mask.sclk_dpm_enable_mask); | |
2659 | } | |
2660 | ||
2661 | if (!data->mclk_dpm_key_disabled) { | |
2662 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) | |
d3f8c0ab | 2663 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2664 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
2665 | data->dpm_level_enable_mask.mclk_dpm_enable_mask); | |
2666 | } | |
2667 | ||
2668 | return 0; | |
2669 | } | |
2670 | ||
2671 | static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr) | |
2672 | { | |
2673 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2674 | ||
2675 | if (!smum_is_dpm_running(hwmgr)) | |
2676 | return -EINVAL; | |
2677 | ||
2678 | if (!data->pcie_dpm_key_disabled) { | |
d3f8c0ab | 2679 | smum_send_msg_to_smc(hwmgr, |
599a7e9f RZ |
2680 | PPSMC_MSG_PCIeDPM_UnForceLevel); |
2681 | } | |
2682 | ||
2683 | return smu7_upload_dpm_level_enable_mask(hwmgr); | |
2684 | } | |
2685 | ||
2686 | static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr) | |
2687 | { | |
2688 | struct smu7_hwmgr *data = | |
2689 | (struct smu7_hwmgr *)(hwmgr->backend); | |
2690 | uint32_t level; | |
2691 | ||
2692 | if (!data->sclk_dpm_key_disabled) | |
2693 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { | |
2694 | level = phm_get_lowest_enabled_level(hwmgr, | |
2695 | data->dpm_level_enable_mask.sclk_dpm_enable_mask); | |
d3f8c0ab | 2696 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2697 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
2698 | (1 << level)); | |
2699 | ||
2700 | } | |
2701 | ||
2702 | if (!data->mclk_dpm_key_disabled) { | |
2703 | if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) { | |
2704 | level = phm_get_lowest_enabled_level(hwmgr, | |
2705 | data->dpm_level_enable_mask.mclk_dpm_enable_mask); | |
d3f8c0ab | 2706 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2707 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
2708 | (1 << level)); | |
2709 | } | |
2710 | } | |
2711 | ||
2712 | if (!data->pcie_dpm_key_disabled) { | |
2713 | if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) { | |
2714 | level = phm_get_lowest_enabled_level(hwmgr, | |
2715 | data->dpm_level_enable_mask.pcie_dpm_enable_mask); | |
d3f8c0ab | 2716 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
2717 | PPSMC_MSG_PCIeDPM_ForceLevel, |
2718 | (level)); | |
2719 | } | |
2720 | } | |
2721 | ||
2722 | return 0; | |
570272d2 RZ |
2723 | } |
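/*
 * Sketch of what phm_get_lowest_enabled_level() must compute, judging by
 * its use above (an assumption; the helper lives elsewhere): the index of
 * the lowest set bit in an enable mask.
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t lowest_enabled_level(uint32_t mask)
{
	uint32_t level = 0;

	while (mask && !(mask & 1)) {	/* walk up to the first set bit */
		mask >>= 1;
		level++;
	}

	return level;	/* e.g. mask 0x0c (levels 2, 3 enabled) -> 2 */
}
#endif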
2724 | ||
2725 | static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level, | |
2726 | uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask) | |
2727 | { | |
2728 | uint32_t percentage; | |
2729 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2730 | struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table; | |
2731 | int32_t tmp_mclk; | |
2732 | int32_t tmp_sclk; | |
2733 | int32_t count; | |
2734 | ||
2735 | if (golden_dpm_table->mclk_table.count < 1) | |
2736 | return -EINVAL; | |
2737 | ||
2738 | percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value / | |
2739 | golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; | |
599a7e9f | 2740 | |
570272d2 RZ |
2741 | if (golden_dpm_table->mclk_table.count == 1) { |
2742 | percentage = 70; | |
2743 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value; | |
2744 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; | |
2745 | } else { | |
2746 | tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value; | |
2747 | *mclk_mask = golden_dpm_table->mclk_table.count - 2; | |
2748 | } | |
2749 | ||
2750 | tmp_sclk = tmp_mclk * percentage / 100; | |
2751 | ||
2752 | if (hwmgr->pp_table_version == PP_TABLE_V0) { | |
2753 | for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; | |
2754 | count >= 0; count--) { | |
2755 | if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) { | |
2756 | tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk; | |
2757 | *sclk_mask = count; | |
2758 | break; | |
2759 | } | |
2760 | } | |
dd70949d | 2761 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { |
570272d2 | 2762 | *sclk_mask = 0; |
dd70949d RZ |
2763 | tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk; |
2764 | } | |
570272d2 RZ |
2765 | |
2766 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2767 | *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; | |
2768 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { | |
2769 | struct phm_ppt_v1_information *table_info = | |
2770 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2771 | ||
2772 | for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) { | |
2773 | if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) { | |
2774 | tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk; | |
2775 | *sclk_mask = count; | |
2776 | break; | |
2777 | } | |
2778 | } | |
dd70949d | 2779 | if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { |
570272d2 | 2780 | *sclk_mask = 0; |
dd70949d RZ |
2781 | tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; |
2782 | } | |
570272d2 RZ |
2783 | |
2784 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2785 | *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; | |
2786 | } | |
2787 | ||
2788 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) | |
2789 | *mclk_mask = 0; | |
2790 | else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2791 | *mclk_mask = golden_dpm_table->mclk_table.count - 1; | |
2792 | ||
2793 | *pcie_mask = data->dpm_table.pcie_speed_table.count - 1; | |
dd70949d RZ |
2794 | hwmgr->pstate_sclk = tmp_sclk; |
2795 | hwmgr->pstate_mclk = tmp_mclk; | |
2796 | ||
570272d2 | 2797 | return 0; |
599a7e9f | 2798 | } |
570272d2 | 2799 | |
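/*
 * Worked example for the profiling-clock selection above (illustrative
 * numbers, not taken from a real pptable): with a golden peak SCLK of
 * 1266 MHz and a golden peak MCLK of 2000 MHz, percentage = 100 * 1266 /
 * 2000 = 63. Picking the second-highest MCLK level, say 1750 MHz, yields
 * tmp_sclk = 1750 * 63 / 100 = 1102 MHz, which the dependency-table scan
 * then snaps down to the nearest supported SCLK entry.
 */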
599a7e9f RZ |
2800 | static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, |
2801 | enum amd_dpm_forced_level level) | |
2802 | { | |
2803 | int ret = 0; | |
570272d2 RZ |
2804 | uint32_t sclk_mask = 0; |
2805 | uint32_t mclk_mask = 0; | |
2806 | uint32_t pcie_mask = 0; | |
599a7e9f | 2807 | |
dd70949d RZ |
2808 | if (hwmgr->pstate_sclk == 0) |
2809 | smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); | |
2810 | ||
599a7e9f RZ |
2811 | switch (level) { |
2812 | case AMD_DPM_FORCED_LEVEL_HIGH: | |
2813 | ret = smu7_force_dpm_highest(hwmgr); | |
599a7e9f RZ |
2814 | break; |
2815 | case AMD_DPM_FORCED_LEVEL_LOW: | |
2816 | ret = smu7_force_dpm_lowest(hwmgr); | |
599a7e9f RZ |
2817 | break; |
2818 | case AMD_DPM_FORCED_LEVEL_AUTO: | |
2819 | ret = smu7_unforce_dpm_levels(hwmgr); | |
599a7e9f | 2820 | break; |
570272d2 RZ |
2821 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: |
2822 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | |
2823 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: | |
2824 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | |
2825 | ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask); | |
2826 | if (ret) | |
2827 | return ret; | |
570272d2 RZ |
2828 | smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); |
2829 | smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); | |
2830 | smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); | |
2831 | break; | |
cb256cc3 | 2832 | case AMD_DPM_FORCED_LEVEL_MANUAL: |
570272d2 | 2833 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: |
599a7e9f RZ |
2834 | default: |
2835 | break; | |
2836 | } | |
2837 | ||
9947f704 RZ |
2838 | if (!ret) { |
2839 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2840 | smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); | |
2841 | else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) | |
2842 | smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); | |
2843 | } | |
2844 | return ret; | |
599a7e9f RZ |
2845 | } |
2846 | ||
2847 | static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr) | |
2848 | { | |
2849 | return sizeof(struct smu7_power_state); | |
2850 | } | |
2851 | ||
09be4a52 AD |
2852 | static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, |
2853 | uint32_t vblank_time_us) | |
2854 | { | |
2855 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2856 | uint32_t switch_limit_us; | |
2857 | ||
2858 | switch (hwmgr->chip_id) { | |
2859 | case CHIP_POLARIS10: | |
2860 | case CHIP_POLARIS11: | |
2861 | case CHIP_POLARIS12: | |
2862 | switch_limit_us = data->is_memory_gddr5 ? 190 : 150; | |
2863 | break; | |
0c24e7ef EH |
2864 | case CHIP_VEGAM: |
2865 | switch_limit_us = 30; | |
2866 | break; | |
09be4a52 AD |
2867 | default: |
2868 | switch_limit_us = data->is_memory_gddr5 ? 450 : 150; | |
2869 | break; | |
2870 | } | |
2871 | ||
2872 | return vblank_time_us < switch_limit_us; | |
2876 | } | |
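/*
 * Illustrative use of the limits above (hypothetical display timings): on
 * Polaris10 with GDDR5 the MCLK switch needs 190 us of vblank. A 60 Hz
 * mode with roughly 500 us of vblank passes, while a fast 144 Hz mode with
 * only ~150 us fails the check and MCLK switching gets disabled.
 */
#if 0	/* example only */
#include <stdbool.h>
#include <stdint.h>

static bool vblank_too_short(uint32_t vblank_time_us, uint32_t switch_limit_us)
{
	return vblank_time_us < switch_limit_us;	/* e.g. 150 < 190 -> true */
}
#endif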
599a7e9f RZ |
2877 | |
2878 | static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, | |
2879 | struct pp_power_state *request_ps, | |
2880 | const struct pp_power_state *current_ps) | |
2881 | { | |
600ae890 | 2882 | struct amdgpu_device *adev = hwmgr->adev; |
599a7e9f RZ |
2883 | struct smu7_power_state *smu7_ps = |
2884 | cast_phw_smu7_power_state(&request_ps->hardware); | |
2885 | uint32_t sclk; | |
2886 | uint32_t mclk; | |
2887 | struct PP_Clocks minimum_clocks = {0}; | |
2888 | bool disable_mclk_switching; | |
2889 | bool disable_mclk_switching_for_frame_lock; | |
599a7e9f RZ |
2890 | const struct phm_clock_and_voltage_limits *max_limits; |
2891 | uint32_t i; | |
2892 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
2893 | struct phm_ppt_v1_information *table_info = | |
2894 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
2895 | int32_t count; | |
2896 | int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; | |
2897 | ||
2898 | data->battery_state = (PP_StateUILabel_Battery == | |
2899 | request_ps->classification.ui_label); | |
2900 | ||
2901 | PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, | |
2902 | "VI should always have 2 performance levels", | |
2903 | ); | |
2904 | ||
600ae890 | 2905 | max_limits = adev->pm.ac_power ? |
599a7e9f RZ |
2906 | &(hwmgr->dyn_state.max_clock_voltage_on_ac) : |
2907 | &(hwmgr->dyn_state.max_clock_voltage_on_dc); | |
2908 | ||
2909 | /* Cap clock DPM tables at DC MAX if it is in DC. */ | |
600ae890 | 2910 | if (!adev->pm.ac_power) { |
599a7e9f RZ |
2911 | for (i = 0; i < smu7_ps->performance_level_count; i++) { |
2912 | if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk) | |
2913 | smu7_ps->performance_levels[i].memory_clock = max_limits->mclk; | |
2914 | if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk) | |
2915 | smu7_ps->performance_levels[i].engine_clock = max_limits->sclk; | |
2916 | } | |
2917 | } | |
2918 | ||
555fd70c RZ |
2919 | minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; |
2920 | minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; | |
599a7e9f RZ |
2921 | |
2922 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
2923 | PHM_PlatformCaps_StablePState)) { | |
2924 | max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); | |
2925 | stable_pstate_sclk = (max_limits->sclk * 75) / 100; | |
2926 | ||
2927 | for (count = table_info->vdd_dep_on_sclk->count - 1; | |
2928 | count >= 0; count--) { | |
2929 | if (stable_pstate_sclk >= | |
2930 | table_info->vdd_dep_on_sclk->entries[count].clk) { | |
2931 | stable_pstate_sclk = | |
2932 | table_info->vdd_dep_on_sclk->entries[count].clk; | |
2933 | break; | |
2934 | } | |
2935 | } | |
2936 | ||
2937 | if (count < 0) | |
2938 | stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; | |
2939 | ||
2940 | stable_pstate_mclk = max_limits->mclk; | |
2941 | ||
2942 | minimum_clocks.engineClock = stable_pstate_sclk; | |
2943 | minimum_clocks.memoryClock = stable_pstate_mclk; | |
2944 | } | |
2945 | ||
599a7e9f RZ |
2946 | disable_mclk_switching_for_frame_lock = phm_cap_enabled( |
2947 | hwmgr->platform_descriptor.platformCaps, | |
2948 | PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); | |
2949 | ||
2950 | ||
555fd70c | 2951 | if (hwmgr->display_config->num_display == 0) |
a9b3c001 AD |
2952 | disable_mclk_switching = false; |
2953 | else | |
555fd70c | 2954 | disable_mclk_switching = ((1 < hwmgr->display_config->num_display) || |
a9b3c001 | 2955 | disable_mclk_switching_for_frame_lock || |
555fd70c | 2956 | smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); |
599a7e9f RZ |
2957 | |
2958 | sclk = smu7_ps->performance_levels[0].engine_clock; | |
2959 | mclk = smu7_ps->performance_levels[0].memory_clock; | |
2960 | ||
2961 | if (disable_mclk_switching) | |
2962 | mclk = smu7_ps->performance_levels | |
2963 | [smu7_ps->performance_level_count - 1].memory_clock; | |
2964 | ||
2965 | if (sclk < minimum_clocks.engineClock) | |
2966 | sclk = (minimum_clocks.engineClock > max_limits->sclk) ? | |
2967 | max_limits->sclk : minimum_clocks.engineClock; | |
2968 | ||
2969 | if (mclk < minimum_clocks.memoryClock) | |
2970 | mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? | |
2971 | max_limits->mclk : minimum_clocks.memoryClock; | |
2972 | ||
2973 | smu7_ps->performance_levels[0].engine_clock = sclk; | |
2974 | smu7_ps->performance_levels[0].memory_clock = mclk; | |
2975 | ||
2976 | smu7_ps->performance_levels[1].engine_clock = | |
2977 | (smu7_ps->performance_levels[1].engine_clock >= | |
2978 | smu7_ps->performance_levels[0].engine_clock) ? | |
2979 | smu7_ps->performance_levels[1].engine_clock : | |
2980 | smu7_ps->performance_levels[0].engine_clock; | |
2981 | ||
2982 | if (disable_mclk_switching) { | |
2983 | if (mclk < smu7_ps->performance_levels[1].memory_clock) | |
2984 | mclk = smu7_ps->performance_levels[1].memory_clock; | |
2985 | ||
2986 | smu7_ps->performance_levels[0].memory_clock = mclk; | |
2987 | smu7_ps->performance_levels[1].memory_clock = mclk; | |
2988 | } else { | |
2989 | if (smu7_ps->performance_levels[1].memory_clock < | |
2990 | smu7_ps->performance_levels[0].memory_clock) | |
2991 | smu7_ps->performance_levels[1].memory_clock = | |
2992 | smu7_ps->performance_levels[0].memory_clock; | |
2993 | } | |
2994 | ||
2995 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
2996 | PHM_PlatformCaps_StablePState)) { | |
2997 | for (i = 0; i < smu7_ps->performance_level_count; i++) { | |
2998 | smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk; | |
2999 | smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk; | |
3000 | smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max; | |
3001 | smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max; | |
3002 | } | |
3003 | } | |
3004 | return 0; | |
3005 | } | |
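/*
 * Minimal sketch of the clamping rule applied above (assuming the usual
 * two-level VI state): a requested clock is raised to the display or
 * stable-pstate minimum, but never above the current AC/DC ceiling.
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t clamp_requested_clock(uint32_t clk, uint32_t min_clk,
				      uint32_t max_clk)
{
	if (clk < min_clk)
		clk = (min_clk > max_clk) ? max_clk : min_clk;

	return clk;
}
#endif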
3006 | ||
3007 | ||
f93f0c3a | 3008 | static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) |
599a7e9f RZ |
3009 | { |
3010 | struct pp_power_state *ps; | |
3011 | struct smu7_power_state *smu7_ps; | |
3012 | ||
3013 | if (hwmgr == NULL) | |
3014 | return -EINVAL; | |
3015 | ||
3016 | ps = hwmgr->request_ps; | |
3017 | ||
3018 | if (ps == NULL) | |
3019 | return -EINVAL; | |
3020 | ||
3021 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
3022 | ||
3023 | if (low) | |
3024 | return smu7_ps->performance_levels[0].memory_clock; | |
3025 | else | |
3026 | return smu7_ps->performance_levels | |
3027 | [smu7_ps->performance_level_count-1].memory_clock; | |
3028 | } | |
3029 | ||
f93f0c3a | 3030 | static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) |
599a7e9f RZ |
3031 | { |
3032 | struct pp_power_state *ps; | |
3033 | struct smu7_power_state *smu7_ps; | |
3034 | ||
3035 | if (hwmgr == NULL) | |
3036 | return -EINVAL; | |
3037 | ||
3038 | ps = hwmgr->request_ps; | |
3039 | ||
3040 | if (ps == NULL) | |
3041 | return -EINVAL; | |
3042 | ||
3043 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
3044 | ||
3045 | if (low) | |
3046 | return smu7_ps->performance_levels[0].engine_clock; | |
3047 | else | |
3048 | return smu7_ps->performance_levels | |
3049 | [smu7_ps->performance_level_count-1].engine_clock; | |
3050 | } | |
3051 | ||
3052 | static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, | |
3053 | struct pp_hw_power_state *hw_ps) | |
3054 | { | |
3055 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3056 | struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps; | |
3057 | ATOM_FIRMWARE_INFO_V2_2 *fw_info; | |
3058 | uint16_t size; | |
3059 | uint8_t frev, crev; | |
3060 | int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); | |
3061 | ||
3062 | /* First retrieve the Boot clocks and VDDC from the firmware info table. | |
3063 | * We assume here that fw_info is unchanged if this call fails. | |
3064 | */ | |
b3892e2b | 3065 | fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index, |
599a7e9f RZ |
3066 | &size, &frev, &crev); |
3067 | if (!fw_info) | |
3068 | /* During a test, there is no firmware info table. */ | |
3069 | return 0; | |
3070 | ||
3071 | /* Patch the state. */ | |
3072 | data->vbios_boot_state.sclk_bootup_value = | |
3073 | le32_to_cpu(fw_info->ulDefaultEngineClock); | |
3074 | data->vbios_boot_state.mclk_bootup_value = | |
3075 | le32_to_cpu(fw_info->ulDefaultMemoryClock); | |
3076 | data->vbios_boot_state.mvdd_bootup_value = | |
3077 | le16_to_cpu(fw_info->usBootUpMVDDCVoltage); | |
3078 | data->vbios_boot_state.vddc_bootup_value = | |
3079 | le16_to_cpu(fw_info->usBootUpVDDCVoltage); | |
3080 | data->vbios_boot_state.vddci_bootup_value = | |
3081 | le16_to_cpu(fw_info->usBootUpVDDCIVoltage); | |
3082 | data->vbios_boot_state.pcie_gen_bootup_value = | |
3083 | smu7_get_current_pcie_speed(hwmgr); | |
3084 | ||
3085 | data->vbios_boot_state.pcie_lane_bootup_value = | |
3086 | (uint16_t)smu7_get_current_pcie_lane_number(hwmgr); | |
3087 | ||
3088 | /* set boot power state */ | |
3089 | ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value; | |
3090 | ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value; | |
3091 | ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value; | |
3092 | ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value; | |
3093 | ||
3094 | return 0; | |
3095 | } | |
3096 | ||
3097 | static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) | |
3098 | { | |
3099 | int result; | |
3100 | unsigned long ret = 0; | |
3101 | ||
3102 | if (hwmgr->pp_table_version == PP_TABLE_V0) { | |
3103 | result = pp_tables_get_num_of_entries(hwmgr, &ret); | |
3104 | return result ? 0 : ret; | |
3105 | } else if (hwmgr->pp_table_version == PP_TABLE_V1) { | |
3106 | result = get_number_of_powerplay_table_entries_v1_0(hwmgr); | |
3107 | return result; | |
3108 | } | |
3109 | return 0; | |
3110 | } | |
3111 | ||
3112 | static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr, | |
3113 | void *state, struct pp_power_state *power_state, | |
3114 | void *pp_table, uint32_t classification_flag) | |
3115 | { | |
3116 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3117 | struct smu7_power_state *smu7_power_state = | |
3118 | (struct smu7_power_state *)(&(power_state->hardware)); | |
3119 | struct smu7_performance_level *performance_level; | |
3120 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; | |
3121 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = | |
3122 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; | |
3123 | PPTable_Generic_SubTable_Header *sclk_dep_table = | |
3124 | (PPTable_Generic_SubTable_Header *) | |
3125 | (((unsigned long)powerplay_table) + | |
3126 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); | |
3127 | ||
3128 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = | |
3129 | (ATOM_Tonga_MCLK_Dependency_Table *) | |
3130 | (((unsigned long)powerplay_table) + | |
3131 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); | |
3132 | ||
3133 | /* The following fields are not initialized here: id, orderedList, allStatesList */ | |
3134 | power_state->classification.ui_label = | |
3135 | (le16_to_cpu(state_entry->usClassification) & | |
3136 | ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> | |
3137 | ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; | |
3138 | power_state->classification.flags = classification_flag; | |
3139 | /* NOTE: There is a classification2 flag in BIOS that is not being used right now */ | |
3140 | ||
3141 | power_state->classification.temporary_state = false; | |
3142 | power_state->classification.to_be_deleted = false; | |
3143 | ||
3144 | power_state->validation.disallowOnDC = | |
3145 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & | |
3146 | ATOM_Tonga_DISALLOW_ON_DC)); | |
3147 | ||
3148 | power_state->pcie.lanes = 0; | |
3149 | ||
3150 | power_state->display.disableFrameModulation = false; | |
3151 | power_state->display.limitRefreshrate = false; | |
3152 | power_state->display.enableVariBright = | |
3153 | (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) & | |
3154 | ATOM_Tonga_ENABLE_VARIBRIGHT)); | |
3155 | ||
3156 | power_state->validation.supportedPowerLevels = 0; | |
3157 | power_state->uvd_clocks.VCLK = 0; | |
3158 | power_state->uvd_clocks.DCLK = 0; | |
3159 | power_state->temperatures.min = 0; | |
3160 | power_state->temperatures.max = 0; | |
3161 | ||
3162 | performance_level = &(smu7_power_state->performance_levels | |
3163 | [smu7_power_state->performance_level_count++]); | |
3164 | ||
3165 | PP_ASSERT_WITH_CODE( | |
d3f8c0ab | 3166 | (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), |
599a7e9f RZ |
3167 | "Performance levels exceeds SMC limit!", |
3168 | return -EINVAL); | |
3169 | ||
3170 | PP_ASSERT_WITH_CODE( | |
3171 | (smu7_power_state->performance_level_count <= | |
3172 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), | |
3173 | "Performance levels exceeds Driver limit!", | |
3174 | return -EINVAL); | |
3175 | ||
3176 | /* Performance levels are arranged from low to high. */ | |
3177 | performance_level->memory_clock = mclk_dep_table->entries | |
3178 | [state_entry->ucMemoryClockIndexLow].ulMclk; | |
3179 | if (sclk_dep_table->ucRevId == 0) | |
3180 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3181 | [state_entry->ucEngineClockIndexLow].ulSclk; | |
3182 | else if (sclk_dep_table->ucRevId == 1) | |
3183 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3184 | [state_entry->ucEngineClockIndexLow].ulSclk; | |
3185 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | |
3186 | state_entry->ucPCIEGenLow); | |
3187 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, | |
ed54d954 | 3188 | state_entry->ucPCIELaneLow); |
599a7e9f RZ |
3189 | |
3190 | performance_level = &(smu7_power_state->performance_levels | |
3191 | [smu7_power_state->performance_level_count++]); | |
3192 | performance_level->memory_clock = mclk_dep_table->entries | |
3193 | [state_entry->ucMemoryClockIndexHigh].ulMclk; | |
3194 | ||
3195 | if (sclk_dep_table->ucRevId == 0) | |
3196 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3197 | [state_entry->ucEngineClockIndexHigh].ulSclk; | |
3198 | else if (sclk_dep_table->ucRevId == 1) | |
3199 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | |
3200 | [state_entry->ucEngineClockIndexHigh].ulSclk; | |
3201 | ||
3202 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | |
3203 | state_entry->ucPCIEGenHigh); | |
3204 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, | |
3205 | state_entry->ucPCIELaneHigh); | |
3206 | ||
3207 | return 0; | |
3208 | } | |
3209 | ||
3210 | static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr, | |
3211 | unsigned long entry_index, struct pp_power_state *state) | |
3212 | { | |
3213 | int result; | |
3214 | struct smu7_power_state *ps; | |
3215 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3216 | struct phm_ppt_v1_information *table_info = | |
3217 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
3218 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = | |
3219 | table_info->vdd_dep_on_mclk; | |
3220 | ||
3221 | state->hardware.magic = PHM_VIslands_Magic; | |
3222 | ||
3223 | ps = (struct smu7_power_state *)(&state->hardware); | |
3224 | ||
3225 | result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state, | |
3226 | smu7_get_pp_table_entry_callback_func_v1); | |
3227 | ||
3228 | /* This is the earliest time we have all the dependency tables and the VBIOS boot state, | |
3229 | * since PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state. If there is | |
3230 | * only one VDDCI/MCLK level, check whether it matches the VBIOS boot state. | |
3231 | */ | |
3232 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { | |
3233 | if (dep_mclk_table->entries[0].clk != | |
3234 | data->vbios_boot_state.mclk_bootup_value) | |
89c67699 | 3235 | pr_debug("Single MCLK entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3236 | "does not match VBIOS boot MCLK level"); |
3237 | if (dep_mclk_table->entries[0].vddci != | |
3238 | data->vbios_boot_state.vddci_bootup_value) | |
89c67699 | 3239 | pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3240 | "does not match VBIOS boot VDDCI level"); |
3241 | } | |
3242 | ||
3243 | /* set DC compatible flag if this state supports DC */ | |
3244 | if (!state->validation.disallowOnDC) | |
3245 | ps->dc_compatible = true; | |
3246 | ||
3247 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) | |
3248 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; | |
3249 | ||
3250 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; | |
3251 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; | |
3252 | ||
3253 | if (!result) { | |
3254 | uint32_t i; | |
3255 | ||
3256 | switch (state->classification.ui_label) { | |
3257 | case PP_StateUILabel_Performance: | |
3258 | data->use_pcie_performance_levels = true; | |
3259 | for (i = 0; i < ps->performance_level_count; i++) { | |
3260 | if (data->pcie_gen_performance.max < | |
3261 | ps->performance_levels[i].pcie_gen) | |
3262 | data->pcie_gen_performance.max = | |
3263 | ps->performance_levels[i].pcie_gen; | |
3264 | ||
3265 | if (data->pcie_gen_performance.min > | |
3266 | ps->performance_levels[i].pcie_gen) | |
3267 | data->pcie_gen_performance.min = | |
3268 | ps->performance_levels[i].pcie_gen; | |
3269 | ||
3270 | if (data->pcie_lane_performance.max < | |
3271 | ps->performance_levels[i].pcie_lane) | |
3272 | data->pcie_lane_performance.max = | |
3273 | ps->performance_levels[i].pcie_lane; | |
3274 | if (data->pcie_lane_performance.min > | |
3275 | ps->performance_levels[i].pcie_lane) | |
3276 | data->pcie_lane_performance.min = | |
3277 | ps->performance_levels[i].pcie_lane; | |
3278 | } | |
3279 | break; | |
3280 | case PP_StateUILabel_Battery: | |
3281 | data->use_pcie_power_saving_levels = true; | |
3282 | ||
3283 | for (i = 0; i < ps->performance_level_count; i++) { | |
3284 | if (data->pcie_gen_power_saving.max < | |
3285 | ps->performance_levels[i].pcie_gen) | |
3286 | data->pcie_gen_power_saving.max = | |
3287 | ps->performance_levels[i].pcie_gen; | |
3288 | ||
3289 | if (data->pcie_gen_power_saving.min > | |
3290 | ps->performance_levels[i].pcie_gen) | |
3291 | data->pcie_gen_power_saving.min = | |
3292 | ps->performance_levels[i].pcie_gen; | |
3293 | ||
3294 | if (data->pcie_lane_power_saving.max < | |
3295 | ps->performance_levels[i].pcie_lane) | |
3296 | data->pcie_lane_power_saving.max = | |
3297 | ps->performance_levels[i].pcie_lane; | |
3298 | ||
3299 | if (data->pcie_lane_power_saving.min > | |
3300 | ps->performance_levels[i].pcie_lane) | |
3301 | data->pcie_lane_power_saving.min = | |
3302 | ps->performance_levels[i].pcie_lane; | |
3303 | } | |
3304 | break; | |
3305 | default: | |
3306 | break; | |
3307 | } | |
3308 | } | |
3309 | return 0; | |
3310 | } | |
3311 | ||
3312 | static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, | |
3313 | struct pp_hw_power_state *power_state, | |
3314 | unsigned int index, const void *clock_info) | |
3315 | { | |
3316 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3317 | struct smu7_power_state *ps = cast_phw_smu7_power_state(power_state); | |
3318 | const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info; | |
3319 | struct smu7_performance_level *performance_level; | |
3320 | uint32_t engine_clock, memory_clock; | |
3321 | uint16_t pcie_gen_from_bios; | |
3322 | ||
3323 | engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow; | |
3324 | memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow; | |
3325 | ||
3326 | if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) | |
3327 | data->highest_mclk = memory_clock; | |
3328 | ||
599a7e9f | 3329 | PP_ASSERT_WITH_CODE( |
d3f8c0ab | 3330 | (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)), |
599a7e9f RZ |
3331 | "Performance levels exceeds SMC limit!", |
3332 | return -EINVAL); | |
3333 | ||
3334 | PP_ASSERT_WITH_CODE( | |
da7800a8 | 3335 | (ps->performance_level_count < |
599a7e9f | 3336 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
da7800a8 RZ |
3337 | "Performance levels exceeds Driver limit, Skip!", |
3338 | return 0); | |
3339 | ||
3340 | performance_level = &(ps->performance_levels | |
3341 | [ps->performance_level_count++]); | |
599a7e9f RZ |
3342 | |
3343 | /* Performance levels are arranged from low to high. */ | |
3344 | performance_level->memory_clock = memory_clock; | |
3345 | performance_level->engine_clock = engine_clock; | |
3346 | ||
3347 | pcie_gen_from_bios = visland_clk_info->ucPCIEGen; | |
3348 | ||
3349 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios); | |
3350 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane); | |
3351 | ||
3352 | return 0; | |
3353 | } | |
3354 | ||
3355 | static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr, | |
3356 | unsigned long entry_index, struct pp_power_state *state) | |
3357 | { | |
3358 | int result; | |
3359 | struct smu7_power_state *ps; | |
3360 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3361 | struct phm_clock_voltage_dependency_table *dep_mclk_table = | |
3362 | hwmgr->dyn_state.vddci_dependency_on_mclk; | |
3363 | ||
3364 | memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state)); | |
3365 | ||
3366 | state->hardware.magic = PHM_VIslands_Magic; | |
3367 | ||
3368 | ps = (struct smu7_power_state *)(&state->hardware); | |
3369 | ||
3370 | result = pp_tables_get_entry(hwmgr, entry_index, state, | |
3371 | smu7_get_pp_table_entry_callback_func_v0); | |
3372 | ||
3373 | /* | |
3374 | * This is the earliest time we have all the dependency tables | |
3375 | * and the VBIOS boot state, since | |
3376 | * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot | |
3377 | * state. If there is only one VDDCI/MCLK level, check whether | |
3378 | * it matches the VBIOS boot state. | |
3379 | */ | |
3380 | if (dep_mclk_table != NULL && dep_mclk_table->count == 1) { | |
3381 | if (dep_mclk_table->entries[0].clk != | |
3382 | data->vbios_boot_state.mclk_bootup_value) | |
89c67699 | 3383 | pr_debug("Single MCLK entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3384 | "does not match VBIOS boot MCLK level"); |
3385 | if (dep_mclk_table->entries[0].v != | |
3386 | data->vbios_boot_state.vddci_bootup_value) | |
89c67699 | 3387 | pr_debug("Single VDDCI entry VDDCI/MCLK dependency table " |
599a7e9f RZ |
3388 | "does not match VBIOS boot VDDCI level"); |
3389 | } | |
3390 | ||
3391 | /* set DC compatible flag if this state supports DC */ | |
3392 | if (!state->validation.disallowOnDC) | |
3393 | ps->dc_compatible = true; | |
3394 | ||
3395 | if (state->classification.flags & PP_StateClassificationFlag_ACPI) | |
3396 | data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen; | |
3397 | ||
3398 | ps->uvd_clks.vclk = state->uvd_clocks.VCLK; | |
3399 | ps->uvd_clks.dclk = state->uvd_clocks.DCLK; | |
3400 | ||
3401 | if (!result) { | |
3402 | uint32_t i; | |
3403 | ||
3404 | switch (state->classification.ui_label) { | |
3405 | case PP_StateUILabel_Performance: | |
3406 | data->use_pcie_performance_levels = true; | |
3407 | ||
3408 | for (i = 0; i < ps->performance_level_count; i++) { | |
3409 | if (data->pcie_gen_performance.max < | |
3410 | ps->performance_levels[i].pcie_gen) | |
3411 | data->pcie_gen_performance.max = | |
3412 | ps->performance_levels[i].pcie_gen; | |
3413 | ||
3414 | if (data->pcie_gen_performance.min > | |
3415 | ps->performance_levels[i].pcie_gen) | |
3416 | data->pcie_gen_performance.min = | |
3417 | ps->performance_levels[i].pcie_gen; | |
3418 | ||
3419 | if (data->pcie_lane_performance.max < | |
3420 | ps->performance_levels[i].pcie_lane) | |
3421 | data->pcie_lane_performance.max = | |
3422 | ps->performance_levels[i].pcie_lane; | |
3423 | ||
3424 | if (data->pcie_lane_performance.min > | |
3425 | ps->performance_levels[i].pcie_lane) | |
3426 | data->pcie_lane_performance.min = | |
3427 | ps->performance_levels[i].pcie_lane; | |
3428 | } | |
3429 | break; | |
3430 | case PP_StateUILabel_Battery: | |
3431 | data->use_pcie_power_saving_levels = true; | |
3432 | ||
3433 | for (i = 0; i < ps->performance_level_count; i++) { | |
3434 | if (data->pcie_gen_power_saving.max < | |
3435 | ps->performance_levels[i].pcie_gen) | |
3436 | data->pcie_gen_power_saving.max = | |
3437 | ps->performance_levels[i].pcie_gen; | |
3438 | ||
3439 | if (data->pcie_gen_power_saving.min > | |
3440 | ps->performance_levels[i].pcie_gen) | |
3441 | data->pcie_gen_power_saving.min = | |
3442 | ps->performance_levels[i].pcie_gen; | |
3443 | ||
3444 | if (data->pcie_lane_power_saving.max < | |
3445 | ps->performance_levels[i].pcie_lane) | |
3446 | data->pcie_lane_power_saving.max = | |
3447 | ps->performance_levels[i].pcie_lane; | |
3448 | ||
3449 | if (data->pcie_lane_power_saving.min > | |
3450 | ps->performance_levels[i].pcie_lane) | |
3451 | data->pcie_lane_power_saving.min = | |
3452 | ps->performance_levels[i].pcie_lane; | |
3453 | } | |
3454 | break; | |
3455 | default: | |
3456 | break; | |
3457 | } | |
3458 | } | |
3459 | return 0; | |
3460 | } | |
3461 | ||
3462 | static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |
3463 | unsigned long entry_index, struct pp_power_state *state) | |
3464 | { | |
3465 | if (hwmgr->pp_table_version == PP_TABLE_V0) | |
3466 | return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state); | |
3467 | else if (hwmgr->pp_table_version == PP_TABLE_V1) | |
3468 | return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state); | |
3469 | ||
3470 | return 0; | |
3471 | } | |
3472 | ||
5b79d048 | 3473 | static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) |
2245b60f | 3474 | { |
b89c71d1 | 3475 | int i; |
5b79d048 | 3476 | u32 tmp = 0; |
b89c71d1 RZ |
3477 | |
3478 | if (!query) | |
3479 | return -EINVAL; | |
3480 | ||
b89c71d1 | 3481 | smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); |
5b79d048 | 3482 | tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
9e70b539 | 3483 | *query = tmp; |
b89c71d1 | 3484 | |
5b79d048 | 3485 | if (tmp != 0) |
b89c71d1 RZ |
3486 | return 0; |
3487 | ||
3488 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); | |
3489 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
3490 | ixSMU_PM_STATUS_94, 0); | |
3491 | ||
5b79d048 | 3492 | for (i = 0; i < 10; i++) { |
b89c71d1 RZ |
3493 | mdelay(1); |
3494 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); | |
5b79d048 | 3495 | tmp = cgs_read_ind_register(hwmgr->device, |
b89c71d1 RZ |
3496 | CGS_IND_REG__SMC, |
3497 | ixSMU_PM_STATUS_94); | |
5b79d048 | 3498 | if (tmp != 0) |
b89c71d1 RZ |
3499 | break; |
3500 | } | |
5b79d048 | 3501 | *query = tmp; |
2245b60f EH |
3502 | |
3503 | return 0; | |
3504 | } | |
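/*
 * The fallback above is a bounded poll: start SMU power logging, then
 * sample ixSMU_PM_STATUS_94 up to 10 times, one millisecond apart, until
 * a non-zero power reading appears. Generic sketch of the pattern, where
 * read_sample() is a hypothetical stand-in for the indirect register read:
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t poll_nonzero(uint32_t (*read_sample)(void), int tries)
{
	uint32_t val = 0;

	while (tries--) {
		val = read_sample();	/* driver sleeps ~1 ms per sample */
		if (val)
			break;
	}

	return val;	/* may still be 0 if the SMU never reported power */
}
#endif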
3505 | ||
9f8df7d7 TSD |
3506 | static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, |
3507 | void *value, int *size) | |
a6e36952 TSD |
3508 | { |
3509 | uint32_t sclk, mclk, activity_percent; | |
84877256 | 3510 | uint32_t offset, val_vid; |
a6e36952 TSD |
3511 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
3512 | ||
9f8df7d7 TSD |
3513 | /* size must be at least 4 bytes for all sensors */ |
3514 | if (*size < 4) | |
3515 | return -EINVAL; | |
3516 | ||
a6e36952 TSD |
3517 | switch (idx) { |
3518 | case AMDGPU_PP_SENSOR_GFX_SCLK: | |
d3f8c0ab | 3519 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); |
a6e36952 | 3520 | sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
cd7b0c66 | 3521 | *((uint32_t *)value) = sclk; |
9f8df7d7 | 3522 | *size = 4; |
a6e36952 TSD |
3523 | return 0; |
3524 | case AMDGPU_PP_SENSOR_GFX_MCLK: | |
d3f8c0ab | 3525 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); |
a6e36952 | 3526 | mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
cd7b0c66 | 3527 | *((uint32_t *)value) = mclk; |
9f8df7d7 | 3528 | *size = 4; |
a6e36952 TSD |
3529 | return 0; |
3530 | case AMDGPU_PP_SENSOR_GPU_LOAD: | |
d3f8c0ab | 3531 | offset = data->soft_regs_start + smum_get_offsetof(hwmgr, |
a6e36952 TSD |
3532 | SMU_SoftRegisters, |
3533 | AverageGraphicsActivity); | |
3534 | ||
3535 | activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset); | |
3536 | activity_percent += 0x80; | |
3537 | activity_percent >>= 8; | |
cd7b0c66 | 3538 | *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; |
9f8df7d7 | 3539 | *size = 4; |
a6e36952 TSD |
3540 | return 0; |
3541 | case AMDGPU_PP_SENSOR_GPU_TEMP: | |
cd7b0c66 | 3542 | *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr); |
9f8df7d7 | 3543 | *size = 4; |
a6e36952 | 3544 | return 0; |
3de4ec57 | 3545 | case AMDGPU_PP_SENSOR_UVD_POWER: |
cd7b0c66 | 3546 | *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; |
9f8df7d7 | 3547 | *size = 4; |
3de4ec57 TSD |
3548 | return 0; |
3549 | case AMDGPU_PP_SENSOR_VCE_POWER: | |
cd7b0c66 | 3550 | *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; |
9f8df7d7 | 3551 | *size = 4; |
3de4ec57 | 3552 | return 0; |
2245b60f | 3553 | case AMDGPU_PP_SENSOR_GPU_POWER: |
5b79d048 | 3554 | return smu7_get_gpu_power(hwmgr, (uint32_t *)value); |
84877256 RZ |
3555 | case AMDGPU_PP_SENSOR_VDDGFX: |
3556 | if ((data->vr_config & 0xff) == 0x2) | |
3557 | val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, | |
3558 | CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID); | |
3559 | else | |
3560 | val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, | |
3561 | CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID); | |
3562 | ||
3563 | *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid); | |
3564 | return 0; | |
a6e36952 TSD |
3565 | default: |
3566 | return -EINVAL; | |
3567 | } | |
3568 | } | |
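/*
 * The GPU-load path above converts the SMU's 8.8 fixed-point activity
 * value to an integer percentage with round-to-nearest: adding 0x80 adds
 * 0.5 in 8.8 format before the >> 8 truncation.
 */
#if 0	/* example only */
#include <stdint.h>

static uint32_t fixed88_to_percent(uint32_t raw)
{
	uint32_t pct = (raw + 0x80) >> 8;	/* e.g. 0x63c0 (99.75) -> 100 */

	return pct > 100 ? 100 : pct;	/* clamp spurious readings */
}
#endif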
3569 | ||
599a7e9f RZ |
3570 | static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) |
3571 | { | |
3572 | const struct phm_set_power_state_input *states = | |
3573 | (const struct phm_set_power_state_input *)input; | |
3574 | const struct smu7_power_state *smu7_ps = | |
3575 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3576 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3577 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
3578 | uint32_t sclk = smu7_ps->performance_levels | |
3579 | [smu7_ps->performance_level_count - 1].engine_clock; | |
3580 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
3581 | uint32_t mclk = smu7_ps->performance_levels | |
3582 | [smu7_ps->performance_level_count - 1].memory_clock; | |
3583 | struct PP_Clocks min_clocks = {0}; | |
3584 | uint32_t i; | |
599a7e9f | 3585 | |
599a7e9f RZ |
3586 | for (i = 0; i < sclk_table->count; i++) { |
3587 | if (sclk == sclk_table->dpm_levels[i].value) | |
3588 | break; | |
3589 | } | |
3590 | ||
17c7c7e7 | 3591 | if (i >= sclk_table->count) { |
599a7e9f | 3592 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; |
17c7c7e7 RZ |
3593 | sclk_table->dpm_levels[i-1].value = sclk; |
3594 | } else { | |
599a7e9f RZ |
3595 | /* TODO: Check SCLK in DAL's minimum clocks |
3596 | * in case DeepSleep divider update is required. | |
3597 | */ | |
3598 | if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR && | |
3599 | (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK || | |
3600 | data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) | |
3601 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; | |
3602 | } | |
3603 | ||
3604 | for (i = 0; i < mclk_table->count; i++) { | |
3605 | if (mclk == mclk_table->dpm_levels[i].value) | |
3606 | break; | |
3607 | } | |
3608 | ||
17c7c7e7 | 3609 | if (i >= mclk_table->count) { |
599a7e9f | 3610 | data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; |
17c7c7e7 RZ |
3611 | mclk_table->dpm_levels[i-1].value = mclk; |
3612 | } | |
599a7e9f | 3613 | |
555fd70c | 3614 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
599a7e9f RZ |
3615 | data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; |
3616 | ||
3617 | return 0; | |
3618 | } | |
3619 | ||
3620 | static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr, | |
3621 | const struct smu7_power_state *smu7_ps) | |
3622 | { | |
3623 | uint32_t i; | |
3624 | uint32_t sclk, max_sclk = 0; | |
3625 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3626 | struct smu7_dpm_table *dpm_table = &data->dpm_table; | |
3627 | ||
3628 | for (i = 0; i < smu7_ps->performance_level_count; i++) { | |
3629 | sclk = smu7_ps->performance_levels[i].engine_clock; | |
3630 | if (max_sclk < sclk) | |
3631 | max_sclk = sclk; | |
3632 | } | |
3633 | ||
3634 | for (i = 0; i < dpm_table->sclk_table.count; i++) { | |
3635 | if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk) | |
3636 | return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ? | |
3637 | dpm_table->pcie_speed_table.dpm_levels | |
3638 | [dpm_table->pcie_speed_table.count - 1].value : | |
3639 | dpm_table->pcie_speed_table.dpm_levels[i].value); | |
3640 | } | |
3641 | ||
3642 | return 0; | |
3643 | } | |
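/*
 * Sketch of the index mapping above: the SCLK level that carries the
 * state's peak engine clock selects the PCIe speed level, clamped to the
 * last entry of the (typically shorter) pcie_speed_table.
 */
#if 0	/* example only */
#include <stdint.h>

static uint16_t pcie_speed_for_sclk_index(const uint16_t *pcie_levels,
					  uint32_t pcie_count,
					  uint32_t sclk_index)
{
	uint32_t idx = (sclk_index >= pcie_count) ? pcie_count - 1 : sclk_index;

	return pcie_levels[idx];	/* e.g. 8 SCLK levels map onto 3 PCIe levels */
}
#endif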
3644 | ||
3645 | static int smu7_request_link_speed_change_before_state_change( | |
3646 | struct pp_hwmgr *hwmgr, const void *input) | |
3647 | { | |
3648 | const struct phm_set_power_state_input *states = | |
3649 | (const struct phm_set_power_state_input *)input; | |
3650 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3651 | const struct smu7_power_state *smu7_nps = | |
3652 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3653 | const struct smu7_power_state *polaris10_cps = | |
3654 | cast_const_phw_smu7_power_state(states->pcurrent_state); | |
3655 | ||
3656 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps); | |
3657 | uint16_t current_link_speed; | |
3658 | ||
3659 | if (data->force_pcie_gen == PP_PCIEGenInvalid) | |
3660 | current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps); | |
3661 | else | |
3662 | current_link_speed = data->force_pcie_gen; | |
3663 | ||
3664 | data->force_pcie_gen = PP_PCIEGenInvalid; | |
3665 | data->pspp_notify_required = false; | |
3666 | ||
3667 | if (target_link_speed > current_link_speed) { | |
3668 | switch (target_link_speed) { | |
37a94791 | 3669 | #ifdef CONFIG_ACPI |
599a7e9f | 3670 | case PP_PCIEGen3: |
e1deba28 | 3671 | if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false)) |
599a7e9f RZ |
3672 | break; |
3673 | data->force_pcie_gen = PP_PCIEGen2; | |
3674 | if (current_link_speed == PP_PCIEGen2) | |
3675 | break; | |
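| /* fall through - try a Gen2 request when the Gen3 request fails */ | |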
3676 | case PP_PCIEGen2: | |
e1deba28 | 3677 | if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false)) |
599a7e9f | 3678 | break; |
37a94791 | 3679 | #endif |
599a7e9f RZ |
3680 | default: |
3681 | data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr); | |
3682 | break; | |
3683 | } | |
3684 | } else { | |
3685 | if (target_link_speed < current_link_speed) | |
3686 | data->pspp_notify_required = true; | |
3687 | } | |
3688 | ||
3689 | return 0; | |
3690 | } | |
3691 | ||
3692 | static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |
3693 | { | |
3694 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3695 | ||
3696 | if (0 == data->need_update_smu7_dpm_table) | |
3697 | return 0; | |
3698 | ||
3699 | if ((0 == data->sclk_dpm_key_disabled) && | |
3700 | (data->need_update_smu7_dpm_table & | |
3701 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | |
3702 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3703 | "Trying to freeze SCLK DPM when DPM is disabled", | |
3704 | ); | |
d3f8c0ab | 3705 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
599a7e9f RZ |
3706 | PPSMC_MSG_SCLKDPM_FreezeLevel), |
3707 | "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", | |
3708 | return -EINVAL); | |
3709 | } | |
3710 | ||
3711 | if ((0 == data->mclk_dpm_key_disabled) && | |
3712 | (data->need_update_smu7_dpm_table & | |
3713 | DPMTABLE_OD_UPDATE_MCLK)) { | |
3714 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3715 | "Trying to freeze MCLK DPM when DPM is disabled", | |
3716 | ); | |
d3f8c0ab | 3717 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
599a7e9f RZ |
3718 | PPSMC_MSG_MCLKDPM_FreezeLevel), |
3719 | "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", | |
3720 | return -EINVAL); | |
3721 | } | |
3722 | ||
3723 | return 0; | |
3724 | } | |
3725 | ||
3726 | static int smu7_populate_and_upload_sclk_mclk_dpm_levels( | |
3727 | struct pp_hwmgr *hwmgr, const void *input) | |
3728 | { | |
3729 | int result = 0; | |
599a7e9f | 3730 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
599a7e9f | 3731 | struct smu7_dpm_table *dpm_table = &data->dpm_table; |
49fd66e5 RZ |
3732 | uint32_t count; |
3733 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); | |
3734 | struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); | |
3735 | struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); | |
599a7e9f RZ |
3736 | |
3737 | if (0 == data->need_update_smu7_dpm_table) | |
3738 | return 0; | |
3739 | ||
49fd66e5 RZ |
3740 | if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) { |
3741 | for (count = 0; count < dpm_table->sclk_table.count; count++) { | |
3742 | dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled; | |
3743 | dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock; | |
599a7e9f RZ |
3744 | } |
3745 | } | |
3746 | ||
49fd66e5 RZ |
3747 | if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { |
3748 | for (count = 0; count < dpm_table->mclk_table.count; count++) { | |
3749 | dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled; | |
3750 | dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock; | |
599a7e9f RZ |
3751 | } |
3752 | } | |
3753 | ||
3754 | if (data->need_update_smu7_dpm_table & | |
3755 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) { | |
3756 | result = smum_populate_all_graphic_levels(hwmgr); | |
3757 | PP_ASSERT_WITH_CODE((0 == result), | |
3758 | "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", | |
3759 | return result); | |
3760 | } | |
3761 | ||
3762 | if (data->need_update_smu7_dpm_table & | |
3763 | (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { | |
3765 | /* populate the MCLK DPM table to the SMU7 */ | |
3765 | result = smum_populate_all_memory_levels(hwmgr); | |
3766 | PP_ASSERT_WITH_CODE((0 == result), | |
3767 | "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", | |
3768 | return result); | |
3769 | } | |
3770 | ||
3771 | return result; | |
3772 | } | |
3773 | ||
3774 | static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr, | |
3775 | struct smu7_single_dpm_table *dpm_table, | |
3776 | uint32_t low_limit, uint32_t high_limit) | |
3777 | { | |
3778 | uint32_t i; | |
3779 | ||
3780 | for (i = 0; i < dpm_table->count; i++) { | |
ecfee95a RZ |
3781 | /* skip the trim if OD (overdrive) is enabled */ | |
3782 | if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit | |
3783 | || dpm_table->dpm_levels[i].value > high_limit)) | |
599a7e9f RZ |
3784 | dpm_table->dpm_levels[i].enabled = false; |
3785 | else | |
3786 | dpm_table->dpm_levels[i].enabled = true; | |
3787 | } | |
3788 | ||
3789 | return 0; | |
3790 | } | |
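/*
 * Worked example for the trim above (illustrative table): with SCLK levels
 * {300, 600, 900, 1100} MHz and a state spanning 600..900 MHz, the 300 and
 * 1100 MHz levels are disabled and drop out of the enable mask built later
 * by phm_get_dpm_level_enable_mask_value(); with OD enabled the trim is
 * skipped and every level stays eligible.
 */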
3791 | ||
3792 | static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr, | |
3793 | const struct smu7_power_state *smu7_ps) | |
3794 | { | |
3795 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3796 | uint32_t high_limit_count; | |
3797 | ||
3798 | PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1), | |
3799 | "power state did not have any performance level", | |
3800 | return -EINVAL); | |
3801 | ||
3802 | high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1; | |
3803 | ||
3804 | smu7_trim_single_dpm_states(hwmgr, | |
3805 | &(data->dpm_table.sclk_table), | |
3806 | smu7_ps->performance_levels[0].engine_clock, | |
3807 | smu7_ps->performance_levels[high_limit_count].engine_clock); | |
3808 | ||
3809 | smu7_trim_single_dpm_states(hwmgr, | |
3810 | &(data->dpm_table.mclk_table), | |
3811 | smu7_ps->performance_levels[0].memory_clock, | |
3812 | smu7_ps->performance_levels[high_limit_count].memory_clock); | |
3813 | ||
3814 | return 0; | |
3815 | } | |
3816 | ||
3817 | static int smu7_generate_dpm_level_enable_mask( | |
3818 | struct pp_hwmgr *hwmgr, const void *input) | |
3819 | { | |
5c16f36f | 3820 | int result = 0; |
599a7e9f RZ |
3821 | const struct phm_set_power_state_input *states = |
3822 | (const struct phm_set_power_state_input *)input; | |
3823 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3824 | const struct smu7_power_state *smu7_ps = | |
3825 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3826 | ||
5c16f36f | 3827 | |
ecfee95a | 3828 | result = smu7_trim_dpm_states(hwmgr, smu7_ps); |
599a7e9f RZ |
3829 | if (result) |
3830 | return result; | |
3831 | ||
3832 | data->dpm_level_enable_mask.sclk_dpm_enable_mask = | |
3833 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table); | |
3834 | data->dpm_level_enable_mask.mclk_dpm_enable_mask = | |
3835 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table); | |
3836 | data->dpm_level_enable_mask.pcie_dpm_enable_mask = | |
3837 | phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table); | |
3838 | ||
3839 | return 0; | |
3840 | } | |
3841 | ||
3842 | static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |
3843 | { | |
3844 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3845 | ||
3846 | if (0 == data->need_update_smu7_dpm_table) | |
3847 | return 0; | |
3848 | ||
3849 | if ((0 == data->sclk_dpm_key_disabled) && | |
3850 | (data->need_update_smu7_dpm_table & | |
3851 | (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { | |
3852 | ||
3853 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3854 | "Trying to Unfreeze SCLK DPM when DPM is disabled", | |
3855 | ); | |
d3f8c0ab | 3856 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
599a7e9f RZ |
3857 | PPSMC_MSG_SCLKDPM_UnfreezeLevel), |
3858 | "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", | |
3859 | return -EINVAL); | |
3860 | } | |
3861 | ||
3862 | if ((0 == data->mclk_dpm_key_disabled) && | |
3863 | (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { | |
3864 | ||
3865 | PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr), | |
3866 | "Trying to Unfreeze MCLK DPM when DPM is disabled", | |
3867 | ); | |
d3f8c0ab | 3868 | PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, |
fd78e6af | 3869 | PPSMC_MSG_MCLKDPM_UnfreezeLevel), |
599a7e9f RZ |
3870 | "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", |
3871 | return -EINVAL); | |
3872 | } | |
3873 | ||
49fd66e5 | 3874 | data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; |
599a7e9f RZ |
3875 | |
3876 | return 0; | |
3877 | } | |
3878 | ||
3879 | static int smu7_notify_link_speed_change_after_state_change( | |
3880 | struct pp_hwmgr *hwmgr, const void *input) | |
3881 | { | |
3882 | const struct phm_set_power_state_input *states = | |
3883 | (const struct phm_set_power_state_input *)input; | |
3884 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3885 | const struct smu7_power_state *smu7_ps = | |
3886 | cast_const_phw_smu7_power_state(states->pnew_state); | |
3887 | uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps); | |
3888 | uint8_t request; | |
3889 | ||
3890 | if (data->pspp_notify_required) { | |
3891 | if (target_link_speed == PP_PCIEGen3) | |
3892 | request = PCIE_PERF_REQ_GEN3; | |
3893 | else if (target_link_speed == PP_PCIEGen2) | |
3894 | request = PCIE_PERF_REQ_GEN2; | |
3895 | else | |
3896 | request = PCIE_PERF_REQ_GEN1; | |
3897 | ||
3898 | if (request == PCIE_PERF_REQ_GEN1 && | |
3899 | smu7_get_current_pcie_speed(hwmgr) > 0) | |
3900 | return 0; | |
3901 | ||
62ccb653 | 3902 | #ifdef CONFIG_ACPI |
e1deba28 | 3903 | if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) { |
599a7e9f | 3904 | if (PP_PCIEGen2 == target_link_speed) |
b5c11b8e | 3905 | pr_info("PSPP request to switch to Gen2 from Gen3 Failed!"); |
599a7e9f | 3906 | else |
b5c11b8e | 3907 | pr_info("PSPP request to switch to Gen1 from Gen2 Failed!"); |
599a7e9f | 3908 | } |
62ccb653 | 3909 | #endif |
599a7e9f RZ |
3910 | } |
3911 | ||
3912 | return 0; | |
3913 | } | |
3914 | ||
3915 | static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) | |
3916 | { | |
3917 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3918 | ||
0c24e7ef EH |
3919 | if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { |
3920 | if (hwmgr->chip_id == CHIP_VEGAM) | |
3921 | smum_send_msg_to_smc_with_parameter(hwmgr, | |
3922 | (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2); | |
3923 | else | |
3924 | smum_send_msg_to_smc_with_parameter(hwmgr, | |
3925 | (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); | |
3926 | } | |
1756f1bb | 3927 | return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; |
599a7e9f RZ |
3928 | } |
3929 | ||
3930 | static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) | |
3931 | { | |
3932 | int tmp_result, result = 0; | |
3933 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
3934 | ||
3935 | tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input); | |
3936 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3937 | "Failed to find DPM states clocks in DPM table!", | |
3938 | result = tmp_result); | |
3939 | ||
3940 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
3941 | PHM_PlatformCaps_PCIEPerformanceRequest)) { | |
3942 | tmp_result = | |
3943 | smu7_request_link_speed_change_before_state_change(hwmgr, input); | |
3944 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3945 | "Failed to request link speed change before state change!", | |
3946 | result = tmp_result); | |
3947 | } | |
3948 | ||
3949 | tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr); | |
3950 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3951 | "Failed to freeze SCLK MCLK DPM!", result = tmp_result); | |
3952 | ||
3953 | tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); | |
3954 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3955 | "Failed to populate and upload SCLK MCLK DPM levels!", | |
3956 | result = tmp_result); | |
3957 | ||
3c9d1fde RZ |
3958 | tmp_result = smu7_update_avfs(hwmgr); |
3959 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3960 | "Failed to update avfs voltages!", | |
3961 | result = tmp_result); | |
3962 | ||
599a7e9f RZ |
3963 | tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input); |
3964 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3965 | "Failed to generate DPM level enabled mask!", | |
3966 | result = tmp_result); | |
3967 | ||
3968 | tmp_result = smum_update_sclk_threshold(hwmgr); | |
3969 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3970 | "Failed to update SCLK threshold!", | |
3971 | result = tmp_result); | |
3972 | ||
3973 | tmp_result = smu7_notify_smc_display(hwmgr); | |
3974 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3975 | "Failed to notify smc display settings!", | |
3976 | result = tmp_result); | |
3977 | ||
3978 | tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr); | |
3979 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3980 | "Failed to unfreeze SCLK MCLK DPM!", | |
3981 | result = tmp_result); | |
3982 | ||
3983 | tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr); | |
3984 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3985 | "Failed to upload DPM level enabled mask!", | |
3986 | result = tmp_result); | |
3987 | ||
3988 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
3989 | PHM_PlatformCaps_PCIEPerformanceRequest)) { | |
3990 | tmp_result = | |
3991 | smu7_notify_link_speed_change_after_state_change(hwmgr, input); | |
3992 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
3993 | "Failed to notify link speed change after state change!", | |
3994 | result = tmp_result); | |
3995 | } | |
3996 | data->apply_optimized_settings = false; | |
3997 | return result; | |
3998 | } | |
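/*
 * Note on ordering: the sequence above freezes SCLK/MCLK DPM before the
 * tables are repopulated and only unfreezes after the new levels and the
 * SCLK threshold are uploaded, so the SMC never switches levels against a
 * half-written table. Each step records its error but the sequence still
 * runs to completion; 'result' holds the last failure, if any.
 */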
3999 | ||
4000 | static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm) | |
4001 | { | |
4002 | hwmgr->thermal_controller. | |
4003 | advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm; | |
4004 | ||
d3f8c0ab | 4005 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
4006 | PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm); |
4007 | } | |
4008 | ||
f8a4c11b BX |
4009 | static int |
4010 | smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) | |
599a7e9f RZ |
4011 | { |
4012 | PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay; | |
4013 | ||
d3f8c0ab | 4014 | return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1; |
599a7e9f RZ |
4015 | } |
4016 | ||
f8a4c11b BX |
4017 | static int |
4018 | smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) | |
599a7e9f | 4019 | { |
555fd70c RZ |
4020 | if (hwmgr->display_config->num_display > 1 && |
4021 | !hwmgr->display_config->multi_monitor_in_sync) | |
599a7e9f RZ |
4022 | smu7_notify_smc_display_change(hwmgr, false); |
4023 | ||
4024 | return 0; | |
4025 | } | |
4026 | ||
4027 | /** | |
4028 | * Programs the display gap | |
4029 | * | |
4030 | * @param hwmgr the address of the powerplay hardware manager. | |
4031 | * @return always 0 | |
4032 | */ | |
f8a4c11b | 4033 | static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4034 | { |
4035 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
599a7e9f RZ |
4036 | uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); |
4037 | uint32_t display_gap2; | |
4038 | uint32_t pre_vbi_time_in_us; | |
4039 | uint32_t frame_time_in_us; | |
555fd70c | 4040 | uint32_t ref_clock, refresh_rate; |
599a7e9f | 4041 | |
555fd70c | 4042 | display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); |
599a7e9f RZ |
4043 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); |
4044 | ||
2538090c | 4045 | ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); |
555fd70c | 4046 | refresh_rate = hwmgr->display_config->vrefresh; |
599a7e9f RZ |
4047 | |
4048 | if (0 == refresh_rate) | |
4049 | refresh_rate = 60; | |
4050 | ||
4051 | frame_time_in_us = 1000000 / refresh_rate; | |
4052 | ||
555fd70c | 4053 | pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; |
8b95f4f7 | 4054 | |
599a7e9f RZ |
4055 | data->frame_time_x2 = frame_time_in_us * 2 / 100; |
4056 | ||
4057 | display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); | |
4058 | ||
4059 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2); | |
4060 | ||
4061 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
d3f8c0ab | 4062 | data->soft_regs_start + smum_get_offsetof(hwmgr, |
599a7e9f RZ |
4063 | SMU_SoftRegisters, |
4064 | PreVBlankGap), 0x64); | |
4065 | ||
4066 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
d3f8c0ab | 4067 | data->soft_regs_start + smum_get_offsetof(hwmgr, |
599a7e9f RZ |
4068 | SMU_SoftRegisters, |
4069 | VBlankTimeout), | |
4070 | (frame_time_in_us - pre_vbi_time_in_us)); | |
4071 | ||
4072 | return 0; | |
4073 | } | |
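/*
 * Worked example (illustrative numbers, assuming a 60 Hz panel with a
 * 300 us minimum vblank time):
 *   frame_time_in_us   = 1000000 / 60       = 16666 us
 *   pre_vbi_time_in_us = 16666 - 200 - 300  = 16166 us
 *   frame_time_x2      = 16666 * 2 / 100    = 333 (2x frame time, 100 us units)
 *   VBlankTimeout      = 16666 - 16166      = 500 us
 * display_gap2 scales pre_vbi_time_in_us by ref_clock / 100, converting
 * microseconds into reference-clock cycles.
 */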
4074 | ||
f8a4c11b | 4075 | static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4076 | { |
4077 | return smu7_program_display_gap(hwmgr); | |
4078 | } | |
4079 | ||
4080 | /** | |
4081 | * Set maximum target operating fan output RPM | |
4082 | * | |
4083 | * @param hwmgr: the address of the powerplay hardware manager. | |
4084 | * @param us_max_fan_rpm: max operating fan RPM value. | |
4085 | * @return The response that came from the SMC. | |
4086 | */ | |
4087 | static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm) | |
4088 | { | |
4089 | hwmgr->thermal_controller. | |
4090 | advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm; | |
4091 | ||
d3f8c0ab | 4092 | return smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
4093 | PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm); |
4094 | } | |
4095 | ||
031ec948 RZ |
4096 | static const struct amdgpu_irq_src_funcs smu7_irq_funcs = { |
4097 | .process = phm_irq_process, | |
4098 | }; | |
4099 | ||
4d200372 | 4100 | static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) |
599a7e9f | 4101 | { |
031ec948 RZ |
4102 | struct amdgpu_irq_src *source = |
4103 | kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); | |
4104 | ||
4105 | if (!source) | |
4106 | return -ENOMEM; | |
4107 | ||
4108 | source->funcs = &smu7_irq_funcs; | |
4109 | ||
4110 | amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), | |
1ffdeca6 | 4111 | AMDGPU_IRQ_CLIENTID_LEGACY, |
091aec0b | 4112 | VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, |
031ec948 RZ |
4113 | source); |
4114 | amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), | |
1ffdeca6 | 4115 | AMDGPU_IRQ_CLIENTID_LEGACY, |
091aec0b | 4116 | VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, |
031ec948 RZ |
4117 | source); |
4118 | ||
4119 | /* Register CTF(GPIO_19) interrupt */ | |
4120 | amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), | |
1ffdeca6 | 4121 | AMDGPU_IRQ_CLIENTID_LEGACY, |
091aec0b | 4122 | VISLANDS30_IV_SRCID_GPIO_19, |
031ec948 RZ |
4123 | source); |
4124 | ||
599a7e9f RZ |
4125 | return 0; |
4126 | } | |
4127 | ||
f8a4c11b BX |
4128 | static bool |
4129 | smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) | |
599a7e9f RZ |
4130 | { |
4131 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4132 | bool is_update_required = false; | |
599a7e9f | 4133 | |
555fd70c | 4134 | if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) |
599a7e9f RZ |
4135 | is_update_required = true; |
4136 | ||
ec2e082a AD |
4137 | if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh) |
4138 | is_update_required = true; | |
4139 | ||
599a7e9f | 4140 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { |
555fd70c | 4141 | if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && |
599a7e9f | 4142 | (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || |
555fd70c | 4143 | hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) |
599a7e9f RZ |
4144 | is_update_required = true; |
4145 | } | |
4146 | return is_update_required; | |
4147 | } | |
4148 | ||
4149 | static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1, | |
4150 | const struct smu7_performance_level *pl2) | |
4151 | { | |
4152 | return ((pl1->memory_clock == pl2->memory_clock) && | |
4153 | (pl1->engine_clock == pl2->engine_clock) && | |
4154 | (pl1->pcie_gen == pl2->pcie_gen) && | |
4155 | (pl1->pcie_lane == pl2->pcie_lane)); | |
4156 | } | |
4157 | ||
f8a4c11b BX |
4158 | static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, |
4159 | const struct pp_hw_power_state *pstate1, | |
4160 | const struct pp_hw_power_state *pstate2, bool *equal) | |
599a7e9f | 4161 | { |
9faa6b02 RZ |
4162 | const struct smu7_power_state *psa; |
4163 | const struct smu7_power_state *psb; | |
599a7e9f | 4164 | int i; |
49fd66e5 | 4165 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); |
599a7e9f RZ |
4166 | |
4167 | if (pstate1 == NULL || pstate2 == NULL || equal == NULL) | |
4168 | return -EINVAL; | |
4169 | ||
9faa6b02 RZ |
4170 | psa = cast_const_phw_smu7_power_state(pstate1); |
4171 | psb = cast_const_phw_smu7_power_state(pstate2); | |
599a7e9f RZ |
4172 | /* If the two states don't even have the same number of performance levels they cannot be the same state. */ |
4173 | if (psa->performance_level_count != psb->performance_level_count) { | |
4174 | *equal = false; | |
4175 | return 0; | |
4176 | } | |
4177 | ||
4178 | for (i = 0; i < psa->performance_level_count; i++) { | |
4179 | if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { | |
4180 | /* If we have found even one performance level pair that is different the states are different. */ | |
4181 | *equal = false; | |
4182 | return 0; | |
4183 | } | |
4184 | } | |
4185 | ||
4186 | /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ | |
4187 | *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); | |
4188 | *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); | |
4189 | *equal &= (psa->sclk_threshold == psb->sclk_threshold); | |
49fd66e5 RZ |
4190 | /* For OD call, set value based on flag */ |
4191 | *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | | |
4192 | DPMTABLE_OD_UPDATE_MCLK | | |
4193 | DPMTABLE_OD_UPDATE_VDDC)); | |
599a7e9f RZ |
4194 | |
4195 | return 0; | |
4196 | } | |
4197 | ||
9da00630 | 4198 | static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4199 | { |
4200 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4201 | ||
4202 | uint32_t vbios_version; | |
4203 | uint32_t tmp; | |
4204 | ||
4205 | /* Read MC indirect register offset 0x9F bits [3:0] to see | |
4206 | * if VBIOS has already loaded a full version of MC ucode | |
4207 | * or not. | |
4208 | */ | |
4209 | ||
4210 | smu7_get_mc_microcode_version(hwmgr); | |
4211 | vbios_version = hwmgr->microcode_version_info.MC & 0xf; | |
4212 | ||
4213 | data->need_long_memory_training = false; | |
4214 | ||
4215 | cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, | |
4216 | ixMC_IO_DEBUG_UP_13); | |
4217 | tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA); | |
4218 | ||
4219 | if (tmp & (1 << 23)) { | |
4220 | data->mem_latency_high = MEM_LATENCY_HIGH; | |
4221 | data->mem_latency_low = MEM_LATENCY_LOW; | |
4222 | } else { | |
4223 | data->mem_latency_high = 330; | |
4224 | data->mem_latency_low = 330; | |
4225 | } | |
4226 | ||
4227 | return 0; | |
4228 | } | |
4229 | ||
4230 | static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) | |
4231 | { | |
4232 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4233 | ||
4234 | data->clock_registers.vCG_SPLL_FUNC_CNTL = | |
4235 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL); | |
4236 | data->clock_registers.vCG_SPLL_FUNC_CNTL_2 = | |
4237 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2); | |
4238 | data->clock_registers.vCG_SPLL_FUNC_CNTL_3 = | |
4239 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3); | |
4240 | data->clock_registers.vCG_SPLL_FUNC_CNTL_4 = | |
4241 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4); | |
4242 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM = | |
4243 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM); | |
4244 | data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 = | |
4245 | cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2); | |
4246 | data->clock_registers.vDLL_CNTL = | |
4247 | cgs_read_register(hwmgr->device, mmDLL_CNTL); | |
4248 | data->clock_registers.vMCLK_PWRMGT_CNTL = | |
4249 | cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL); | |
4250 | data->clock_registers.vMPLL_AD_FUNC_CNTL = | |
4251 | cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL); | |
4252 | data->clock_registers.vMPLL_DQ_FUNC_CNTL = | |
4253 | cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL); | |
4254 | data->clock_registers.vMPLL_FUNC_CNTL = | |
4255 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL); | |
4256 | data->clock_registers.vMPLL_FUNC_CNTL_1 = | |
4257 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1); | |
4258 | data->clock_registers.vMPLL_FUNC_CNTL_2 = | |
4259 | cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2); | |
4260 | data->clock_registers.vMPLL_SS1 = | |
4261 | cgs_read_register(hwmgr->device, mmMPLL_SS1); | |
4262 | data->clock_registers.vMPLL_SS2 = | |
4263 | cgs_read_register(hwmgr->device, mmMPLL_SS2); | |
4264 | return 0; | |
4265 | ||
4266 | } | |
4267 | ||
4268 | /** | |
4269 | * Find out if memory is GDDR5. | |
4270 | * | |
4271 | * @param hwmgr the address of the powerplay hardware manager. | |
4272 | * @return always 0 | |
4273 | */ | |
4274 | static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) | |
4275 | { | |
4276 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
c73a3626 | 4277 | struct amdgpu_device *adev = hwmgr->adev; |
599a7e9f | 4278 | |
c73a3626 | 4279 | data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); |
599a7e9f RZ |
4280 | |
4281 | return 0; | |
4282 | } | |
4283 | ||
4284 | /** | |
4285 | * Enables ACPI power management by the SMC (sets GENERAL_PWRMGT.STATIC_PM_EN) | |
4286 | * | |
4287 | * @param hwmgr the address of the powerplay hardware manager. | |
4288 | * @return always 0 | |
4289 | */ | |
4290 | static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr) | |
4291 | { | |
4292 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, | |
4293 | GENERAL_PWRMGT, STATIC_PM_EN, 1); | |
4294 | ||
4295 | return 0; | |
4296 | } | |
4297 | ||
4298 | /** | |
4299 | * Initialize PowerGating States for different engines | |
4300 | * | |
4301 | * @param hwmgr the address of the powerplay hardware manager. | |
4302 | * @return always 0 | |
4303 | */ | |
4304 | static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr) | |
4305 | { | |
4306 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4307 | ||
4308 | data->uvd_power_gated = false; | |
4309 | data->vce_power_gated = false; | |
599a7e9f RZ |
4310 | |
4311 | return 0; | |
4312 | } | |
4313 | ||
4314 | static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr) | |
4315 | { | |
4316 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4317 | ||
4318 | data->low_sclk_interrupt_threshold = 0; | |
4319 | return 0; | |
4320 | } | |
4321 | ||
f8a4c11b | 4322 | static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
4323 | { |
4324 | int tmp_result, result = 0; | |
4325 | ||
9da00630 | 4326 | smu7_check_mc_firmware(hwmgr); |
599a7e9f RZ |
4327 | |
4328 | tmp_result = smu7_read_clock_registers(hwmgr); | |
4329 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4330 | "Failed to read clock registers!", result = tmp_result); | |
4331 | ||
4332 | tmp_result = smu7_get_memory_type(hwmgr); | |
4333 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4334 | "Failed to get memory type!", result = tmp_result); | |
4335 | ||
4336 | tmp_result = smu7_enable_acpi_power_management(hwmgr); | |
4337 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4338 | "Failed to enable ACPI power management!", result = tmp_result); | |
4339 | ||
4340 | tmp_result = smu7_init_power_gate_state(hwmgr); | |
4341 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4342 | "Failed to init power gate state!", result = tmp_result); | |
4343 | ||
4344 | tmp_result = smu7_get_mc_microcode_version(hwmgr); | |
4345 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4346 | "Failed to get MC microcode version!", result = tmp_result); | |
4347 | ||
4348 | tmp_result = smu7_init_sclk_threshold(hwmgr); | |
4349 | PP_ASSERT_WITH_CODE((0 == tmp_result), | |
4350 | "Failed to init sclk threshold!", result = tmp_result); | |
4351 | ||
4352 | return result; | |
4353 | } | |
4354 | ||
4355 | static int smu7_force_clock_level(struct pp_hwmgr *hwmgr, | |
4356 | enum pp_clock_type type, uint32_t mask) | |
4357 | { | |
4358 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4359 | ||
61e208b1 RZ |
4360 | if (mask == 0) |
4361 | return -EINVAL; | |
4362 | ||
599a7e9f RZ |
4363 | switch (type) { |
4364 | case PP_SCLK: | |
4365 | if (!data->sclk_dpm_key_disabled) | |
d3f8c0ab | 4366 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
4367 | PPSMC_MSG_SCLKDPM_SetEnabledMask, |
4368 | data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); | |
4369 | break; | |
4370 | case PP_MCLK: | |
4371 | if (!data->mclk_dpm_key_disabled) | |
d3f8c0ab | 4372 | smum_send_msg_to_smc_with_parameter(hwmgr, |
599a7e9f RZ |
4373 | PPSMC_MSG_MCLKDPM_SetEnabledMask, |
4374 | data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); | |
4375 | break; | |
4376 | case PP_PCIE: | |
4377 | { | |
4378 | uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask; | |
599a7e9f | 4379 | |
61e208b1 RZ |
4380 | if (!data->pcie_dpm_key_disabled) { |
4381 | if (fls(tmp) != ffs(tmp)) | |
4382 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel); | |
4383 | else | |
4384 | smum_send_msg_to_smc_with_parameter(hwmgr, | |
599a7e9f | 4385 | PPSMC_MSG_PCIeDPM_ForceLevel, |
61e208b1 RZ |
4386 | fls(tmp) - 1); |
4387 | } | |
599a7e9f RZ |
4388 | break; |
4389 | } | |
4390 | default: | |
4391 | break; | |
4392 | } | |
4393 | ||
4394 | return 0; | |
4395 | } | |
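/*
 * Illustrative usage sketch (the helper below is not part of the driver):
 * each bit of 'mask' selects one DPM level, so restricting SCLK to levels
 * 2 and 3 passes 0b1100. For PP_PCIE the code above instead forces the
 * single highest requested level with fls(), or unforces when more than
 * one bit is set (fls(tmp) != ffs(tmp)).
 */
static int example_limit_sclk_to_levels_2_and_3(struct pp_hwmgr *hwmgr)
{
	return smu7_force_clock_level(hwmgr, PP_SCLK, (1 << 2) | (1 << 3));
}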
4396 | ||
4397 | static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, | |
4398 | enum pp_clock_type type, char *buf) | |
4399 | { | |
4400 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4401 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
4402 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
4403 | struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table); | |
6df21b77 RZ |
4404 | struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table); |
4405 | struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels); | |
4406 | struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels); | |
599a7e9f RZ |
4407 | int i, now, size = 0; |
4408 | uint32_t clock, pcie_speed; | |
4409 | ||
4410 | switch (type) { | |
4411 | case PP_SCLK: | |
d3f8c0ab | 4412 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency); |
599a7e9f RZ |
4413 | clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
4414 | ||
4415 | for (i = 0; i < sclk_table->count; i++) { | |
4416 | if (clock > sclk_table->dpm_levels[i].value) | |
4417 | continue; | |
4418 | break; | |
4419 | } | |
4420 | now = i; | |
4421 | ||
4422 | for (i = 0; i < sclk_table->count; i++) | |
4423 | size += sprintf(buf + size, "%d: %uMHz %s\n", | |
4424 | i, sclk_table->dpm_levels[i].value / 100, | |
4425 | (i == now) ? "*" : ""); | |
4426 | break; | |
4427 | case PP_MCLK: | |
d3f8c0ab | 4428 | smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency); |
599a7e9f RZ |
4429 | clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); |
4430 | ||
4431 | for (i = 0; i < mclk_table->count; i++) { | |
4432 | if (clock > mclk_table->dpm_levels[i].value) | |
4433 | continue; | |
4434 | break; | |
4435 | } | |
4436 | now = i; | |
4437 | ||
4438 | for (i = 0; i < mclk_table->count; i++) | |
4439 | size += sprintf(buf + size, "%d: %uMHz %s\n", | |
4440 | i, mclk_table->dpm_levels[i].value / 100, | |
4441 | (i == now) ? "*" : ""); | |
4442 | break; | |
4443 | case PP_PCIE: | |
4444 | pcie_speed = smu7_get_current_pcie_speed(hwmgr); | |
4445 | for (i = 0; i < pcie_table->count; i++) { | |
4446 | if (pcie_speed != pcie_table->dpm_levels[i].value) | |
4447 | continue; | |
4448 | break; | |
4449 | } | |
4450 | now = i; | |
4451 | ||
4452 | for (i = 0; i < pcie_table->count; i++) | |
4453 | size += sprintf(buf + size, "%d: %s %s\n", i, | |
7413d2fa EQ |
4454 | (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : |
4455 | (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : | |
4456 | (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "", | |
599a7e9f RZ |
4457 | (i == now) ? "*" : ""); |
4458 | break; | |
6df21b77 RZ |
4459 | case OD_SCLK: |
4460 | if (hwmgr->od_enabled) { | |
a3c991f9 | 4461 | size = sprintf(buf, "%s:\n", "OD_SCLK"); |
6df21b77 | 4462 | for (i = 0; i < odn_sclk_table->num_of_pl; i++) |
a3c991f9 RZ |
4463 | size += sprintf(buf + size, "%d: %10uMHz %10umV\n", |
4464 | i, odn_sclk_table->entries[i].clock/100, | |
6df21b77 RZ |
4465 | odn_sclk_table->entries[i].vddc); |
4466 | } | |
4467 | break; | |
4468 | case OD_MCLK: | |
4469 | if (hwmgr->od_enabled) { | |
a3c991f9 | 4470 | size = sprintf(buf, "%s:\n", "OD_MCLK"); |
6df21b77 | 4471 | for (i = 0; i < odn_mclk_table->num_of_pl; i++) |
a3c991f9 RZ |
4472 | size += sprintf(buf + size, "%d: %10uMHz %10umV\n", |
4473 | i, odn_mclk_table->entries[i].clock/100, | |
6df21b77 RZ |
4474 | odn_mclk_table->entries[i].vddc); |
4475 | } | |
4476 | break; | |
a3c991f9 RZ |
4477 | case OD_RANGE: |
4478 | if (hwmgr->od_enabled) { | |
4479 | size = sprintf(buf, "%s:\n", "OD_RANGE"); | |
4480 | size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", | |
4481 | data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, | |
4482 | hwmgr->platform_descriptor.overdriveLimit.engineClock/100); | |
4483 | size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", | |
4484 | data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, | |
4485 | hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); | |
4486 | size += sprintf(buf + size, "VDDC: %7umV %11umV\n", | |
4487 | data->odn_dpm_table.min_vddc, | |
4488 | data->odn_dpm_table.max_vddc); | |
4489 | } | |
4490 | break; | |
599a7e9f RZ |
4491 | default: |
4492 | break; | |
4493 | } | |
4494 | return size; | |
4495 | } | |
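/*
 * Example sysfs output from the PP_SCLK branch above (values illustrative;
 * dpm_levels[].value is in 10 kHz units, hence the /100 to MHz, and '*'
 * marks the first level at or above the clock read back from the SMC):
 *   0: 300MHz
 *   1: 600MHz *
 *   2: 900MHz
 */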
4496 | ||
f93f0c3a | 4497 | static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) |
599a7e9f | 4498 | { |
2fde9ab2 RZ |
4499 | switch (mode) { |
4500 | case AMD_FAN_CTRL_NONE: | |
f93f0c3a | 4501 | smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); |
2fde9ab2 RZ |
4502 | break; |
4503 | case AMD_FAN_CTRL_MANUAL: | |
4504 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | |
4505 | PHM_PlatformCaps_MicrocodeFanControl)) | |
f93f0c3a | 4506 | smu7_fan_ctrl_stop_smc_fan_control(hwmgr); |
2fde9ab2 RZ |
4507 | break; |
4508 | case AMD_FAN_CTRL_AUTO: | |
f93f0c3a RZ |
4509 | if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode)) |
4510 | smu7_fan_ctrl_start_smc_fan_control(hwmgr); | |
2fde9ab2 RZ |
4511 | break; |
4512 | default: | |
4513 | break; | |
4514 | } | |
599a7e9f RZ |
4515 | } |
4516 | ||
f93f0c3a | 4517 | static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr) |
599a7e9f | 4518 | { |
2fde9ab2 | 4519 | return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL; |
599a7e9f RZ |
4520 | } |
4521 | ||
4522 | static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) | |
4523 | { | |
4524 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4525 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
4526 | struct smu7_single_dpm_table *golden_sclk_table = | |
4527 | &(data->golden_dpm_table.sclk_table); | |
a4233cc9 GJ |
4528 | int value = sclk_table->dpm_levels[sclk_table->count - 1].value; |
4529 | int golden_value = golden_sclk_table->dpm_levels | |
4530 | [golden_sclk_table->count - 1].value; | |
599a7e9f | 4531 | |
a4233cc9 GJ |
4532 | value -= golden_value; |
4533 | value = DIV_ROUND_UP(value * 100, golden_value); | |
599a7e9f RZ |
4534 | |
4535 | return value; | |
4536 | } | |
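/*
 * Worked example (illustrative): a golden top-level SCLK of 100000 (10 kHz
 * units, i.e. 1000 MHz) and a current top level of 105000 give
 *   value = DIV_ROUND_UP((105000 - 100000) * 100, 100000) = 5
 * i.e. a 5% engine-clock overdrive.
 */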
4537 | ||
4538 | static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) | |
4539 | { | |
4540 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4541 | struct smu7_single_dpm_table *golden_sclk_table = | |
4542 | &(data->golden_dpm_table.sclk_table); | |
4543 | struct pp_power_state *ps; | |
4544 | struct smu7_power_state *smu7_ps; | |
4545 | ||
4546 | if (value > 20) | |
4547 | value = 20; | |
4548 | ||
4549 | ps = hwmgr->request_ps; | |
4550 | ||
4551 | if (ps == NULL) | |
4552 | return -EINVAL; | |
4553 | ||
4554 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
4555 | ||
4556 | smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock = | |
4557 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * | |
4558 | value / 100 + | |
4559 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; | |
4560 | ||
4561 | return 0; | |
4562 | } | |
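/*
 * The setter inverts the getter above: the requested percentage (clamped
 * to 20) scales the golden top level, e.g. value = 5 on a 1000 MHz golden
 * top yields 1000 * 5 / 100 + 1000 = 1050 MHz for the highest performance
 * level. smu7_set_mclk_od() below applies the same formula to MCLK.
 */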
4563 | ||
4564 | static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) | |
4565 | { | |
4566 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4567 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
4568 | struct smu7_single_dpm_table *golden_mclk_table = | |
4569 | &(data->golden_dpm_table.mclk_table); | |
a4233cc9 GJ |
4570 | int value = mclk_table->dpm_levels[mclk_table->count - 1].value; |
4571 | int golden_value = golden_mclk_table->dpm_levels | |
4572 | [golden_mclk_table->count - 1].value; | |
599a7e9f | 4573 | |
a4233cc9 GJ |
4574 | value -= golden_value; |
4575 | value = DIV_ROUND_UP(value * 100, golden_value); | |
599a7e9f RZ |
4576 | |
4577 | return value; | |
4578 | } | |
4579 | ||
4580 | static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) | |
4581 | { | |
4582 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4583 | struct smu7_single_dpm_table *golden_mclk_table = | |
4584 | &(data->golden_dpm_table.mclk_table); | |
4585 | struct pp_power_state *ps; | |
4586 | struct smu7_power_state *smu7_ps; | |
4587 | ||
4588 | if (value > 20) | |
4589 | value = 20; | |
4590 | ||
4591 | ps = hwmgr->request_ps; | |
4592 | ||
4593 | if (ps == NULL) | |
4594 | return -EINVAL; | |
4595 | ||
4596 | smu7_ps = cast_phw_smu7_power_state(&ps->hardware); | |
4597 | ||
4598 | smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock = | |
4599 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * | |
4600 | value / 100 + | |
4601 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; | |
4602 | ||
4603 | return 0; | |
4604 | } | |
4605 | ||
4606 | ||
4607 | static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |
4608 | { | |
4609 | struct phm_ppt_v1_information *table_info = | |
4610 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
954e6bee RZ |
4611 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; |
4612 | struct phm_clock_voltage_dependency_table *sclk_table; | |
599a7e9f RZ |
4613 | int i; |
4614 | ||
954e6bee RZ |
4615 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
4616 | if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) | |
4617 | return -EINVAL; | |
4618 | dep_sclk_table = table_info->vdd_dep_on_sclk; | |
4d8d44c6 | 4619 | for (i = 0; i < dep_sclk_table->count; i++) |
c3cb424a | 4620 | clocks->clock[i] = dep_sclk_table->entries[i].clk * 10; |
4d8d44c6 | 4621 | clocks->count = dep_sclk_table->count; |
954e6bee RZ |
4622 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
4623 | sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; | |
4d8d44c6 | 4624 | for (i = 0; i < sclk_table->count; i++) |
c3cb424a | 4625 | clocks->clock[i] = sclk_table->entries[i].clk * 10; |
4d8d44c6 | 4626 | clocks->count = sclk_table->count; |
599a7e9f | 4627 | } |
954e6bee | 4628 | |
599a7e9f RZ |
4629 | return 0; |
4630 | } | |
4631 | ||
4632 | static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk) | |
4633 | { | |
4634 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4635 | ||
4636 | if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY) | |
4637 | return data->mem_latency_high; | |
4638 | else if (clk >= MEM_FREQ_HIGH_LATENCY) | |
4639 | return data->mem_latency_low; | |
4640 | else | |
4641 | return MEM_LATENCY_ERR; | |
4642 | } | |
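/*
 * Latency bands above, with clk in 10 kHz units: [25000, 80000), i.e.
 * 250-800 MHz, reports mem_latency_high; >= 80000 (800 MHz) reports
 * mem_latency_low; anything below 250 MHz returns MEM_LATENCY_ERR.
 */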
4643 | ||
4644 | static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) | |
4645 | { | |
4646 | struct phm_ppt_v1_information *table_info = | |
4647 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
4648 | struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; | |
4649 | int i; | |
954e6bee | 4650 | struct phm_clock_voltage_dependency_table *mclk_table; |
599a7e9f | 4651 | |
954e6bee RZ |
4652 | if (hwmgr->pp_table_version == PP_TABLE_V1) { |
4653 | if (table_info == NULL) | |
4654 | return -EINVAL; | |
4655 | dep_mclk_table = table_info->vdd_dep_on_mclk; | |
4656 | for (i = 0; i < dep_mclk_table->count; i++) { | |
c3cb424a | 4657 | clocks->clock[i] = dep_mclk_table->entries[i].clk * 10; |
954e6bee | 4658 | clocks->latency[i] = smu7_get_mem_latency(hwmgr, |
599a7e9f | 4659 | dep_mclk_table->entries[i].clk); |
954e6bee | 4660 | } |
4d8d44c6 | 4661 | clocks->count = dep_mclk_table->count; |
954e6bee RZ |
4662 | } else if (hwmgr->pp_table_version == PP_TABLE_V0) { |
4663 | mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; | |
4d8d44c6 | 4664 | for (i = 0; i < mclk_table->count; i++) |
c3cb424a | 4665 | clocks->clock[i] = mclk_table->entries[i].clk * 10; |
4d8d44c6 | 4666 | clocks->count = mclk_table->count; |
599a7e9f RZ |
4667 | } |
4668 | return 0; | |
4669 | } | |
4670 | ||
4671 | static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, | |
4672 | struct amd_pp_clocks *clocks) | |
4673 | { | |
4674 | switch (type) { | |
4675 | case amd_pp_sys_clock: | |
4676 | smu7_get_sclks(hwmgr, clocks); | |
4677 | break; | |
4678 | case amd_pp_mem_clock: | |
4679 | smu7_get_mclks(hwmgr, clocks); | |
4680 | break; | |
4681 | default: | |
4682 | return -EINVAL; | |
4683 | } | |
4684 | ||
4685 | return 0; | |
4686 | } | |
4687 | ||
26f52781 RZ |
4688 | static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr, |
4689 | uint32_t virtual_addr_low, | |
4690 | uint32_t virtual_addr_hi, | |
4691 | uint32_t mc_addr_low, | |
4692 | uint32_t mc_addr_hi, | |
4693 | uint32_t size) | |
4694 | { | |
4695 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4696 | ||
4697 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
4698 | data->soft_regs_start + | |
4699 | smum_get_offsetof(hwmgr, | |
4700 | SMU_SoftRegisters, DRAM_LOG_ADDR_H), | |
4701 | mc_addr_hi); | |
4702 | ||
4703 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
4704 | data->soft_regs_start + | |
4705 | smum_get_offsetof(hwmgr, | |
4706 | SMU_SoftRegisters, DRAM_LOG_ADDR_L), | |
4707 | mc_addr_low); | |
4708 | ||
4709 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
4710 | data->soft_regs_start + | |
4711 | smum_get_offsetof(hwmgr, | |
4712 | SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H), | |
4713 | virtual_addr_hi); | |
4714 | ||
4715 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
4716 | data->soft_regs_start + | |
4717 | smum_get_offsetof(hwmgr, | |
4718 | SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L), | |
4719 | virtual_addr_low); | |
4720 | ||
4721 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | |
4722 | data->soft_regs_start + | |
4723 | smum_get_offsetof(hwmgr, | |
4724 | SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE), | |
4725 | size); | |
4726 | return 0; | |
4727 | } | |
4728 | ||
ad8cec7d RZ |
4729 | static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr, |
4730 | struct amd_pp_simple_clock_info *clocks) | |
4731 | { | |
4732 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4733 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | |
4734 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | |
4735 | ||
4736 | if (clocks == NULL) | |
4737 | return -EINVAL; | |
4738 | ||
4739 | clocks->memory_max_clock = mclk_table->count > 1 ? | |
4740 | mclk_table->dpm_levels[mclk_table->count-1].value : | |
4741 | mclk_table->dpm_levels[0].value; | |
4742 | clocks->engine_max_clock = sclk_table->count > 1 ? | |
4743 | sclk_table->dpm_levels[sclk_table->count-1].value : | |
4744 | sclk_table->dpm_levels[0].value; | |
4745 | return 0; | |
4746 | } | |
4747 | ||
4ba08257 EQ |
4748 | static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, |
4749 | struct PP_TemperatureRange *thermal_data) | |
4750 | { | |
4751 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4752 | struct phm_ppt_v1_information *table_info = | |
4753 | (struct phm_ppt_v1_information *)hwmgr->pptable; | |
4754 | ||
4755 | memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange)); | |
4756 | ||
4757 | if (hwmgr->pp_table_version == PP_TABLE_V1) | |
4758 | thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp * | |
4759 | PP_TEMPERATURE_UNITS_PER_CENTIGRADES; | |
4760 | else if (hwmgr->pp_table_version == PP_TABLE_V0) | |
4761 | thermal_data->max = data->thermal_temp_setting.temperature_shutdown * | |
4762 | PP_TEMPERATURE_UNITS_PER_CENTIGRADES; | |
4763 | ||
4764 | return 0; | |
4765 | } | |
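/*
 * Both paths above scale the table's shutdown temperature (degrees C) by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES into the finer-grained units the
 * thermal range interface expects; only the max bound is table-specific,
 * the rest comes from the SMU7ThermalPolicy defaults copied in first.
 */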
4766 | ||
5e4d4fbe RZ |
4767 | static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, |
4768 | enum PP_OD_DPM_TABLE_COMMAND type, | |
4769 | uint32_t clk, | |
4770 | uint32_t voltage) | |
4771 | { | |
4772 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4773 | ||
d389d607 RZ |
4774 | if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) { |
4775 | pr_info("OD voltage is out of range [%d - %d] mV\n", | |
4776 | data->odn_dpm_table.min_vddc, | |
4777 | data->odn_dpm_table.max_vddc); | |
5e4d4fbe RZ |
4778 | return false; |
4779 | } | |
4780 | ||
4781 | if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { | |
d389d607 | 4782 | if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk || |
5e4d4fbe RZ |
4783 | hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { |
4784 | pr_info("OD engine clock is out of range [%d - %d] MHz\n", | |
d389d607 RZ |
4785 | data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, |
4786 | hwmgr->platform_descriptor.overdriveLimit.engineClock/100); | |
5e4d4fbe RZ |
4787 | return false; |
4788 | } | |
4789 | } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { | |
d389d607 | 4790 | if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk || |
5e4d4fbe RZ |
4791 | hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { |
4792 | pr_info("OD memory clock is out of range [%d - %d] MHz\n", | |
d389d607 RZ |
4793 | data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, |
4794 | hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); | |
5e4d4fbe RZ |
4795 | return false; |
4796 | } | |
4797 | } else { | |
4798 | return false; | |
4799 | } | |
4800 | ||
4801 | return true; | |
4802 | } | |
4803 | ||
5e4d4fbe RZ |
4804 | static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, |
4805 | enum PP_OD_DPM_TABLE_COMMAND type, | |
4806 | long *input, uint32_t size) | |
4807 | { | |
4808 | uint32_t i; | |
4809 | struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL; | |
4810 | struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL; | |
4811 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4812 | ||
4813 | uint32_t input_clk; | |
4814 | uint32_t input_vol; | |
4815 | uint32_t input_level; | |
4816 | ||
4817 | PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", | |
4818 | return -EINVAL); | |
4819 | ||
4820 | if (!hwmgr->od_enabled) { | |
4821 | pr_info("OverDrive feature not enabled\n"); | |
4822 | return -EINVAL; | |
4823 | } | |
4824 | ||
4825 | if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) { | |
4826 | podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels; | |
4827 | podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk; | |
4828 | PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend), | |
4829 | "Failed to get ODN SCLK and Voltage tables", | |
4830 | return -EINVAL); | |
4831 | } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) { | |
4832 | podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels; | |
4833 | podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk; | |
4834 | ||
4835 | PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend), | |
4836 | "Failed to get ODN MCLK and Voltage tables", | |
4837 | return -EINVAL); | |
4838 | } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) { | |
4839 | smu7_odn_initial_default_setting(hwmgr); | |
4840 | return 0; | |
4841 | } else if (PP_OD_COMMIT_DPM_TABLE == type) { | |
4842 | smu7_check_dpm_table_updated(hwmgr); | |
4843 | return 0; | |
4844 | } else { | |
4845 | return -EINVAL; | |
4846 | } | |
4847 | ||
4848 | for (i = 0; i < size; i += 3) { | |
4849 | if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) { | |
4850 | pr_info("invalid clock voltage input\n"); | |
4851 | return 0; | |
4852 | } | |
4853 | input_level = input[i]; | |
4854 | input_clk = input[i+1] * 100; | |
4855 | input_vol = input[i+2]; | |
4856 | ||
4857 | if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) { | |
4858 | podn_dpm_table_in_backend->entries[input_level].clock = input_clk; | |
4859 | podn_vdd_dep_in_backend->entries[input_level].clk = input_clk; | |
4860 | podn_dpm_table_in_backend->entries[input_level].vddc = input_vol; | |
4861 | podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol; | |
e51ee68f | 4862 | podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol; |
5e4d4fbe RZ |
4863 | } else { |
4864 | return -EINVAL; | |
4865 | } | |
4866 | } | |
4867 | ||
4868 | return 0; | |
4869 | } | |
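/*
 * Illustrative input layout (assumed example values, not part of the
 * driver): 'input' carries (level, clock-in-MHz, voltage-in-mV) triples,
 * so editing ODN SCLK levels 0 and 1 could pass size = 6 with
 *   long input[] = { 0, 300, 800,  1, 600, 850 };
 * the clock is multiplied by 100 above to reach the 10 kHz units used by
 * the DPM tables.
 */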
4870 | ||
5d24af84 RZ |
4871 | static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) |
4872 | { | |
4873 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4874 | uint32_t i, size = 0; | |
4875 | uint32_t len; | |
4876 | ||
4877 | static const char *profile_name[6] = {"3D_FULL_SCREEN", | |
4878 | "POWER_SAVING", | |
4879 | "VIDEO", | |
4880 | "VR", | |
4881 | "COMPUTE", | |
4882 | "CUSTOM"}; | |
4883 | ||
4884 | static const char *title[8] = {"NUM", | |
4885 | "MODE_NAME", | |
4886 | "SCLK_UP_HYST", | |
4887 | "SCLK_DOWN_HYST", | |
4888 | "SCLK_ACTIVE_LEVEL", | |
4889 | "MCLK_UP_HYST", | |
4890 | "MCLK_DOWN_HYST", | |
4891 | "MCLK_ACTIVE_LEVEL"}; | |
4892 | ||
4893 | if (!buf) | |
4894 | return -EINVAL; | |
4895 | ||
4896 | size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n", | |
4897 | title[0], title[1], title[2], title[3], | |
4898 | title[4], title[5], title[6], title[7]); | |
4899 | ||
4900 | len = ARRAY_SIZE(smu7_profiling); | |
4901 | ||
4902 | for (i = 0; i < len; i++) { | |
4aa8c41b RZ |
4903 | if (i == hwmgr->power_profile_mode) { |
4904 | size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", | |
4905 | i, profile_name[i], "*", | |
4906 | data->current_profile_setting.sclk_up_hyst, | |
4907 | data->current_profile_setting.sclk_down_hyst, | |
4908 | data->current_profile_setting.sclk_activity, | |
4909 | data->current_profile_setting.mclk_up_hyst, | |
4910 | data->current_profile_setting.mclk_down_hyst, | |
4911 | data->current_profile_setting.mclk_activity); | |
4912 | continue; | |
4913 | } | |
5d24af84 RZ |
4914 | if (smu7_profiling[i].bupdate_sclk) |
4915 | size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", | |
4916 | i, profile_name[i], smu7_profiling[i].sclk_up_hyst, | |
4917 | smu7_profiling[i].sclk_down_hyst, | |
4918 | smu7_profiling[i].sclk_activity); | |
4919 | else | |
4920 | size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ", | |
4921 | i, profile_name[i], "-", "-", "-"); | |
4922 | ||
4923 | if (smu7_profiling[i].bupdate_mclk) | |
4924 | size += sprintf(buf + size, "%16d %16d %16d\n", | |
4925 | smu7_profiling[i].mclk_up_hyst, | |
4926 | smu7_profiling[i].mclk_down_hyst, | |
4927 | smu7_profiling[i].mclk_activity); | |
4928 | else | |
4929 | size += sprintf(buf + size, "%16s %16s %16s\n", | |
4930 | "-", "-", "-"); | |
4931 | } | |
4932 | ||
5d24af84 RZ |
4933 | return size; |
4934 | } | |
4935 | ||
180a8beb RZ |
4936 | static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr, |
4937 | enum PP_SMC_POWER_PROFILE request) | |
4938 | { | |
4939 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4940 | uint32_t tmp, level; | |
4941 | ||
4942 | if (request == PP_SMC_POWER_PROFILE_COMPUTE) { | |
4943 | if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) { | |
4944 | level = 0; | |
4945 | tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask; | |
4946 | while (tmp >>= 1) | |
4947 | level++; | |
4948 | if (level > 0) | |
4949 | smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1)); | |
4950 | } | |
4951 | } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) { | |
4952 | smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask); | |
4953 | } | |
4954 | } | |
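/*
 * Worked example (illustrative): with sclk_dpm_enable_mask = 0b1111 the
 * shift loop above leaves level = 3, and 3 << (level - 1) = 0b1100, so
 * entering COMPUTE pins SCLK to the two highest enabled levels; leaving
 * COMPUTE restores the full enable mask.
 */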
4955 | ||
5d24af84 RZ |
4956 | static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) |
4957 | { | |
4958 | struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); | |
4959 | struct profile_mode_setting tmp; | |
4960 | enum PP_SMC_POWER_PROFILE mode; | |
4961 | ||
4962 | if (input == NULL) | |
4963 | return -EINVAL; | |
4964 | ||
4965 | mode = input[size]; | |
4966 | switch (mode) { | |
4967 | case PP_SMC_POWER_PROFILE_CUSTOM: | |
4968 | if (size < 8) | |
4969 | return -EINVAL; | |
4970 | ||
4aa8c41b RZ |
4971 | tmp.bupdate_sclk = input[0]; |
4972 | tmp.sclk_up_hyst = input[1]; | |
4973 | tmp.sclk_down_hyst = input[2]; | |
4974 | tmp.sclk_activity = input[3]; | |
4975 | tmp.bupdate_mclk = input[4]; | |
4976 | tmp.mclk_up_hyst = input[5]; | |
4977 | tmp.mclk_down_hyst = input[6]; | |
4978 | tmp.mclk_activity = input[7]; | |
4979 | if (!smum_update_dpm_settings(hwmgr, &tmp)) { | |
4980 | memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); | |
5d24af84 RZ |
4981 | hwmgr->power_profile_mode = mode; |
4982 | } | |
4983 | break; | |
4984 | case PP_SMC_POWER_PROFILE_FULLSCREEN3D: | |
4985 | case PP_SMC_POWER_PROFILE_POWERSAVING: | |
4986 | case PP_SMC_POWER_PROFILE_VIDEO: | |
4987 | case PP_SMC_POWER_PROFILE_VR: | |
4988 | case PP_SMC_POWER_PROFILE_COMPUTE: | |
4989 | if (mode == hwmgr->power_profile_mode) | |
4990 | return 0; | |
4991 | ||
4992 | memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting)); | |
4993 | if (!smum_update_dpm_settings(hwmgr, &tmp)) { | |
4994 | if (tmp.bupdate_sclk) { | |
4995 | data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk; | |
4996 | data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst; | |
4997 | data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst; | |
4998 | data->current_profile_setting.sclk_activity = tmp.sclk_activity; | |
4999 | } | |
5000 | if (tmp.bupdate_mclk) { | |
5001 | data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk; | |
5002 | data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst; | |
5003 | data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst; | |
5004 | data->current_profile_setting.mclk_activity = tmp.mclk_activity; | |
5005 | } | |
180a8beb | 5006 | smu7_patch_compute_profile_mode(hwmgr, mode); |
5d24af84 RZ |
5007 | hwmgr->power_profile_mode = mode; |
5008 | } | |
5009 | break; | |
5010 | default: | |
5011 | return -EINVAL; | |
5012 | } | |
5013 | ||
5014 | return 0; | |
5015 | } | |
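/*
 * Assumed call shape for the CUSTOM branch above (illustrative values):
 * the first eight longs map onto profile_mode_setting in the order read
 * above (bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity, then
 * the MCLK counterparts), and input[size] selects the mode:
 *   long args[9] = { 1, 10, 0, 30, 1, 10, 0, 30,
 *                    PP_SMC_POWER_PROFILE_CUSTOM };
 *   smu7_set_power_profile_mode(hwmgr, args, 8);
 */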
5e4d4fbe | 5016 | |
f688b614 RZ |
5017 | static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, |
5018 | PHM_PerformanceLevelDesignation designation, uint32_t index, | |
5019 | PHM_PerformanceLevel *level) | |
5020 | { | |
5021 | const struct smu7_power_state *ps; | |
5022 | struct smu7_hwmgr *data; | |
5023 | uint32_t i; | |
5024 | ||
5025 | if (level == NULL || hwmgr == NULL || state == NULL) | |
5026 | return -EINVAL; | |
5027 | ||
5028 | data = hwmgr->backend; | |
5029 | ps = cast_const_phw_smu7_power_state(state); | |
5030 | ||
5031 | i = index > ps->performance_level_count - 1 ? | |
5032 | ps->performance_level_count - 1 : index; | |
5033 | ||
5034 | level->coreClock = ps->performance_levels[i].engine_clock; | |
5035 | level->memory_clock = ps->performance_levels[i].memory_clock; | |
5036 | ||
5037 | return 0; | |
5038 | } | |
5039 | ||
d355f149 RZ |
5040 | static int smu7_power_off_asic(struct pp_hwmgr *hwmgr) |
5041 | { | |
5042 | int result; | |
5043 | ||
5044 | result = smu7_disable_dpm_tasks(hwmgr); | |
5045 | PP_ASSERT_WITH_CODE((0 == result), | |
5046 | "[disable_dpm_tasks] Failed to disable DPM!", | |
5047 | ); | |
5048 | ||
5049 | return result; | |
5050 | } | |
5051 | ||
a1c1a1de | 5052 | static const struct pp_hwmgr_func smu7_hwmgr_funcs = { |
599a7e9f | 5053 | .backend_init = &smu7_hwmgr_backend_init, |
a0aa7046 | 5054 | .backend_fini = &smu7_hwmgr_backend_fini, |
599a7e9f RZ |
5055 | .asic_setup = &smu7_setup_asic_task, |
5056 | .dynamic_state_management_enable = &smu7_enable_dpm_tasks, | |
5057 | .apply_state_adjust_rules = smu7_apply_state_adjust_rules, | |
5058 | .force_dpm_level = &smu7_force_dpm_level, | |
5059 | .power_state_set = smu7_set_power_state_tasks, | |
5060 | .get_power_state_size = smu7_get_power_state_size, | |
5061 | .get_mclk = smu7_dpm_get_mclk, | |
5062 | .get_sclk = smu7_dpm_get_sclk, | |
5063 | .patch_boot_state = smu7_dpm_patch_boot_state, | |
5064 | .get_pp_table_entry = smu7_get_pp_table_entry, | |
5065 | .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries, | |
599a7e9f RZ |
5066 | .powerdown_uvd = smu7_powerdown_uvd, |
5067 | .powergate_uvd = smu7_powergate_uvd, | |
5068 | .powergate_vce = smu7_powergate_vce, | |
5069 | .disable_clock_power_gating = smu7_disable_clock_power_gating, | |
5070 | .update_clock_gatings = smu7_update_clock_gatings, | |
5071 | .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment, | |
5072 | .display_config_changed = smu7_display_configuration_changed_task, | |
5073 | .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output, | |
5074 | .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output, | |
599a7e9f RZ |
5075 | .stop_thermal_controller = smu7_thermal_stop_thermal_controller, |
5076 | .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info, | |
5077 | .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent, | |
5078 | .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent, | |
5079 | .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default, | |
5080 | .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm, | |
5081 | .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm, | |
5082 | .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller, | |
4d200372 | 5083 | .register_irq_handlers = smu7_register_irq_handlers, |
599a7e9f RZ |
5084 | .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration, |
5085 | .check_states_equal = smu7_check_states_equal, | |
5086 | .set_fan_control_mode = smu7_set_fan_control_mode, | |
5087 | .get_fan_control_mode = smu7_get_fan_control_mode, | |
5088 | .force_clock_level = smu7_force_clock_level, | |
5089 | .print_clock_levels = smu7_print_clock_levels, | |
3eb6e479 | 5090 | .powergate_gfx = smu7_powergate_gfx, |
599a7e9f RZ |
5091 | .get_sclk_od = smu7_get_sclk_od, |
5092 | .set_sclk_od = smu7_set_sclk_od, | |
5093 | .get_mclk_od = smu7_get_mclk_od, | |
5094 | .set_mclk_od = smu7_set_mclk_od, | |
5095 | .get_clock_by_type = smu7_get_clock_by_type, | |
a6e36952 | 5096 | .read_sensor = smu7_read_sensor, |
f28a9b65 | 5097 | .dynamic_state_management_disable = smu7_disable_dpm_tasks, |
f9c993ce | 5098 | .avfs_control = smu7_avfs_control, |
1dfc41d4 | 5099 | .disable_smc_firmware_ctf = smu7_thermal_disable_alert, |
1ab47204 | 5100 | .start_thermal_controller = smu7_start_thermal_controller, |
26f52781 | 5101 | .notify_cac_buffer_info = smu7_notify_cac_buffer_info, |
ad8cec7d | 5102 | .get_max_high_clocks = smu7_get_max_high_clocks, |
4ba08257 | 5103 | .get_thermal_temperature_range = smu7_get_thermal_temperature_range, |
5e4d4fbe | 5104 | .odn_edit_dpm_table = smu7_odn_edit_dpm_table, |
6ab8555e | 5105 | .set_power_limit = smu7_set_power_limit, |
5d24af84 RZ |
5106 | .get_power_profile_mode = smu7_get_power_profile_mode, |
5107 | .set_power_profile_mode = smu7_set_power_profile_mode, | |
f688b614 | 5108 | .get_performance_level = smu7_get_performance_level, |
d355f149 | 5109 | .power_off_asic = smu7_power_off_asic, |
599a7e9f RZ |
5110 | }; |
5111 | ||
5112 | uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, | |
5113 | uint32_t clock_insr) | |
5114 | { | |
5115 | uint8_t i; | |
5116 | uint32_t temp; | |
5117 | uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); | |
5118 | ||
5119 | PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); | |
5120 | for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { | |
5121 | temp = clock >> i; | |
5122 | ||
5123 | if (temp >= min || i == 0) | |
5124 | break; | |
5125 | } | |
5126 | return i; | |
5127 | } | |
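/*
 * Worked example (illustrative, assuming min resolves to 5000): for
 * clock = 60000 (10 kHz units, 600 MHz) the loop returns the largest
 * divider id i with (60000 >> i) >= 5000, here i = 3 (60000 >> 3 = 7500,
 * while 60000 >> 4 = 3750 would undershoot the minimum).
 */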
5128 | ||
a5b580e1 | 5129 | int smu7_init_function_pointers(struct pp_hwmgr *hwmgr) |
599a7e9f RZ |
5130 | { |
5131 | int ret = 0; | |
5132 | ||
5133 | hwmgr->hwmgr_func = &smu7_hwmgr_funcs; | |
5134 | if (hwmgr->pp_table_version == PP_TABLE_V0) | |
5135 | hwmgr->pptable_func = &pptable_funcs; | |
5136 | else if (hwmgr->pp_table_version == PP_TABLE_V1) | |
5137 | hwmgr->pptable_func = &pptable_v1_0_funcs; | |
5138 | ||
599a7e9f RZ |
5139 | return ret; |
5140 | } |