2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "hardwaremanager.h"
32 #include "ppatomfwctrl.h"
33 #include "atomfirmware.h"
34 #include "cgs_common.h"
35 #include "vega10_powertune.h"
37 #include "smu9_driver_if.h"
38 #include "vega10_inc.h"
39 #include "soc15_common.h"
40 #include "pppcielanes.h"
41 #include "vega10_hwmgr.h"
42 #include "vega10_smumgr.h"
43 #include "vega10_processpptables.h"
44 #include "vega10_pptable.h"
45 #include "vega10_thermal.h"
47 #include "amd_pcie_helpers.h"
48 #include "ppinterrupt.h"
49 #include "pp_overdriver.h"
50 #include "pp_thermal.h"
52 #include "smuio/smuio_9_0_offset.h"
53 #include "smuio/smuio_9_0_sh_mask.h"
55 #define HBM_MEMORY_CHANNEL_WIDTH 128
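/* channel_number[] maps the DF IntLvNumChan encoding (read from
 * DF_CS_AON0_DramBaseAddress0 below) to the number of HBM channels; together
 * with HBM_MEMORY_CHANNEL_WIDTH it gives the memory bus width reported to the
 * SMC, e.g. an encoding of 3 selects 4 channels for a 4 * 128 = 512-bit bus.
 */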
57 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
59 #define mmDF_CS_AON0_DramBaseAddress0 0x0044
60 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
62 //DF_CS_AON0_DramBaseAddress0
63 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
64 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
65 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
66 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
67 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
68 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
69 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
70 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
71 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
72 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
74 static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
76 struct vega10_power_state *cast_phw_vega10_power_state(
77 struct pp_hw_power_state *hw_ps)
79 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
80 "Invalid Powerstate Type!",
83 return (struct vega10_power_state *)hw_ps;
86 const struct vega10_power_state *cast_const_phw_vega10_power_state(
87 const struct pp_hw_power_state *hw_ps)
89 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
90 "Invalid Powerstate Type!",
93 return (const struct vega10_power_state *)hw_ps;
96 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
98 struct vega10_hwmgr *data = hwmgr->backend;
100 data->registry_data.sclk_dpm_key_disabled =
101 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
102 data->registry_data.socclk_dpm_key_disabled =
103 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
104 data->registry_data.mclk_dpm_key_disabled =
105 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
106 data->registry_data.pcie_dpm_key_disabled =
107 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
109 data->registry_data.dcefclk_dpm_key_disabled =
110 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
112 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
113 data->registry_data.power_containment_support = 1;
114 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
115 data->registry_data.enable_tdc_limit_feature = 1;
118 data->registry_data.clock_stretcher_support =
119 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
121 data->registry_data.ulv_support =
122 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
124 data->registry_data.sclk_deep_sleep_support =
125 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
127 data->registry_data.disable_water_mark = 0;
129 data->registry_data.fan_control_support = 1;
130 data->registry_data.thermal_support = 1;
131 data->registry_data.fw_ctf_enabled = 1;
133 data->registry_data.avfs_support =
134 hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
135 data->registry_data.led_dpm_enabled = 1;
137 data->registry_data.vr0hot_enabled = 1;
138 data->registry_data.vr1hot_enabled = 1;
139 data->registry_data.regulator_hot_gpio_support = 1;
141 data->registry_data.didt_support = 1;
142 if (data->registry_data.didt_support) {
143 data->registry_data.didt_mode = 6;
144 data->registry_data.sq_ramping_support = 1;
145 data->registry_data.db_ramping_support = 0;
146 data->registry_data.td_ramping_support = 0;
147 data->registry_data.tcp_ramping_support = 0;
148 data->registry_data.dbr_ramping_support = 0;
149 data->registry_data.edc_didt_support = 1;
150 data->registry_data.gc_didt_support = 0;
151 data->registry_data.psm_didt_support = 0;
154 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
155 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
156 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
157 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
158 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
159 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
160 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
161 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
162 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
163 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
164 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
165 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
168 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
169 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
170 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
171 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
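/* Translate the registry/feature-mask settings gathered above into PHM
 * platform capability bits, which the rest of powerplay queries via PP_CAP().
 */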
174 static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
176 struct vega10_hwmgr *data = hwmgr->backend;
177 struct phm_ppt_v2_information *table_info =
178 (struct phm_ppt_v2_information *)hwmgr->pptable;
179 struct amdgpu_device *adev = hwmgr->adev;
181 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
182 PHM_PlatformCaps_SclkDeepSleep);
184 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
185 PHM_PlatformCaps_DynamicPatchPowerState);
187 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
188 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
189 PHM_PlatformCaps_ControlVDDCI);
191 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
192 PHM_PlatformCaps_EnableSMU7ThermalManagement);
194 if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
195 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
196 PHM_PlatformCaps_UVDPowerGating);
198 if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
199 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
200 PHM_PlatformCaps_VCEPowerGating);
202 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
203 PHM_PlatformCaps_UnTabledHardwareInterface);
205 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206 PHM_PlatformCaps_FanSpeedInTableIsRPM);
208 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
209 PHM_PlatformCaps_ODFuzzyFanControlSupport);
211 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
212 PHM_PlatformCaps_DynamicPowerManagement);
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_SMC);
217 /* power tune caps */
218 /* assume disabled */
219 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
220 PHM_PlatformCaps_PowerContainment);
221 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_DiDtSupport);
223 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
224 PHM_PlatformCaps_SQRamping);
225 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
226 PHM_PlatformCaps_DBRamping);
227 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_TDRamping);
229 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
230 PHM_PlatformCaps_TCPRamping);
231 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
232 PHM_PlatformCaps_DBRRamping);
233 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
234 PHM_PlatformCaps_DiDtEDCEnable);
235 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
236 PHM_PlatformCaps_GCEDC);
237 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_PSM);
240 if (data->registry_data.didt_support) {
241 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
242 if (data->registry_data.sq_ramping_support)
243 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
244 if (data->registry_data.db_ramping_support)
245 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
246 if (data->registry_data.td_ramping_support)
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
248 if (data->registry_data.tcp_ramping_support)
249 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
250 if (data->registry_data.dbr_ramping_support)
251 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
252 if (data->registry_data.edc_didt_support)
253 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
254 if (data->registry_data.gc_didt_support)
255 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
256 if (data->registry_data.psm_didt_support)
257 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
260 if (data->registry_data.power_containment_support)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_PowerContainment);
263 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
264 PHM_PlatformCaps_CAC);
266 if (table_info->tdp_table->usClockStretchAmount &&
267 data->registry_data.clock_stretcher_support)
268 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
269 PHM_PlatformCaps_ClockStretcher);
271 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
272 PHM_PlatformCaps_RegulatorHot);
273 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
274 PHM_PlatformCaps_AutomaticDCTransition);
276 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
277 PHM_PlatformCaps_UVDDPM);
278 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
279 PHM_PlatformCaps_VCEDPM);
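/* Seed the overdrive (ODN) tables from the stock VBIOS dependency tables and
 * raise the top memory-clock entry to at least the overdrive limit and its
 * voltage to the maximum VDDC.
 */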
284 static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
286 struct vega10_hwmgr *data = hwmgr->backend;
287 struct phm_ppt_v2_information *table_info =
288 (struct phm_ppt_v2_information *)(hwmgr->pptable);
289 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
290 struct vega10_odn_vddc_lookup_table *od_lookup_table;
291 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
292 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
293 struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
294 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
298 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
300 data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
301 data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
304 od_lookup_table = &odn_table->vddc_lookup_table;
305 vddc_lookup_table = table_info->vddc_lookup_table;
307 for (i = 0; i < vddc_lookup_table->count; i++)
308 od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;
310 od_lookup_table->count = vddc_lookup_table->count;
312 dep_table[0] = table_info->vdd_dep_on_sclk;
313 dep_table[1] = table_info->vdd_dep_on_mclk;
314 dep_table[2] = table_info->vdd_dep_on_socclk;
315 od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
316 od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
317 od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;
319 for (i = 0; i < 3; i++)
320 smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);
322 if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
323 odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
324 if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
325 odn_table->min_vddc = dep_table[0]->entries[0].vddc;
327 i = od_table[2]->count - 1;
328 od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
329 hwmgr->platform_descriptor.overdriveLimit.memoryClock :
330 od_table[2]->entries[i].clk;
331 od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
332 odn_table->max_vddc :
333 od_table[2]->entries[i].vddc;
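/* Map each GNLD_* software feature to its SMU feature bit and mark which
 * features are supported, based on the registry data and platform caps.
 */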
338 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
340 struct vega10_hwmgr *data = hwmgr->backend;
342 uint32_t sub_vendor_id, hw_revision;
343 struct amdgpu_device *adev = hwmgr->adev;
345 vega10_initialize_power_tune_defaults(hwmgr);
347 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
348 data->smu_features[i].smu_feature_id = 0xffff;
349 data->smu_features[i].smu_feature_bitmap = 1 << i;
350 data->smu_features[i].enabled = false;
351 data->smu_features[i].supported = false;
354 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
355 FEATURE_DPM_PREFETCHER_BIT;
356 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
357 FEATURE_DPM_GFXCLK_BIT;
358 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
359 FEATURE_DPM_UCLK_BIT;
360 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
361 FEATURE_DPM_SOCCLK_BIT;
362 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
364 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
366 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
367 FEATURE_DPM_MP0CLK_BIT;
368 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
369 FEATURE_DPM_LINK_BIT;
370 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
371 FEATURE_DPM_DCEFCLK_BIT;
372 data->smu_features[GNLD_ULV].smu_feature_id =
374 data->smu_features[GNLD_AVFS].smu_feature_id =
376 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
377 FEATURE_DS_GFXCLK_BIT;
378 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
379 FEATURE_DS_SOCCLK_BIT;
380 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
382 data->smu_features[GNLD_PPT].smu_feature_id =
384 data->smu_features[GNLD_TDC].smu_feature_id =
386 data->smu_features[GNLD_THERMAL].smu_feature_id =
388 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
389 FEATURE_GFX_PER_CU_CG_BIT;
390 data->smu_features[GNLD_RM].smu_feature_id =
392 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
393 FEATURE_DS_DCEFCLK_BIT;
394 data->smu_features[GNLD_ACDC].smu_feature_id =
396 data->smu_features[GNLD_VR0HOT].smu_feature_id =
398 data->smu_features[GNLD_VR1HOT].smu_feature_id =
400 data->smu_features[GNLD_FW_CTF].smu_feature_id =
402 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
403 FEATURE_LED_DISPLAY_BIT;
404 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
405 FEATURE_FAN_CONTROL_BIT;
406 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
407 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
408 data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;
410 if (!data->registry_data.prefetcher_dpm_key_disabled)
411 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
413 if (!data->registry_data.sclk_dpm_key_disabled)
414 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
416 if (!data->registry_data.mclk_dpm_key_disabled)
417 data->smu_features[GNLD_DPM_UCLK].supported = true;
419 if (!data->registry_data.socclk_dpm_key_disabled)
420 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
422 if (PP_CAP(PHM_PlatformCaps_UVDDPM))
423 data->smu_features[GNLD_DPM_UVD].supported = true;
425 if (PP_CAP(PHM_PlatformCaps_VCEDPM))
426 data->smu_features[GNLD_DPM_VCE].supported = true;
428 if (!data->registry_data.pcie_dpm_key_disabled)
429 data->smu_features[GNLD_DPM_LINK].supported = true;
431 if (!data->registry_data.dcefclk_dpm_key_disabled)
432 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
434 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
435 data->registry_data.sclk_deep_sleep_support) {
436 data->smu_features[GNLD_DS_GFXCLK].supported = true;
437 data->smu_features[GNLD_DS_SOCCLK].supported = true;
438 data->smu_features[GNLD_DS_LCLK].supported = true;
439 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
442 if (data->registry_data.enable_pkg_pwr_tracking_feature)
443 data->smu_features[GNLD_PPT].supported = true;
445 if (data->registry_data.enable_tdc_limit_feature)
446 data->smu_features[GNLD_TDC].supported = true;
448 if (data->registry_data.thermal_support)
449 data->smu_features[GNLD_THERMAL].supported = true;
451 if (data->registry_data.fan_control_support)
452 data->smu_features[GNLD_FAN_CONTROL].supported = true;
454 if (data->registry_data.fw_ctf_enabled)
455 data->smu_features[GNLD_FW_CTF].supported = true;
457 if (data->registry_data.avfs_support)
458 data->smu_features[GNLD_AVFS].supported = true;
460 if (data->registry_data.led_dpm_enabled)
461 data->smu_features[GNLD_LED_DISPLAY].supported = true;
463 if (data->registry_data.vr1hot_enabled)
464 data->smu_features[GNLD_VR1HOT].supported = true;
466 if (data->registry_data.vr0hot_enabled)
467 data->smu_features[GNLD_VR0HOT].supported = true;
469 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
470 hwmgr->smu_version = smum_get_argument(hwmgr);
471 /* ACG firmware has major version 5 */
472 if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
473 data->smu_features[GNLD_ACG].supported = true;
474 if (data->registry_data.didt_support)
475 data->smu_features[GNLD_DIDT].supported = true;
477 hw_revision = adev->pdev->revision;
478 sub_vendor_id = adev->pdev->subsystem_vendor;
480 if ((hwmgr->chip_id == 0x6862 ||
481 hwmgr->chip_id == 0x6861 ||
482 hwmgr->chip_id == 0x6868) &&
483 (hw_revision == 0) &&
484 (sub_vendor_id != 0x1002))
485 data->smu_features[GNLD_PCC_LIMIT].supported = true;
488 #ifdef PPLIB_VEGA10_EVV_SUPPORT
489 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
490 phm_ppt_v1_voltage_lookup_table *lookup_table,
491 uint16_t virtual_voltage_id, int32_t *socclk)
495 struct phm_ppt_v2_information *table_info =
496 (struct phm_ppt_v2_information *)(hwmgr->pptable);
498 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
499 "Lookup table is empty",
502 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
503 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
504 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
505 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
509 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
510 "Can't find requested voltage id in vdd_dep_on_socclk table!",
513 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
518 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
520 * Get Leakage VDDC based on leakage ID.
522 * @param hwmgr the address of the powerplay hardware manager.
525 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
527 struct vega10_hwmgr *data = hwmgr->backend;
532 struct phm_ppt_v2_information *table_info =
533 (struct phm_ppt_v2_information *)hwmgr->pptable;
534 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
535 table_info->vdd_dep_on_socclk;
538 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
539 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
541 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
542 table_info->vddc_lookup_table, vv_id, &sclk)) {
543 if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
544 for (j = 1; j < socclk_table->count; j++) {
545 if (socclk_table->entries[j].clk == sclk &&
546 socclk_table->entries[j].cks_enable == 0) {
553 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
554 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
555 "Error retrieving EVV voltage value!",
559 /* need to make sure vddc is less than 2 V, or else it could burn the ASIC. */
560 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
561 "Invalid VDDC value", result = -EINVAL;);
563 /* the voltage should not be zero nor equal to leakage ID */
564 if (vddc != 0 && vddc != vv_id) {
565 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
566 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
567 data->vddc_leakage.count++;
576 * Change virtual leakage voltage to actual value.
578 * @param hwmgr the address of the powerplay hardware manager.
579 * @param voltage pointer to the voltage value to be patched
580 * @param leakage_table pointer to the leakage voltage table
582 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
583 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
587 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
588 for (index = 0; index < leakage_table->count; index++) {
589 /* if this voltage matches a leakage voltage ID */
590 /* patch with actual leakage voltage */
591 if (leakage_table->leakage_id[index] == *voltage) {
592 *voltage = leakage_table->actual_voltage[index];
597 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
598 pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
602 * Patch voltage lookup table by EVV leakages.
604 * @param hwmgr the address of the powerplay hardware manager.
605 * @param lookup_table pointer to the voltage lookup table
606 * @param leakage_table pointer to the leakage voltage table
609 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
610 phm_ppt_v1_voltage_lookup_table *lookup_table,
611 struct vega10_leakage_voltage *leakage_table)
615 for (i = 0; i < lookup_table->count; i++)
616 vega10_patch_with_vdd_leakage(hwmgr,
617 &lookup_table->entries[i].us_vdd, leakage_table);
622 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
623 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
626 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
632 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
633 struct pp_hwmgr *hwmgr)
635 uint8_t entry_id, voltage_id;
637 struct phm_ppt_v2_information *table_info =
638 (struct phm_ppt_v2_information *)(hwmgr->pptable);
639 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
640 table_info->mm_dep_table;
641 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
642 table_info->vdd_dep_on_mclk;
644 for (i = 0; i < 6; i++) {
645 struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
646 switch (i) {
647 case 0: vdt = table_info->vdd_dep_on_socclk; break;
648 case 1: vdt = table_info->vdd_dep_on_sclk; break;
649 case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
650 case 3: vdt = table_info->vdd_dep_on_pixclk; break;
651 case 4: vdt = table_info->vdd_dep_on_dispclk; break;
652 case 5: vdt = table_info->vdd_dep_on_phyclk; break;
655 for (entry_id = 0; entry_id < vdt->count; entry_id++) {
656 voltage_id = vdt->entries[entry_id].vddInd;
657 vdt->entries[entry_id].vddc =
658 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
662 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
663 voltage_id = mm_table->entries[entry_id].vddcInd;
664 mm_table->entries[entry_id].vddc =
665 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
668 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
669 voltage_id = mclk_table->entries[entry_id].vddInd;
670 mclk_table->entries[entry_id].vddc =
671 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
672 voltage_id = mclk_table->entries[entry_id].vddciInd;
673 mclk_table->entries[entry_id].vddci =
674 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
675 voltage_id = mclk_table->entries[entry_id].mvddInd;
676 mclk_table->entries[entry_id].mvdd =
677 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
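/* Sort the voltage lookup table into ascending us_vdd order using a simple
 * insertion sort.
 */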
685 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
686 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
688 uint32_t table_size, i, j;
689 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
691 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
692 "Lookup table is empty", return -EINVAL);
694 table_size = lookup_table->count;
696 /* Sorting voltages */
697 for (i = 0; i < table_size - 1; i++) {
698 for (j = i + 1; j > 0; j--) {
699 if (lookup_table->entries[j].us_vdd <
700 lookup_table->entries[j - 1].us_vdd) {
701 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
702 lookup_table->entries[j - 1] = lookup_table->entries[j];
703 lookup_table->entries[j] = tmp_voltage_lookup_record;
711 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
715 struct phm_ppt_v2_information *table_info =
716 (struct phm_ppt_v2_information *)(hwmgr->pptable);
717 #ifdef PPLIB_VEGA10_EVV_SUPPORT
718 struct vega10_hwmgr *data = hwmgr->backend;
720 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
721 table_info->vddc_lookup_table, &(data->vddc_leakage));
725 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
726 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
731 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
735 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
742 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
744 struct phm_ppt_v2_information *table_info =
745 (struct phm_ppt_v2_information *)(hwmgr->pptable);
746 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
747 table_info->vdd_dep_on_socclk;
748 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
749 table_info->vdd_dep_on_mclk;
751 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
752 "VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
753 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
754 "VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);
756 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
757 "VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
758 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
759 "VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);
761 table_info->max_clock_voltage_on_ac.sclk =
762 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
763 table_info->max_clock_voltage_on_ac.mclk =
764 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
765 table_info->max_clock_voltage_on_ac.vddc =
766 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
767 table_info->max_clock_voltage_on_ac.vddci =
768 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
770 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
771 table_info->max_clock_voltage_on_ac.sclk;
772 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
773 table_info->max_clock_voltage_on_ac.mclk;
774 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
775 table_info->max_clock_voltage_on_ac.vddc;
776 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
777 table_info->max_clock_voltage_on_ac.vddci;
782 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
784 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
785 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
787 kfree(hwmgr->backend);
788 hwmgr->backend = NULL;
793 static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
796 struct vega10_hwmgr *data;
797 uint32_t config_telemetry = 0;
798 struct pp_atomfwctrl_voltage_table vol_table;
799 struct amdgpu_device *adev = hwmgr->adev;
801 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
805 hwmgr->backend = data;
807 hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
808 hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
809 hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
811 vega10_set_default_registry_data(hwmgr);
812 data->disable_dpm_mask = 0xff;
814 /* need to set voltage control types before EVV patching */
815 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
816 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
817 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
820 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
821 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
822 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
823 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
825 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
826 (vol_table.telemetry_offset & 0xff);
827 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
830 kfree(hwmgr->backend);
831 hwmgr->backend = NULL;
832 PP_ASSERT_WITH_CODE(false,
833 "VDDCR_SOC is not SVID2!",
838 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
839 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
840 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
841 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
844 ((vol_table.telemetry_slope << 24) & 0xff000000) |
845 ((vol_table.telemetry_offset << 16) & 0xff0000);
846 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
851 if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
852 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
853 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
854 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
857 data->config_telemetry = config_telemetry;
859 vega10_set_features_platform_caps(hwmgr);
861 vega10_init_dpm_defaults(hwmgr);
863 #ifdef PPLIB_VEGA10_EVV_SUPPORT
864 /* Get leakage voltage based on leakage ID. */
865 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
866 "Get EVV Voltage Failed. Abort Driver loading!",
870 /* Patch our voltage dependency table with actual leakage voltage.
871 * We need to perform leakage translation before it's used by other functions.
872 */
873 vega10_complete_dependency_tables(hwmgr);
875 /* Parse pptable data read from VBIOS */
876 vega10_set_private_data_based_on_pptable(hwmgr);
878 data->is_tlu_enabled = false;
880 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
881 VEGA10_MAX_HARDWARE_POWERLEVELS;
882 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
883 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
885 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
886 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
887 hwmgr->platform_descriptor.clockStep.engineClock = 500;
888 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
890 data->total_active_cus = adev->gfx.cu_info.number;
891 /* Setup default Overdrive Fan control settings */
892 data->odn_fan_table.target_fan_speed =
893 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
894 data->odn_fan_table.target_temperature =
895 hwmgr->thermal_controller.
896 advanceFanControlParameters.ucTargetTemperature;
897 data->odn_fan_table.min_performance_clock =
898 hwmgr->thermal_controller.advanceFanControlParameters.
899 ulMinFanSCLKAcousticLimit;
900 data->odn_fan_table.min_fan_limit =
901 hwmgr->thermal_controller.
902 advanceFanControlParameters.usFanPWMMinLimit *
903 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
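/* Determine how many HBM channels are populated from the DF interleave field
 * (IntLvNumChan) and validate the value against channel_number[] above.
 */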
905 data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
906 DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
907 DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
908 PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
909 "Mem Channel Index Exceeded maximum!",
915 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
917 struct vega10_hwmgr *data = hwmgr->backend;
919 data->low_sclk_interrupt_threshold = 0;
924 static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
926 struct vega10_hwmgr *data = hwmgr->backend;
927 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
929 struct pp_atomfwctrl_voltage_table table;
935 ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
936 VOLTAGE_OBJ_GPIO_LUT, &table);
939 tmp = table.mask_low;
940 for (i = 0, j = 0; i < 32; i++) {
942 mask |= (uint32_t)(i << (8 * j));
950 pp_table->LedPin0 = (uint8_t)(mask & 0xff);
951 pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
952 pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
956 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
958 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
959 "Failed to init sclk threshold!",
962 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
963 "Failed to set up led dpm config!",
966 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
972 * Remove repeated voltage values and create table with unique values.
974 * @param hwmgr the address of the powerplay hardware manager.
975 * @param vol_table the pointer to the voltage table to be trimmed
976 * @return 0 on success
979 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
980 struct pp_atomfwctrl_voltage_table *vol_table)
985 struct pp_atomfwctrl_voltage_table *table;
987 PP_ASSERT_WITH_CODE(vol_table,
988 "Voltage Table empty.", return -EINVAL);
989 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
995 table->mask_low = vol_table->mask_low;
996 table->phase_delay = vol_table->phase_delay;
998 for (i = 0; i < vol_table->count; i++) {
999 vvalue = vol_table->entries[i].value;
1002 for (j = 0; j < table->count; j++) {
1003 if (vvalue == table->entries[j].value) {
1010 table->entries[table->count].value = vvalue;
1011 table->entries[table->count].smio_low =
1012 vol_table->entries[i].smio_low;
1017 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1023 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
1024 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1025 struct pp_atomfwctrl_voltage_table *vol_table)
1029 PP_ASSERT_WITH_CODE(dep_table->count,
1030 "Voltage Dependency Table empty.",
1033 vol_table->mask_low = 0;
1034 vol_table->phase_delay = 0;
1035 vol_table->count = dep_table->count;
1037 for (i = 0; i < vol_table->count; i++) {
1038 vol_table->entries[i].value = dep_table->entries[i].mvdd;
1039 vol_table->entries[i].smio_low = 0;
1042 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1044 "Failed to trim MVDD Table!",
1050 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1051 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1052 struct pp_atomfwctrl_voltage_table *vol_table)
1056 PP_ASSERT_WITH_CODE(dep_table->count,
1057 "Voltage Dependency Table empty.",
1060 vol_table->mask_low = 0;
1061 vol_table->phase_delay = 0;
1062 vol_table->count = dep_table->count;
1064 for (i = 0; i < dep_table->count; i++) {
1065 vol_table->entries[i].value = dep_table->entries[i].vddci;
1066 vol_table->entries[i].smio_low = 0;
1069 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1070 "Failed to trim VDDCI table.",
1076 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1077 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1078 struct pp_atomfwctrl_voltage_table *vol_table)
1082 PP_ASSERT_WITH_CODE(dep_table->count,
1083 "Voltage Dependency Table empty.",
1086 vol_table->mask_low = 0;
1087 vol_table->phase_delay = 0;
1088 vol_table->count = dep_table->count;
1090 for (i = 0; i < vol_table->count; i++) {
1091 vol_table->entries[i].value = dep_table->entries[i].vddc;
1092 vol_table->entries[i].smio_low = 0;
1098 /* ---- Voltage Tables ----
1099 * If the voltage table is bigger than what
1100 * will fit into the state table on the SMC,
1101 * keep only the highest entries.
1103 static void vega10_trim_voltage_table_to_fit_state_table(
1104 struct pp_hwmgr *hwmgr,
1105 uint32_t max_vol_steps,
1106 struct pp_atomfwctrl_voltage_table *vol_table)
1108 unsigned int i, diff;
1110 if (vol_table->count <= max_vol_steps)
1113 diff = vol_table->count - max_vol_steps;
1115 for (i = 0; i < max_vol_steps; i++)
1116 vol_table->entries[i] = vol_table->entries[i + diff];
1118 vol_table->count = max_vol_steps;
1122 * Create Voltage Tables.
1124 * @param hwmgr the address of the powerplay hardware manager.
1127 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1129 struct vega10_hwmgr *data = hwmgr->backend;
1130 struct phm_ppt_v2_information *table_info =
1131 (struct phm_ppt_v2_information *)hwmgr->pptable;
1134 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1135 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1136 result = vega10_get_mvdd_voltage_table(hwmgr,
1137 table_info->vdd_dep_on_mclk,
1138 &(data->mvdd_voltage_table));
1139 PP_ASSERT_WITH_CODE(!result,
1140 "Failed to retrieve MVDDC table!",
1144 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1145 result = vega10_get_vddci_voltage_table(hwmgr,
1146 table_info->vdd_dep_on_mclk,
1147 &(data->vddci_voltage_table));
1148 PP_ASSERT_WITH_CODE(!result,
1149 "Failed to retrieve VDDCI_MEM table!",
1153 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1154 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1155 result = vega10_get_vdd_voltage_table(hwmgr,
1156 table_info->vdd_dep_on_sclk,
1157 &(data->vddc_voltage_table));
1158 PP_ASSERT_WITH_CODE(!result,
1159 "Failed to retrieve VDDCR_SOC table!",
1163 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1164 "Too many voltage values for VDDC. Trimming to fit state table.",
1165 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1166 16, &(data->vddc_voltage_table)));
1168 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1169 "Too many voltage values for VDDCI. Trimming to fit state table.",
1170 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1171 16, &(data->vddci_voltage_table)));
1173 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1174 "Too many voltage values for MVDD. Trimming to fit state table.",
1175 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1176 16, &(data->mvdd_voltage_table)));
1183 * @fn vega10_init_dpm_state
1184 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1186 * @param dpm_state - the address of the DPM table to initialize.
1189 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1191 dpm_state->soft_min_level = 0xff;
1192 dpm_state->soft_max_level = 0xff;
1193 dpm_state->hard_min_level = 0xff;
1194 dpm_state->hard_max_level = 0xff;
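/* Build a single DPM table from a clock/voltage dependency table: every entry
 * whose clock is not lower than the previously added level becomes a DPM level.
 */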
1197 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1198 struct vega10_single_dpm_table *dpm_table,
1199 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1203 dpm_table->count = 0;
1205 for (i = 0; i < dep_table->count; i++) {
1206 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1207 dep_table->entries[i].clk) {
1208 dpm_table->dpm_levels[dpm_table->count].value =
1209 dep_table->entries[i].clk;
1210 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1215 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1217 struct vega10_hwmgr *data = hwmgr->backend;
1218 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1219 struct phm_ppt_v2_information *table_info =
1220 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1221 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1222 table_info->pcie_table;
1225 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1226 "Incorrect number of PCIE States from VBIOS!",
1229 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1230 if (data->registry_data.pcieSpeedOverride)
1231 pcie_table->pcie_gen[i] =
1232 data->registry_data.pcieSpeedOverride;
1234 pcie_table->pcie_gen[i] =
1235 bios_pcie_table->entries[i].gen_speed;
1237 if (data->registry_data.pcieLaneOverride)
1238 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1239 data->registry_data.pcieLaneOverride);
1241 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1242 bios_pcie_table->entries[i].lane_width);
1243 if (data->registry_data.pcieClockOverride)
1244 pcie_table->lclk[i] =
1245 data->registry_data.pcieClockOverride;
1247 pcie_table->lclk[i] =
1248 bios_pcie_table->entries[i].pcie_sclk;
1251 pcie_table->count = NUM_LINK_LEVELS;
1257 * This function initializes all DPM state tables
1258 * for SMU based on the dependency table.
1259 * Dynamic state patching function will then trim these
1260 * state tables to the allowed range based
1261 * on the power policy or external client requests,
1262 * such as UVD request, etc.
1264 static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1266 struct vega10_hwmgr *data = hwmgr->backend;
1267 struct phm_ppt_v2_information *table_info =
1268 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1269 struct vega10_single_dpm_table *dpm_table;
1272 struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
1273 table_info->vdd_dep_on_socclk;
1274 struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
1275 table_info->vdd_dep_on_sclk;
1276 struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
1277 table_info->vdd_dep_on_mclk;
1278 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
1279 table_info->mm_dep_table;
1280 struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
1281 table_info->vdd_dep_on_dcefclk;
1282 struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
1283 table_info->vdd_dep_on_pixclk;
1284 struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
1285 table_info->vdd_dep_on_dispclk;
1286 struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
1287 table_info->vdd_dep_on_phyclk;
1289 PP_ASSERT_WITH_CODE(dep_soc_table,
1290 "SOCCLK dependency table is missing. This table is mandatory",
1292 PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
1293 "SOCCLK dependency table is empty. This table is mandatory",
1296 PP_ASSERT_WITH_CODE(dep_gfx_table,
1297 "GFXCLK dependency table is missing. This table is mandatory",
1299 PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
1300 "GFXCLK dependency table is empty. This table is mandatory",
1303 PP_ASSERT_WITH_CODE(dep_mclk_table,
1304 "MCLK dependency table is missing. This table is mandatory",
1306 PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
1307 "MCLK dependency table has to have is missing. This table is mandatory",
1310 /* Initialize Sclk DPM table based on allowed Sclk values */
1311 dpm_table = &(data->dpm_table.soc_table);
1312 vega10_setup_default_single_dpm_table(hwmgr,
1316 vega10_init_dpm_state(&(dpm_table->dpm_state));
1318 dpm_table = &(data->dpm_table.gfx_table);
1319 vega10_setup_default_single_dpm_table(hwmgr,
1322 if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
1323 hwmgr->platform_descriptor.overdriveLimit.engineClock =
1324 dpm_table->dpm_levels[dpm_table->count-1].value;
1325 vega10_init_dpm_state(&(dpm_table->dpm_state));
1327 /* Initialize Mclk DPM table based on allowed Mclk values */
1328 data->dpm_table.mem_table.count = 0;
1329 dpm_table = &(data->dpm_table.mem_table);
1330 vega10_setup_default_single_dpm_table(hwmgr,
1333 if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
1334 hwmgr->platform_descriptor.overdriveLimit.memoryClock =
1335 dpm_table->dpm_levels[dpm_table->count-1].value;
1336 vega10_init_dpm_state(&(dpm_table->dpm_state));
1338 data->dpm_table.eclk_table.count = 0;
1339 dpm_table = &(data->dpm_table.eclk_table);
1340 for (i = 0; i < dep_mm_table->count; i++) {
1341 if (i == 0 || dpm_table->dpm_levels
1342 [dpm_table->count - 1].value <=
1343 dep_mm_table->entries[i].eclk) {
1344 dpm_table->dpm_levels[dpm_table->count].value =
1345 dep_mm_table->entries[i].eclk;
1346 dpm_table->dpm_levels[dpm_table->count].enabled =
1347 (i == 0) ? true : false;
1351 vega10_init_dpm_state(&(dpm_table->dpm_state));
1353 data->dpm_table.vclk_table.count = 0;
1354 data->dpm_table.dclk_table.count = 0;
1355 dpm_table = &(data->dpm_table.vclk_table);
1356 for (i = 0; i < dep_mm_table->count; i++) {
1357 if (i == 0 || dpm_table->dpm_levels
1358 [dpm_table->count - 1].value <=
1359 dep_mm_table->entries[i].vclk) {
1360 dpm_table->dpm_levels[dpm_table->count].value =
1361 dep_mm_table->entries[i].vclk;
1362 dpm_table->dpm_levels[dpm_table->count].enabled =
1363 (i == 0) ? true : false;
1367 vega10_init_dpm_state(&(dpm_table->dpm_state));
1369 dpm_table = &(data->dpm_table.dclk_table);
1370 for (i = 0; i < dep_mm_table->count; i++) {
1371 if (i == 0 || dpm_table->dpm_levels
1372 [dpm_table->count - 1].value <=
1373 dep_mm_table->entries[i].dclk) {
1374 dpm_table->dpm_levels[dpm_table->count].value =
1375 dep_mm_table->entries[i].dclk;
1376 dpm_table->dpm_levels[dpm_table->count].enabled =
1377 (i == 0) ? true : false;
1381 vega10_init_dpm_state(&(dpm_table->dpm_state));
1383 /* Assume there is no headless Vega10 for now */
1384 dpm_table = &(data->dpm_table.dcef_table);
1385 vega10_setup_default_single_dpm_table(hwmgr,
1389 vega10_init_dpm_state(&(dpm_table->dpm_state));
1391 dpm_table = &(data->dpm_table.pixel_table);
1392 vega10_setup_default_single_dpm_table(hwmgr,
1396 vega10_init_dpm_state(&(dpm_table->dpm_state));
1398 dpm_table = &(data->dpm_table.display_table);
1399 vega10_setup_default_single_dpm_table(hwmgr,
1403 vega10_init_dpm_state(&(dpm_table->dpm_state));
1405 dpm_table = &(data->dpm_table.phy_table);
1406 vega10_setup_default_single_dpm_table(hwmgr,
1410 vega10_init_dpm_state(&(dpm_table->dpm_state));
1412 vega10_setup_default_pcie_table(hwmgr);
1414 /* save a copy of the default DPM table */
1415 memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1416 sizeof(struct vega10_dpm_table));
1422 * @fn vega10_populate_ulv_state
1423 * @brief Function to provide parameters for the Ultra Low Voltage (ULV) state to the SMC.
1425 * @param hwmgr - the address of the hardware manager.
1428 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1430 struct vega10_hwmgr *data = hwmgr->backend;
1431 struct phm_ppt_v2_information *table_info =
1432 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1434 data->smc_state_table.pp_table.UlvOffsetVid =
1435 (uint8_t)table_info->us_ulv_voltage_offset;
1437 data->smc_state_table.pp_table.UlvSmnclkDid =
1438 (uint8_t)(table_info->us_ulv_smnclk_did);
1439 data->smc_state_table.pp_table.UlvMp1clkDid =
1440 (uint8_t)(table_info->us_ulv_mp1clk_did);
1441 data->smc_state_table.pp_table.UlvGfxclkBypass =
1442 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1443 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1444 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1445 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1446 (uint8_t)(data->vddc_voltage_table.psi1_enable);
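/* Ask the VBIOS for the divider (DID) that produces the requested LCLK and
 * store it in *curr_lclk_did for the corresponding PCIe link level.
 */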
1451 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1452 uint32_t lclock, uint8_t *curr_lclk_did)
1454 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1456 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1458 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1460 "Failed to get LCLK clock settings from VBIOS!",
1463 *curr_lclk_did = dividers.ulDid;
1468 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
1471 struct vega10_hwmgr *data = hwmgr->backend;
1472 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1473 struct vega10_pcie_table *pcie_table =
1474 &(data->dpm_table.pcie_table);
1477 for (i = 0; i < pcie_table->count; i++) {
1478 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
1479 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
1481 result = vega10_populate_single_lclk_level(hwmgr,
1482 pcie_table->lclk[i], &(pp_table->LclkDid[i]));
1484 pr_info("Populate LClock Level %d Failed!\n", i);
1490 while (i < NUM_LINK_LEVELS) {
1491 pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
1492 pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
1494 result = vega10_populate_single_lclk_level(hwmgr,
1495 pcie_table->lclk[j], &(pp_table->LclkDid[i]));
1497 pr_info("Populate LClock Level %d Failed!\n", i);
1507 * Populates a single SMC GFXCLK structure using the provided engine clock.
1509 * @param hwmgr the address of the hardware manager
1510 * @param gfx_clock the GFX clock to use to populate the structure.
1511 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
1514 static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
1515 uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
1518 struct phm_ppt_v2_information *table_info =
1519 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1520 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
1521 struct vega10_hwmgr *data = hwmgr->backend;
1522 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1523 uint32_t gfx_max_clock =
1524 hwmgr->platform_descriptor.overdriveLimit.engineClock;
1527 if (hwmgr->od_enabled)
1528 dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1529 &(data->odn_dpm_table.vdd_dep_on_sclk);
1531 dep_on_sclk = table_info->vdd_dep_on_sclk;
1533 PP_ASSERT_WITH_CODE(dep_on_sclk,
1534 "Invalid SOC_VDD-GFX_CLK Dependency Table!",
1537 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
1538 gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
1540 for (i = 0; i < dep_on_sclk->count; i++) {
1541 if (dep_on_sclk->entries[i].clk == gfx_clock)
1544 PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
1545 "Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
1549 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1550 COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
1551 gfx_clock, ÷rs),
1552 "Failed to get GFX Clock settings from VBIOS!",
1555 /* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
1556 current_gfxclk_level->FbMult =
1557 cpu_to_le32(dividers.ulPll_fb_mult);
1558 /* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
1559 current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
1560 current_gfxclk_level->SsFbMult =
1561 cpu_to_le32(dividers.ulPll_ss_fbsmult);
1562 current_gfxclk_level->SsSlewFrac =
1563 cpu_to_le16(dividers.usPll_ss_slew_frac);
1564 current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
1566 *acg_freq = gfx_clock / 100; /* gfx_clock is in 10 kHz units; convert to MHz */
1572 * @brief Populates single SMC SOCCLK structure using the provided clock.
1574 * @param hwmgr - the address of the hardware manager.
1575 * @param soc_clock - the SOC clock to use to populate the structure.
1576 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1577 * @return 0 on success.
1579 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1580 uint32_t soc_clock, uint8_t *current_soc_did,
1581 uint8_t *current_vol_index)
1583 struct vega10_hwmgr *data = hwmgr->backend;
1584 struct phm_ppt_v2_information *table_info =
1585 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1586 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
1587 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1590 if (hwmgr->od_enabled) {
1591 dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1592 &data->odn_dpm_table.vdd_dep_on_socclk;
1593 for (i = 0; i < dep_on_soc->count; i++) {
1594 if (dep_on_soc->entries[i].clk >= soc_clock)
1598 dep_on_soc = table_info->vdd_dep_on_socclk;
1599 for (i = 0; i < dep_on_soc->count; i++) {
1600 if (dep_on_soc->entries[i].clk == soc_clock)
1605 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1606 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1609 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1610 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1611 soc_clock, ÷rs),
1612 "Failed to get SOC Clock settings from VBIOS!",
1615 *current_soc_did = (uint8_t)dividers.ulDid;
1616 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1621 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
1623 * @param hwmgr the address of the hardware manager
1625 static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
1627 struct vega10_hwmgr *data = hwmgr->backend;
1628 struct phm_ppt_v2_information *table_info =
1629 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1630 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1631 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
1635 for (i = 0; i < dpm_table->count; i++) {
1636 result = vega10_populate_single_gfx_level(hwmgr,
1637 dpm_table->dpm_levels[i].value,
1638 &(pp_table->GfxclkLevel[i]),
1639 &(pp_table->AcgFreqTable[i]));
1645 while (i < NUM_GFXCLK_DPM_LEVELS) {
1646 result = vega10_populate_single_gfx_level(hwmgr,
1647 dpm_table->dpm_levels[j].value,
1648 &(pp_table->GfxclkLevel[i]),
1649 &(pp_table->AcgFreqTable[i]));
1655 pp_table->GfxclkSlewRate =
1656 cpu_to_le16(table_info->us_gfxclk_slew_rate);
1658 dpm_table = &(data->dpm_table.soc_table);
1659 for (i = 0; i < dpm_table->count; i++) {
1660 result = vega10_populate_single_soc_level(hwmgr,
1661 dpm_table->dpm_levels[i].value,
1662 &(pp_table->SocclkDid[i]),
1663 &(pp_table->SocDpmVoltageIndex[i]));
1669 while (i < NUM_SOCCLK_DPM_LEVELS) {
1670 result = vega10_populate_single_soc_level(hwmgr,
1671 dpm_table->dpm_levels[j].value,
1672 &(pp_table->SocclkDid[i]),
1673 &(pp_table->SocDpmVoltageIndex[i]));
1682 static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
1684 struct vega10_hwmgr *data = hwmgr->backend;
1685 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1686 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
1687 struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
1689 uint8_t soc_vid = 0;
1690 uint32_t i, max_vddc_level;
1692 if (hwmgr->od_enabled)
1693 vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
1695 vddc_lookup_table = table_info->vddc_lookup_table;
1697 max_vddc_level = vddc_lookup_table->count;
1698 for (i = 0; i < max_vddc_level; i++) {
1699 soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
1700 pp_table->SocVid[i] = soc_vid;
1702 while (i < MAX_REGULAR_DPM_NUMBER) {
1703 pp_table->SocVid[i] = soc_vid;
1709 * @brief Populates a single SMC UCLK (memory clock) structure using the provided clock.
1711 * @param hwmgr - the address of the hardware manager.
1712 * @param mem_clock - the memory clock to use to populate the structure.
1713 * @return 0 on success.
1715 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1716 uint32_t mem_clock, uint8_t *current_mem_vid,
1717 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1719 struct vega10_hwmgr *data = hwmgr->backend;
1720 struct phm_ppt_v2_information *table_info =
1721 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1722 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
1723 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1724 uint32_t mem_max_clock =
1725 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1728 if (hwmgr->od_enabled)
1729 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1730 &data->odn_dpm_table.vdd_dep_on_mclk;
1732 dep_on_mclk = table_info->vdd_dep_on_mclk;
1734 PP_ASSERT_WITH_CODE(dep_on_mclk,
1735 "Invalid SOC_VDD-UCLK Dependency Table!",
1738 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
1739 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1741 for (i = 0; i < dep_on_mclk->count; i++) {
1742 if (dep_on_mclk->entries[i].clk == mem_clock)
1745 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1746 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1750 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1751 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, ÷rs),
1752 "Failed to get UCLK settings from VBIOS!",
1756 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1757 *current_mem_soc_vind =
1758 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1759 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1760 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1762 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1763 "Invalid Divider ID!",
1770 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
1772 * @param hwmgr - the address of the hardware manager.
1773 * @return 0 on success.
1775 static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
1777 struct vega10_hwmgr *data = hwmgr->backend;
1778 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1779 struct vega10_single_dpm_table *dpm_table =
1780 &(data->dpm_table.mem_table);
1784 for (i = 0; i < dpm_table->count; i++) {
1785 result = vega10_populate_single_memory_level(hwmgr,
1786 dpm_table->dpm_levels[i].value,
1787 &(pp_table->MemVid[i]),
1788 &(pp_table->UclkLevel[i]),
1789 &(pp_table->MemSocVoltageIndex[i]));
1795 while (i < NUM_UCLK_DPM_LEVELS) {
1796 result = vega10_populate_single_memory_level(hwmgr,
1797 dpm_table->dpm_levels[j].value,
1798 &(pp_table->MemVid[i]),
1799 &(pp_table->UclkLevel[i]),
1800 &(pp_table->MemSocVoltageIndex[i]));
1806 pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
1807 pp_table->MemoryChannelWidth =
1808 (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
1809 channel_number[data->mem_channels]);
1811 pp_table->LowestUclkReservedForUlv =
1812 (uint8_t)(data->lowest_uclk_reserved_for_ulv);
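/*
 * Fill the DisplayClockTable entries for one display clock type
 * (DCEF/DISP/PIX/PHY) from its SOC_VDD dependency table, converting each
 * entry into a Freq/Vid pair and padding the table up to NUM_DSPCLK_LEVELS.
 */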
1817 static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
1818 DSPCLK_e disp_clock)
1820 struct vega10_hwmgr *data = hwmgr->backend;
1821 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1822 struct phm_ppt_v2_information *table_info =
1823 (struct phm_ppt_v2_information *)
1825 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
1827 uint16_t clk = 0, vddc = 0;
1830 switch (disp_clock) {
1831 case DSPCLK_DCEFCLK:
1832 dep_table = table_info->vdd_dep_on_dcefclk;
1834 case DSPCLK_DISPCLK:
1835 dep_table = table_info->vdd_dep_on_dispclk;
1838 dep_table = table_info->vdd_dep_on_pixclk;
1841 dep_table = table_info->vdd_dep_on_phyclk;
1847 PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
1848 "Number Of Entries Exceeded maximum!",
1851 for (i = 0; i < dep_table->count; i++) {
1852 clk = (uint16_t)(dep_table->entries[i].clk / 100);
1853 vddc = table_info->vddc_lookup_table->
1854 entries[dep_table->entries[i].vddInd].us_vdd;
1855 vid = (uint8_t)convert_to_vid(vddc);
1856 pp_table->DisplayClockTable[disp_clock][i].Freq =
1858 pp_table->DisplayClockTable[disp_clock][i].Vid =
1862 while (i < NUM_DSPCLK_LEVELS) {
1863 pp_table->DisplayClockTable[disp_clock][i].Freq =
1865 pp_table->DisplayClockTable[disp_clock][i].Vid =
1873 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1877 for (i = 0; i < DSPCLK_COUNT; i++) {
1878 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1879 "Failed to populate Clock in DisplayClockTable!",
1886 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1887 uint32_t eclock, uint8_t *current_eclk_did,
1888 uint8_t *current_soc_vol)
1890 struct phm_ppt_v2_information *table_info =
1891 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1892 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1893 table_info->mm_dep_table;
1894 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1897 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1898 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1900 "Failed to get ECLK clock settings from VBIOS!",
1903 *current_eclk_did = (uint8_t)dividers.ulDid;
1905 for (i = 0; i < dep_table->count; i++) {
1906 if (dep_table->entries[i].eclk == eclock)
1907 *current_soc_vol = dep_table->entries[i].vddcInd;
1913 static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
1915 struct vega10_hwmgr *data = hwmgr->backend;
1916 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1917 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
1918 int result = -EINVAL;
1921 for (i = 0; i < dpm_table->count; i++) {
1922 result = vega10_populate_single_eclock_level(hwmgr,
1923 dpm_table->dpm_levels[i].value,
1924 &(pp_table->EclkDid[i]),
1925 &(pp_table->VceDpmVoltageIndex[i]));
1931 while (i < NUM_VCE_DPM_LEVELS) {
1932 result = vega10_populate_single_eclock_level(hwmgr,
1933 dpm_table->dpm_levels[j].value,
1934 &(pp_table->EclkDid[i]),
1935 &(pp_table->VceDpmVoltageIndex[i]));
1944 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1945 uint32_t vclock, uint8_t *current_vclk_did)
1947 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1949 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1950 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1952 "Failed to get VCLK clock settings from VBIOS!",
1955 *current_vclk_did = (uint8_t)dividers.ulDid;
1960 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
1961 uint32_t dclock, uint8_t *current_dclk_did)
1963 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1965 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1966 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1968 "Failed to get DCLK clock settings from VBIOS!",
1971 *current_dclk_did = (uint8_t)dividers.ulDid;
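/*
 * Populate the UVD DPM levels: VCLK and DCLK divider IDs for each level of
 * the vclk/dclk DPM tables (padded up to NUM_UVD_DPM_LEVELS), plus the SOC
 * voltage index taken from the matching MM clock-voltage dependency entry.
 */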
1976 static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
1978 struct vega10_hwmgr *data = hwmgr->backend;
1979 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1980 struct vega10_single_dpm_table *vclk_dpm_table =
1981 &(data->dpm_table.vclk_table);
1982 struct vega10_single_dpm_table *dclk_dpm_table =
1983 &(data->dpm_table.dclk_table);
1984 struct phm_ppt_v2_information *table_info =
1985 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1986 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1987 table_info->mm_dep_table;
1988 int result = -EINVAL;
1991 for (i = 0; i < vclk_dpm_table->count; i++) {
1992 result = vega10_populate_single_vclock_level(hwmgr,
1993 vclk_dpm_table->dpm_levels[i].value,
1994 &(pp_table->VclkDid[i]));
2000 while (i < NUM_UVD_DPM_LEVELS) {
2001 result = vega10_populate_single_vclock_level(hwmgr,
2002 vclk_dpm_table->dpm_levels[j].value,
2003 &(pp_table->VclkDid[i]));
2009 for (i = 0; i < dclk_dpm_table->count; i++) {
2010 result = vega10_populate_single_dclock_level(hwmgr,
2011 dclk_dpm_table->dpm_levels[i].value,
2012 &(pp_table->DclkDid[i]));
2018 while (i < NUM_UVD_DPM_LEVELS) {
2019 result = vega10_populate_single_dclock_level(hwmgr,
2020 dclk_dpm_table->dpm_levels[j].value,
2021 &(pp_table->DclkDid[i]));
2027 for (i = 0; i < dep_table->count; i++) {
2028 if (dep_table->entries[i].vclk ==
2029 vclk_dpm_table->dpm_levels[i].value &&
2030 dep_table->entries[i].dclk ==
2031 dclk_dpm_table->dpm_levels[i].value)
2032 pp_table->UvdDpmVoltageIndex[i] =
2033 dep_table->entries[i].vddcInd;
2039 while (i < NUM_UVD_DPM_LEVELS) {
2040 pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2047 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2049 struct vega10_hwmgr *data = hwmgr->backend;
2050 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2051 struct phm_ppt_v2_information *table_info =
2052 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2053 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2054 table_info->vdd_dep_on_sclk;
2057 for (i = 0; i < dep_table->count; i++) {
2058 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2059 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2060 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
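/*
 * Read the AVFS characterization data from the VBIOS and fill the SMC
 * table: min/max voltage VIDs, BTC/AVFS gain-bias coefficients for CKS
 * on/off, per-level static voltage offsets, the DisplayClock2Gfxclk
 * quadratic coefficients (registry overrides take precedence over the
 * VBIOS values) and the ACG tables. AVFS support is dropped if the VBIOS
 * data cannot be retrieved.
 */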
2066 static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2068 struct vega10_hwmgr *data = hwmgr->backend;
2069 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2070 struct phm_ppt_v2_information *table_info =
2071 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2072 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2073 table_info->vdd_dep_on_sclk;
2074 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2078 pp_table->MinVoltageVid = (uint8_t)0xff;
2079 pp_table->MaxVoltageVid = (uint8_t)0;
2081 if (data->smu_features[GNLD_AVFS].supported) {
2082 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2084 pp_table->MinVoltageVid = (uint8_t)
2085 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2086 pp_table->MaxVoltageVid = (uint8_t)
2087 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2089 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2090 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2091 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2092 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2093 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2094 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2095 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2097 pp_table->BtcGbVdroopTableCksOff.a0 =
2098 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2099 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2100 pp_table->BtcGbVdroopTableCksOff.a1 =
2101 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2102 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2103 pp_table->BtcGbVdroopTableCksOff.a2 =
2104 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2105 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2107 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2108 pp_table->BtcGbVdroopTableCksOn.a0 =
2109 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2110 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2111 pp_table->BtcGbVdroopTableCksOn.a1 =
2112 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2113 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2114 pp_table->BtcGbVdroopTableCksOn.a2 =
2115 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2116 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2118 pp_table->AvfsGbCksOn.m1 =
2119 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2120 pp_table->AvfsGbCksOn.m2 =
2121 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2122 pp_table->AvfsGbCksOn.b =
2123 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2124 pp_table->AvfsGbCksOn.m1_shift = 24;
2125 pp_table->AvfsGbCksOn.m2_shift = 12;
2126 pp_table->AvfsGbCksOn.b_shift = 0;
2128 pp_table->OverrideAvfsGbCksOn =
2129 avfs_params.ucEnableGbFuseTableCkson;
2130 pp_table->AvfsGbCksOff.m1 =
2131 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2132 pp_table->AvfsGbCksOff.m2 =
2133 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2134 pp_table->AvfsGbCksOff.b =
2135 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2136 pp_table->AvfsGbCksOff.m1_shift = 24;
2137 pp_table->AvfsGbCksOff.m2_shift = 12;
2138 pp_table->AvfsGbCksOff.b_shift = 0;
2140 for (i = 0; i < dep_table->count; i++)
2141 pp_table->StaticVoltageOffsetVid[i] =
2142 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2144 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2145 data->disp_clk_quad_eqn_a) &&
2146 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2147 data->disp_clk_quad_eqn_b)) {
2148 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2149 (int32_t)data->disp_clk_quad_eqn_a;
2150 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2151 (int32_t)data->disp_clk_quad_eqn_b;
2152 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2153 (int32_t)data->disp_clk_quad_eqn_c;
2155 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2156 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2157 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2158 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2159 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2160 (int32_t)avfs_params.ulDispclk2GfxclkB;
2163 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2164 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2165 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2167 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2168 data->dcef_clk_quad_eqn_a) &&
2169 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2170 data->dcef_clk_quad_eqn_b)) {
2171 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2172 (int32_t)data->dcef_clk_quad_eqn_a;
2173 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2174 (int32_t)data->dcef_clk_quad_eqn_b;
2175 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2176 (int32_t)data->dcef_clk_quad_eqn_c;
2178 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2179 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2180 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2181 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2182 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2183 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2186 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2187 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2188 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2190 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2191 data->pixel_clk_quad_eqn_a) &&
2192 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2193 data->pixel_clk_quad_eqn_b)) {
2194 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2195 (int32_t)data->pixel_clk_quad_eqn_a;
2196 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2197 (int32_t)data->pixel_clk_quad_eqn_b;
2198 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2199 (int32_t)data->pixel_clk_quad_eqn_c;
2201 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2202 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2203 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2204 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2205 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2206 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2209 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2210 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2211 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2212 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2213 data->phy_clk_quad_eqn_a) &&
2214 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2215 data->phy_clk_quad_eqn_b)) {
2216 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2217 (int32_t)data->phy_clk_quad_eqn_a;
2218 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2219 (int32_t)data->phy_clk_quad_eqn_b;
2220 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2221 (int32_t)data->phy_clk_quad_eqn_c;
2223 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2224 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2225 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2226 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2227 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2228 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2231 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2232 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2233 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2235 pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
2236 pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2237 pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
2238 pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2239 pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
2240 pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2242 pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
2243 pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
2244 pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
2245 pp_table->AcgAvfsGb.m1_shift = 0;
2246 pp_table->AcgAvfsGb.m2_shift = 0;
2247 pp_table->AcgAvfsGb.b_shift = 0;
2250 data->smu_features[GNLD_AVFS].supported = false;
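/*
 * Enable ACG: turn on the DPM prefetcher, initialize ACG in the SMC, run
 * the ACG BTC and, if it reports success, start ACG in closed or open loop
 * depending on acg_loop_state before enabling the ACG SMC feature.
 */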
2257 static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2259 struct vega10_hwmgr *data = hwmgr->backend;
2260 uint32_t agc_btc_response;
2262 if (data->smu_features[GNLD_ACG].supported) {
2263 if (0 == vega10_enable_smc_features(hwmgr, true,
2264 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2265 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2267 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
2269 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
2270 agc_btc_response = smum_get_argument(hwmgr);
2272 if (1 == agc_btc_response) {
2273 if (1 == data->acg_loop_state)
2274 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
2275 else if (2 == data->acg_loop_state)
2276 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
2277 if (0 == vega10_enable_smc_features(hwmgr, true,
2278 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2279 data->smu_features[GNLD_ACG].enabled = true;
2281 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2282 data->smu_features[GNLD_ACG].enabled = false;
2289 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2291 struct vega10_hwmgr *data = hwmgr->backend;
2293 if (data->smu_features[GNLD_ACG].supported &&
2294 data->smu_features[GNLD_ACG].enabled)
2295 if (!vega10_enable_smc_features(hwmgr, false,
2296 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2297 data->smu_features[GNLD_ACG].enabled = false;
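/*
 * Query the GPIO assignments from the VBIOS and program the VR0/VR1 hot
 * and AC/DC switch GPIO numbers and polarities; the fields are zeroed when
 * the corresponding platform cap or registry option is not set.
 */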
2302 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2304 struct vega10_hwmgr *data = hwmgr->backend;
2305 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2306 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2309 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2311 if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
2312 data->registry_data.regulator_hot_gpio_support) {
2313 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2314 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2315 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2316 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2318 pp_table->VR0HotGpio = 0;
2319 pp_table->VR0HotPolarity = 0;
2320 pp_table->VR1HotGpio = 0;
2321 pp_table->VR1HotPolarity = 0;
2324 if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
2325 data->registry_data.ac_dc_switch_gpio_support) {
2326 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2327 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2329 pp_table->AcDcGpio = 0;
2330 pp_table->AcDcPolarity = 0;
2337 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2339 struct vega10_hwmgr *data = hwmgr->backend;
2341 if (data->smu_features[GNLD_AVFS].supported) {
2343 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2345 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2346 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2348 data->smu_features[GNLD_AVFS].enabled = true;
2350 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2352 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2353 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2355 data->smu_features[GNLD_AVFS].enabled = false;
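/*
 * Keep AVFS consistent with the pending DPM table updates: an OD VDDC
 * override keeps AVFS disabled, any other pending update restarts it,
 * and otherwise it is simply (re)enabled.
 */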
2362 static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
2364 struct vega10_hwmgr *data = hwmgr->backend;
2366 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2367 vega10_avfs_enable(hwmgr, false);
2368 } else if (data->need_update_dpm_table) {
2369 vega10_avfs_enable(hwmgr, false);
2370 vega10_avfs_enable(hwmgr, true);
2372 vega10_avfs_enable(hwmgr, true);
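/*
 * Read the chip serial number from the SMC and, when a per-part fuse
 * override exists for it, upload the VFT0/1/2 coefficients through the
 * AVFSFUSETABLE SMC table.
 */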
2378 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2382 uint64_t serial_number = 0;
2383 uint32_t top32, bottom32;
2384 struct phm_fuses_default fuse;
2386 struct vega10_hwmgr *data = hwmgr->backend;
2387 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2389 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
2390 top32 = smum_get_argument(hwmgr);
2392 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
2393 bottom32 = smum_get_argument(hwmgr);
2395 serial_number = ((uint64_t)bottom32 << 32) | top32;
2397 if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
2398 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2399 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2400 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2401 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2402 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2403 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2404 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2405 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2406 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2407 result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
2408 AVFSFUSETABLE, false);
2409 PP_ASSERT_WITH_CODE(!result,
2410 "Failed to upload FuseOverride!",
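/*
 * Compare the ODN MCLK/SCLK dependency tables against the powerplay
 * defaults and raise the matching DPMTABLE_OD_UPDATE_* flags; a VDDC
 * change is translated into both an SCLK and an MCLK update.
 */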
2417 static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
2419 struct vega10_hwmgr *data = hwmgr->backend;
2420 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2421 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
2422 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
2423 struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
2426 dep_table = table_info->vdd_dep_on_mclk;
2427 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);
2429 for (i = 0; i < dep_table->count; i++) {
2430 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2431 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
2436 dep_table = table_info->vdd_dep_on_sclk;
2437 odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
2438 for (i = 0; i < dep_table->count; i++) {
2439 if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
2440 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
2445 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
2446 data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
2447 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2452 * Initializes the SMC table and uploads it to the SMC.
2454 * @param hwmgr the address of the powerplay hardware manager.
2455 * @return 0 on success.
2458 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2461 struct vega10_hwmgr *data = hwmgr->backend;
2462 struct phm_ppt_v2_information *table_info =
2463 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2464 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2465 struct pp_atomfwctrl_voltage_table voltage_table;
2466 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2467 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
2469 result = vega10_setup_default_dpm_tables(hwmgr);
2470 PP_ASSERT_WITH_CODE(!result,
2471 "Failed to setup default DPM tables!",
2474 /* initialize ODN table */
2475 if (hwmgr->od_enabled) {
2476 if (odn_table->max_vddc) {
2477 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
2478 vega10_check_dpm_table_updated(hwmgr);
2480 vega10_odn_initial_default_setting(hwmgr);
2484 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2485 VOLTAGE_OBJ_SVID2, &voltage_table);
2486 pp_table->MaxVidStep = voltage_table.max_vid_step;
2488 pp_table->GfxDpmVoltageMode =
2489 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2490 pp_table->SocDpmVoltageMode =
2491 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2492 pp_table->UclkDpmVoltageMode =
2493 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2494 pp_table->UvdDpmVoltageMode =
2495 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2496 pp_table->VceDpmVoltageMode =
2497 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2498 pp_table->Mp0DpmVoltageMode =
2499 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2501 pp_table->DisplayDpmVoltageMode =
2502 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2504 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2505 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2507 if (data->registry_data.ulv_support &&
2508 table_info->us_ulv_voltage_offset) {
2509 result = vega10_populate_ulv_state(hwmgr);
2510 PP_ASSERT_WITH_CODE(!result,
2511 "Failed to initialize ULV state!",
2515 result = vega10_populate_smc_link_levels(hwmgr);
2516 PP_ASSERT_WITH_CODE(!result,
2517 "Failed to initialize Link Level!",
2520 result = vega10_populate_all_graphic_levels(hwmgr);
2521 PP_ASSERT_WITH_CODE(!result,
2522 "Failed to initialize Graphics Level!",
2525 result = vega10_populate_all_memory_levels(hwmgr);
2526 PP_ASSERT_WITH_CODE(!result,
2527 "Failed to initialize Memory Level!",
2530 vega10_populate_vddc_soc_levels(hwmgr);
2532 result = vega10_populate_all_display_clock_levels(hwmgr);
2533 PP_ASSERT_WITH_CODE(!result,
2534 "Failed to initialize Display Level!",
2537 result = vega10_populate_smc_vce_levels(hwmgr);
2538 PP_ASSERT_WITH_CODE(!result,
2539 "Failed to initialize VCE Level!",
2542 result = vega10_populate_smc_uvd_levels(hwmgr);
2543 PP_ASSERT_WITH_CODE(!result,
2544 "Failed to initialize UVD Level!",
2547 if (data->registry_data.clock_stretcher_support) {
2548 result = vega10_populate_clock_stretcher_table(hwmgr);
2549 PP_ASSERT_WITH_CODE(!result,
2550 "Failed to populate Clock Stretcher Table!",
2554 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2556 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2557 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2558 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2559 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2560 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2561 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2562 SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
2564 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2565 SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
2567 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2568 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2569 if (0 != boot_up_values.usVddc) {
2570 smum_send_msg_to_smc_with_parameter(hwmgr,
2571 PPSMC_MSG_SetFloorSocVoltage,
2572 (boot_up_values.usVddc * 4));
2573 data->vbios_boot_state.bsoc_vddc_lock = true;
2575 data->vbios_boot_state.bsoc_vddc_lock = false;
2577 smum_send_msg_to_smc_with_parameter(hwmgr,
2578 PPSMC_MSG_SetMinDeepSleepDcefclk,
2579 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2582 result = vega10_populate_avfs_parameters(hwmgr);
2583 PP_ASSERT_WITH_CODE(!result,
2584 "Failed to initialize AVFS Parameters!",
2587 result = vega10_populate_gpio_parameters(hwmgr);
2588 PP_ASSERT_WITH_CODE(!result,
2589 "Failed to initialize GPIO Parameters!",
2592 pp_table->GfxclkAverageAlpha = (uint8_t)
2593 (data->gfxclk_average_alpha);
2594 pp_table->SocclkAverageAlpha = (uint8_t)
2595 (data->socclk_average_alpha);
2596 pp_table->UclkAverageAlpha = (uint8_t)
2597 (data->uclk_average_alpha);
2598 pp_table->GfxActivityAverageAlpha = (uint8_t)
2599 (data->gfx_activity_average_alpha);
2601 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2603 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
2605 PP_ASSERT_WITH_CODE(!result,
2606 "Failed to upload PPtable!", return result);
2608 result = vega10_avfs_enable(hwmgr, true);
2609 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2611 vega10_acg_enable(hwmgr);
2616 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2618 struct vega10_hwmgr *data = hwmgr->backend;
2620 if (data->smu_features[GNLD_THERMAL].supported) {
2621 if (data->smu_features[GNLD_THERMAL].enabled)
2622 pr_info("THERMAL Feature Already enabled!");
2624 PP_ASSERT_WITH_CODE(
2625 !vega10_enable_smc_features(hwmgr,
2627 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2628 "Enable THERMAL Feature Failed!",
2630 data->smu_features[GNLD_THERMAL].enabled = true;
2636 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2638 struct vega10_hwmgr *data = hwmgr->backend;
2640 if (data->smu_features[GNLD_THERMAL].supported) {
2641 if (!data->smu_features[GNLD_THERMAL].enabled)
2642 pr_info("THERMAL Feature Already disabled!");
2644 PP_ASSERT_WITH_CODE(
2645 !vega10_enable_smc_features(hwmgr,
2647 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2648 "disable THERMAL Feature Failed!",
2650 data->smu_features[GNLD_THERMAL].enabled = false;
2656 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2658 struct vega10_hwmgr *data = hwmgr->backend;
2660 if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
2661 if (data->smu_features[GNLD_VR0HOT].supported) {
2662 PP_ASSERT_WITH_CODE(
2663 !vega10_enable_smc_features(hwmgr,
2665 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2666 "Attempt to Enable VR0 Hot feature Failed!",
2668 data->smu_features[GNLD_VR0HOT].enabled = true;
2670 if (data->smu_features[GNLD_VR1HOT].supported) {
2671 PP_ASSERT_WITH_CODE(
2672 !vega10_enable_smc_features(hwmgr,
2674 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2675 "Attempt to Enable VR1 Hot feature Failed!",
2677 data->smu_features[GNLD_VR1HOT].enabled = true;
2684 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2686 struct vega10_hwmgr *data = hwmgr->backend;
2688 if (data->registry_data.ulv_support) {
2689 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2690 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2691 "Enable ULV Feature Failed!",
2693 data->smu_features[GNLD_ULV].enabled = true;
2699 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2701 struct vega10_hwmgr *data = hwmgr->backend;
2703 if (data->registry_data.ulv_support) {
2704 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2705 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2706 "disable ULV Feature Failed!",
2708 data->smu_features[GNLD_ULV].enabled = false;
2714 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2716 struct vega10_hwmgr *data = hwmgr->backend;
2718 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2719 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2720 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2721 "Attempt to Enable DS_GFXCLK Feature Failed!",
2723 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2726 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2727 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2728 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2729 "Attempt to Enable DS_SOCCLK Feature Failed!",
2731 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2734 if (data->smu_features[GNLD_DS_LCLK].supported) {
2735 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2736 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2737 "Attempt to Enable DS_LCLK Feature Failed!",
2739 data->smu_features[GNLD_DS_LCLK].enabled = true;
2742 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2743 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2744 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2745 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2747 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2753 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2755 struct vega10_hwmgr *data = hwmgr->backend;
2757 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2758 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2759 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2760 "Attempt to disable DS_GFXCLK Feature Failed!",
2762 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2765 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2766 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2767 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2768 "Attempt to disable DS_SOCCLK Feature Failed!",
2770 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2773 if (data->smu_features[GNLD_DS_LCLK].supported) {
2774 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2775 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2776 "Attempt to disable DS_LCLK Feature Failed!",
2778 data->smu_features[GNLD_DS_LCLK].enabled = false;
2781 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2782 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2783 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2784 "Attempt to disable DS_DCEFCLK Feature Failed!",
2786 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
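/*
 * Disable the LED display feature, then collect every currently enabled
 * DPM feature selected by the bitmap into one mask and disable them with
 * a single SMC features call.
 */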
2792 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2794 struct vega10_hwmgr *data = hwmgr->backend;
2795 uint32_t i, feature_mask = 0;
2798 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
2799 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2800 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2801 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2802 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2805 for (i = 0; i < GNLD_DPM_MAX; i++) {
2806 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2807 if (data->smu_features[i].supported) {
2808 if (data->smu_features[i].enabled) {
2809 feature_mask |= data->smu_features[i].
2811 data->smu_features[i].enabled = false;
2817 vega10_enable_smc_features(hwmgr, false, feature_mask);
2823 * @brief Tells the SMC to enable the supported DPM features.
2825 * @param hwmgr - the address of the powerplay hardware manager.
2826 * @param bitmap - bitmap of the features to enable.
2827 * @return 0 when at least one DPM feature is successfully enabled.
2829 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2831 struct vega10_hwmgr *data = hwmgr->backend;
2832 uint32_t i, feature_mask = 0;
2834 for (i = 0; i < GNLD_DPM_MAX; i++) {
2835 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2836 if (data->smu_features[i].supported) {
2837 if (!data->smu_features[i].enabled) {
2838 feature_mask |= data->smu_features[i].
2840 data->smu_features[i].enabled = true;
2846 if (vega10_enable_smc_features(hwmgr,
2847 true, feature_mask)) {
2848 for (i = 0; i < GNLD_DPM_MAX; i++) {
2849 if (data->smu_features[i].smu_feature_bitmap &
2851 data->smu_features[i].enabled = false;
2855 if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
2856 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2857 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2858 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2859 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2862 if (data->vbios_boot_state.bsoc_vddc_lock) {
2863 smum_send_msg_to_smc_with_parameter(hwmgr,
2864 PPSMC_MSG_SetFloorSocVoltage, 0);
2865 data->vbios_boot_state.bsoc_vddc_lock = false;
2868 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
2869 if (data->smu_features[GNLD_ACDC].supported) {
2870 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2871 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2872 "Attempt to Enable ACDC Feature Failed!",
2874 data->smu_features[GNLD_ACDC].enabled = true;
2881 static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
2883 struct vega10_hwmgr *data = hwmgr->backend;
2885 if (data->smu_features[GNLD_PCC_LIMIT].supported) {
2886 if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
2887 pr_info("GNLD_PCC_LIMIT has been %s\n", enable ? "enabled" : "disabled");
2888 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
2889 enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
2890 "Attempt to Enable PCC Limit feature Failed!",
2892 data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
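/*
 * Top-level DPM bring-up: enable the PCC limit feature, configure
 * telemetry, build the voltage tables, initialize and upload the SMC
 * table, then enable thermal protection, VR hot, deep sleep, the DPM
 * features, didt, power containment, the power control level and ULV.
 * Failures are recorded in the return value but do not stop the
 * remaining steps.
 */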
2898 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2900 struct vega10_hwmgr *data = hwmgr->backend;
2901 int tmp_result, result = 0;
2903 vega10_enable_disable_PCC_limit_feature(hwmgr, true);
2905 smum_send_msg_to_smc_with_parameter(hwmgr,
2906 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2908 tmp_result = vega10_construct_voltage_tables(hwmgr);
2909 PP_ASSERT_WITH_CODE(!tmp_result,
2910 "Failed to construct voltage tables!",
2911 result = tmp_result);
2913 tmp_result = vega10_init_smc_table(hwmgr);
2914 PP_ASSERT_WITH_CODE(!tmp_result,
2915 "Failed to initialize SMC table!",
2916 result = tmp_result);
2918 if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
2919 tmp_result = vega10_enable_thermal_protection(hwmgr);
2920 PP_ASSERT_WITH_CODE(!tmp_result,
2921 "Failed to enable thermal protection!",
2922 result = tmp_result);
2925 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2926 PP_ASSERT_WITH_CODE(!tmp_result,
2927 "Failed to enable VR hot feature!",
2928 result = tmp_result);
2930 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2931 PP_ASSERT_WITH_CODE(!tmp_result,
2932 "Failed to enable deep sleep master switch!",
2933 result = tmp_result);
2935 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2936 PP_ASSERT_WITH_CODE(!tmp_result,
2937 "Failed to start DPM!", result = tmp_result);
2939 /* enable didt; do not abort if enabling didt fails */
2940 tmp_result = vega10_enable_didt_config(hwmgr);
2941 PP_ASSERT(!tmp_result,
2942 "Failed to enable didt config!");
2944 tmp_result = vega10_enable_power_containment(hwmgr);
2945 PP_ASSERT_WITH_CODE(!tmp_result,
2946 "Failed to enable power containment!",
2947 result = tmp_result);
2949 tmp_result = vega10_power_control_set_level(hwmgr);
2950 PP_ASSERT_WITH_CODE(!tmp_result,
2951 "Failed to power control set level!",
2952 result = tmp_result);
2954 tmp_result = vega10_enable_ulv(hwmgr);
2955 PP_ASSERT_WITH_CODE(!tmp_result,
2956 "Failed to enable ULV!",
2957 result = tmp_result);
2962 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2964 return sizeof(struct vega10_power_state);
2967 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2968 void *state, struct pp_power_state *power_state,
2969 void *pp_table, uint32_t classification_flag)
2971 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2972 struct vega10_power_state *vega10_power_state =
2973 cast_phw_vega10_power_state(&(power_state->hardware));
2974 struct vega10_performance_level *performance_level;
2975 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2976 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2977 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
2978 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
2979 (ATOM_Vega10_SOCCLK_Dependency_Table *)
2980 (((unsigned long)powerplay_table) +
2981 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
2982 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
2983 (ATOM_Vega10_GFXCLK_Dependency_Table *)
2984 (((unsigned long)powerplay_table) +
2985 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
2986 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
2987 (ATOM_Vega10_MCLK_Dependency_Table *)
2988 (((unsigned long)powerplay_table) +
2989 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
2992 /* The following fields are not initialized here:
2993 * id orderedList allStatesList
2995 power_state->classification.ui_label =
2996 (le16_to_cpu(state_entry->usClassification) &
2997 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
2998 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
2999 power_state->classification.flags = classification_flag;
3000 /* NOTE: There is a classification2 flag in BIOS
3001 * that is not being used right now
3003 power_state->classification.temporary_state = false;
3004 power_state->classification.to_be_deleted = false;
3006 power_state->validation.disallowOnDC =
3007 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3008 ATOM_Vega10_DISALLOW_ON_DC) != 0);
3010 power_state->display.disableFrameModulation = false;
3011 power_state->display.limitRefreshrate = false;
3012 power_state->display.enableVariBright =
3013 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3014 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
3016 power_state->validation.supportedPowerLevels = 0;
3017 power_state->uvd_clocks.VCLK = 0;
3018 power_state->uvd_clocks.DCLK = 0;
3019 power_state->temperatures.min = 0;
3020 power_state->temperatures.max = 0;
3022 performance_level = &(vega10_power_state->performance_levels
3023 [vega10_power_state->performance_level_count++]);
3025 PP_ASSERT_WITH_CODE(
3026 (vega10_power_state->performance_level_count <
3027 NUM_GFXCLK_DPM_LEVELS),
3028 "Performance levels exceed SMC limit!",
3031 PP_ASSERT_WITH_CODE(
3032 (vega10_power_state->performance_level_count <=
3033 hwmgr->platform_descriptor.
3034 hardwareActivityPerformanceLevels),
3035 "Performance levels exceed Driver limit!",
3038 /* Performance levels are arranged from low to high. */
3039 performance_level->soc_clock = socclk_dep_table->entries
3040 [state_entry->ucSocClockIndexLow].ulClk;
3041 performance_level->gfx_clock = gfxclk_dep_table->entries
3042 [state_entry->ucGfxClockIndexLow].ulClk;
3043 performance_level->mem_clock = mclk_dep_table->entries
3044 [state_entry->ucMemClockIndexLow].ulMemClk;
3046 performance_level = &(vega10_power_state->performance_levels
3047 [vega10_power_state->performance_level_count++]);
3048 performance_level->soc_clock = socclk_dep_table->entries
3049 [state_entry->ucSocClockIndexHigh].ulClk;
3050 if (gfxclk_dep_table->ucRevId == 0) {
3051 performance_level->gfx_clock = gfxclk_dep_table->entries
3052 [state_entry->ucGfxClockIndexHigh].ulClk;
3053 } else if (gfxclk_dep_table->ucRevId == 1) {
3054 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3055 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3058 performance_level->mem_clock = mclk_dep_table->entries
3059 [state_entry->ucMemClockIndexHigh].ulMemClk;
3063 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3064 unsigned long entry_index, struct pp_power_state *state)
3067 struct vega10_power_state *ps;
3069 state->hardware.magic = PhwVega10_Magic;
3071 ps = cast_phw_vega10_power_state(&state->hardware);
3073 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3074 vega10_get_pp_table_entry_callback_func);
3077 * This is the earliest time we have all the dependency tables
3078 * and the VBIOS boot state
3080 /* set DC compatible flag if this state supports DC */
3081 if (!state->validation.disallowOnDC)
3082 ps->dc_compatible = true;
3084 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3085 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3090 static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3091 struct pp_hw_power_state *hw_ps)
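/*
 * Adjust the requested power state before it is committed: cap clocks at
 * the DC limits when not on AC power, honor the display's minimum
 * engine/memory clocks and the stable-pstate settings, and decide whether
 * MCLK switching must be disabled (picking the lowest MCLK that still
 * meets the DAL latency requirement in that case).
 */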
3096 static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3097 struct pp_power_state *request_ps,
3098 const struct pp_power_state *current_ps)
3100 struct amdgpu_device *adev = hwmgr->adev;
3101 struct vega10_power_state *vega10_ps =
3102 cast_phw_vega10_power_state(&request_ps->hardware);
3105 struct PP_Clocks minimum_clocks = {0};
3106 bool disable_mclk_switching;
3107 bool disable_mclk_switching_for_frame_lock;
3108 bool disable_mclk_switching_for_vr;
3109 bool force_mclk_high;
3110 const struct phm_clock_and_voltage_limits *max_limits;
3112 struct vega10_hwmgr *data = hwmgr->backend;
3113 struct phm_ppt_v2_information *table_info =
3114 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3116 uint32_t stable_pstate_sclk_dpm_percentage;
3117 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3120 data->battery_state = (PP_StateUILabel_Battery ==
3121 request_ps->classification.ui_label);
3123 if (vega10_ps->performance_level_count != 2)
3124 pr_info("Vega10 should always have 2 performance levels");
3126 max_limits = adev->pm.ac_power ?
3127 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3128 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3130 /* Cap clock DPM tables at DC MAX if it is in DC. */
3131 if (!adev->pm.ac_power) {
3132 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3133 if (vega10_ps->performance_levels[i].mem_clock >
3135 vega10_ps->performance_levels[i].mem_clock =
3137 if (vega10_ps->performance_levels[i].gfx_clock >
3139 vega10_ps->performance_levels[i].gfx_clock =
3144 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3145 minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3146 minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3148 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3149 stable_pstate_sclk_dpm_percentage =
3150 data->registry_data.stable_pstate_sclk_dpm_percentage;
3151 PP_ASSERT_WITH_CODE(
3152 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3153 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3154 "percent sclk value must range from 1% to 100%, setting default value",
3155 stable_pstate_sclk_dpm_percentage = 75);
3157 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3158 stable_pstate_sclk = (max_limits->sclk *
3159 stable_pstate_sclk_dpm_percentage) / 100;
3161 for (count = table_info->vdd_dep_on_sclk->count - 1;
3162 count >= 0; count--) {
3163 if (stable_pstate_sclk >=
3164 table_info->vdd_dep_on_sclk->entries[count].clk) {
3165 stable_pstate_sclk =
3166 table_info->vdd_dep_on_sclk->entries[count].clk;
3172 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3174 stable_pstate_mclk = max_limits->mclk;
3176 minimum_clocks.engineClock = stable_pstate_sclk;
3177 minimum_clocks.memoryClock = stable_pstate_mclk;
3180 disable_mclk_switching_for_frame_lock =
3181 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3182 disable_mclk_switching_for_vr =
3183 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
3184 force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
3186 if (hwmgr->display_config->num_display == 0)
3187 disable_mclk_switching = false;
3189 disable_mclk_switching = (hwmgr->display_config->num_display > 1) ||
3190 disable_mclk_switching_for_frame_lock ||
3191 disable_mclk_switching_for_vr ||
3194 sclk = vega10_ps->performance_levels[0].gfx_clock;
3195 mclk = vega10_ps->performance_levels[0].mem_clock;
3197 if (sclk < minimum_clocks.engineClock)
3198 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3199 max_limits->sclk : minimum_clocks.engineClock;
3201 if (mclk < minimum_clocks.memoryClock)
3202 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3203 max_limits->mclk : minimum_clocks.memoryClock;
3205 vega10_ps->performance_levels[0].gfx_clock = sclk;
3206 vega10_ps->performance_levels[0].mem_clock = mclk;
3208 if (vega10_ps->performance_levels[1].gfx_clock <
3209 vega10_ps->performance_levels[0].gfx_clock)
3210 vega10_ps->performance_levels[0].gfx_clock =
3211 vega10_ps->performance_levels[1].gfx_clock;
3213 if (disable_mclk_switching) {
3214 /* Set Mclk the max of level 0 and level 1 */
3215 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3216 mclk = vega10_ps->performance_levels[1].mem_clock;
3218 /* Find the lowest MCLK frequency that is within
3219 * the tolerable latency defined in DAL
3221 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3222 for (i = 0; i < data->mclk_latency_table.count; i++) {
3223 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3224 (data->mclk_latency_table.entries[i].frequency >=
3225 vega10_ps->performance_levels[0].mem_clock) &&
3226 (data->mclk_latency_table.entries[i].frequency <=
3227 vega10_ps->performance_levels[1].mem_clock))
3228 mclk = data->mclk_latency_table.entries[i].frequency;
3230 vega10_ps->performance_levels[0].mem_clock = mclk;
3232 if (vega10_ps->performance_levels[1].mem_clock <
3233 vega10_ps->performance_levels[0].mem_clock)
3234 vega10_ps->performance_levels[0].mem_clock =
3235 vega10_ps->performance_levels[1].mem_clock;
3238 if (PP_CAP(PHM_PlatformCaps_StablePState)) {
3239 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3240 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3241 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
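/*
 * Check whether the highest SCLK/MCLK of the new power state already exist
 * in the DPM tables; if not, patch the top level and flag an OD update.
 * A change in the number of displays additionally forces an MCLK update.
 */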
3248 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3250 struct vega10_hwmgr *data = hwmgr->backend;
3251 const struct phm_set_power_state_input *states =
3252 (const struct phm_set_power_state_input *)input;
3253 const struct vega10_power_state *vega10_ps =
3254 cast_const_phw_vega10_power_state(states->pnew_state);
3255 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
3256 uint32_t sclk = vega10_ps->performance_levels
3257 [vega10_ps->performance_level_count - 1].gfx_clock;
3258 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
3259 uint32_t mclk = vega10_ps->performance_levels
3260 [vega10_ps->performance_level_count - 1].mem_clock;
3263 for (i = 0; i < sclk_table->count; i++) {
3264 if (sclk == sclk_table->dpm_levels[i].value)
3268 if (i >= sclk_table->count) {
3269 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3270 sclk_table->dpm_levels[i-1].value = sclk;
3273 for (i = 0; i < mclk_table->count; i++) {
3274 if (mclk == mclk_table->dpm_levels[i].value)
3278 if (i >= mclk_table->count) {
3279 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3280 mclk_table->dpm_levels[i-1].value = mclk;
3283 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
3284 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
3289 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3290 struct pp_hwmgr *hwmgr, const void *input)
3293 struct vega10_hwmgr *data = hwmgr->backend;
3294 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3295 struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
3296 struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
3299 if (!data->need_update_dpm_table)
3302 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
3303 for (count = 0; count < dpm_table->gfx_table.count; count++)
3304 dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3307 odn_clk_table = &odn_table->vdd_dep_on_mclk;
3308 if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
3309 for (count = 0; count < dpm_table->mem_table.count; count++)
3310 dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
3313 if (data->need_update_dpm_table &
3314 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
3315 result = vega10_populate_all_graphic_levels(hwmgr);
3316 PP_ASSERT_WITH_CODE((0 == result),
3317 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3321 if (data->need_update_dpm_table &
3322 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3323 result = vega10_populate_all_memory_levels(hwmgr);
3324 PP_ASSERT_WITH_CODE((0 == result),
3325 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3329 vega10_populate_vddc_soc_levels(hwmgr);
3334 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3335 struct vega10_single_dpm_table *dpm_table,
3336 uint32_t low_limit, uint32_t high_limit)
3340 for (i = 0; i < dpm_table->count; i++) {
3341 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3342 (dpm_table->dpm_levels[i].value > high_limit))
3343 dpm_table->dpm_levels[i].enabled = false;
3345 dpm_table->dpm_levels[i].enabled = true;
3350 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3351 struct vega10_single_dpm_table *dpm_table,
3352 uint32_t low_limit, uint32_t high_limit,
3353 uint32_t disable_dpm_mask)
3357 for (i = 0; i < dpm_table->count; i++) {
3358 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3359 (dpm_table->dpm_levels[i].value > high_limit))
3360 dpm_table->dpm_levels[i].enabled = false;
3361 else if (!((1 << i) & disable_dpm_mask))
3362 dpm_table->dpm_levels[i].enabled = false;
3364 dpm_table->dpm_levels[i].enabled = true;
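/*
 * Enable only the SOC, GFX and memory DPM levels that lie between the low
 * and high performance levels of the given power state; the GFX table
 * additionally honors disable_dpm_mask.
 */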
3369 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3370 const struct vega10_power_state *vega10_ps)
3372 struct vega10_hwmgr *data = hwmgr->backend;
3373 uint32_t high_limit_count;
3375 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3376 "power state did not have any performance level",
3379 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3381 vega10_trim_single_dpm_states(hwmgr,
3382 &(data->dpm_table.soc_table),
3383 vega10_ps->performance_levels[0].soc_clock,
3384 vega10_ps->performance_levels[high_limit_count].soc_clock);
3386 vega10_trim_single_dpm_states_with_mask(hwmgr,
3387 &(data->dpm_table.gfx_table),
3388 vega10_ps->performance_levels[0].gfx_clock,
3389 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3390 data->disable_dpm_mask);
3392 vega10_trim_single_dpm_states(hwmgr,
3393 &(data->dpm_table.mem_table),
3394 vega10_ps->performance_levels[0].mem_clock,
3395 vega10_ps->performance_levels[high_limit_count].mem_clock);
3400 static uint32_t vega10_find_lowest_dpm_level(
3401 struct vega10_single_dpm_table *table)
3405 for (i = 0; i < table->count; i++) {
3406 if (table->dpm_levels[i].enabled)
3413 static uint32_t vega10_find_highest_dpm_level(
3414 struct vega10_single_dpm_table *table)
3418 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3419 for (i = table->count; i > 0; i--) {
3420 if (table->dpm_levels[i - 1].enabled)
3424 pr_info("DPM Table Has Too Many Entries!");
3425 return MAX_REGULAR_DPM_NUMBER - 1;
3431 static void vega10_apply_dal_minimum_voltage_request(
3432 struct pp_hwmgr *hwmgr)
3437 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3439 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3440 struct phm_ppt_v2_information *table_info =
3441 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3443 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
3445 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
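/*
 * Push the soft-minimum GFX and UCLK DPM levels to the SMC when the boot
 * levels changed; at the highest UCLK level the SOC clock floor is raised
 * through SetSoftMinSocclkByIndex instead.
 */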
3448 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3450 struct vega10_hwmgr *data = hwmgr->backend;
3451 uint32_t socclk_idx;
3453 vega10_apply_dal_minimum_voltage_request(hwmgr);
3455 if (!data->registry_data.sclk_dpm_key_disabled) {
3456 if (data->smc_state_table.gfx_boot_level !=
3457 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3458 smum_send_msg_to_smc_with_parameter(hwmgr,
3459 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3460 data->smc_state_table.gfx_boot_level);
3461 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3462 data->smc_state_table.gfx_boot_level;
3466 if (!data->registry_data.mclk_dpm_key_disabled) {
3467 if (data->smc_state_table.mem_boot_level !=
3468 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3469 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3470 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
3471 smum_send_msg_to_smc_with_parameter(hwmgr,
3472 PPSMC_MSG_SetSoftMinSocclkByIndex,
3475 smum_send_msg_to_smc_with_parameter(hwmgr,
3476 PPSMC_MSG_SetSoftMinUclkByIndex,
3477 data->smc_state_table.mem_boot_level);
3479 data->dpm_table.mem_table.dpm_state.soft_min_level =
3480 data->smc_state_table.mem_boot_level;
3487 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3489 struct vega10_hwmgr *data = hwmgr->backend;
3491 vega10_apply_dal_minimum_voltage_request(hwmgr);
3493 if (!data->registry_data.sclk_dpm_key_disabled) {
3494 if (data->smc_state_table.gfx_max_level !=
3495 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3496 smum_send_msg_to_smc_with_parameter(hwmgr,
3497 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3498 data->smc_state_table.gfx_max_level);
3499 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3500 data->smc_state_table.gfx_max_level;
3504 if (!data->registry_data.mclk_dpm_key_disabled) {
3505 if (data->smc_state_table.mem_max_level !=
3506 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3507 smum_send_msg_to_smc_with_parameter(hwmgr,
3508 PPSMC_MSG_SetSoftMaxUclkByIndex,
3509 data->smc_state_table.mem_max_level);
3510 data->dpm_table.mem_table.dpm_state.soft_max_level =
3511 data->smc_state_table.mem_max_level;
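/*
 * Trim the DPM tables against the new power state, recompute the boot and
 * max levels for GFX and memory, upload them to the SMC and mark the
 * levels in between as enabled.
 */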
3518 static int vega10_generate_dpm_level_enable_mask(
3519 struct pp_hwmgr *hwmgr, const void *input)
3521 struct vega10_hwmgr *data = hwmgr->backend;
3522 const struct phm_set_power_state_input *states =
3523 (const struct phm_set_power_state_input *)input;
3524 const struct vega10_power_state *vega10_ps =
3525 cast_const_phw_vega10_power_state(states->pnew_state);
3528 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3529 "Attempt to Trim DPM States Failed!",
3532 data->smc_state_table.gfx_boot_level =
3533 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3534 data->smc_state_table.gfx_max_level =
3535 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3536 data->smc_state_table.mem_boot_level =
3537 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3538 data->smc_state_table.mem_max_level =
3539 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3541 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3542 "Attempt to upload DPM Bootup Levels Failed!",
3544 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3545 "Attempt to upload DPM Max Levels Failed!",
3547 for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3548 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3551 for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3552 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
3557 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3559 struct vega10_hwmgr *data = hwmgr->backend;
3561 if (data->smu_features[GNLD_DPM_VCE].supported) {
3562 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
3564 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3565 "Attempt to Enable/Disable DPM VCE Failed!",
3567 data->smu_features[GNLD_DPM_VCE].enabled = enable;
3573 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3575 struct vega10_hwmgr *data = hwmgr->backend;
3576 uint32_t low_sclk_interrupt_threshold = 0;
3578 if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
3579 (data->low_sclk_interrupt_threshold != 0)) {
3580 low_sclk_interrupt_threshold =
3581 data->low_sclk_interrupt_threshold;
3583 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3584 cpu_to_le32(low_sclk_interrupt_threshold);
3586 /* This message will also enable SmcToHost Interrupt */
3587 smum_send_msg_to_smc_with_parameter(hwmgr,
3588 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3589 (uint32_t)low_sclk_interrupt_threshold);
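/*
 * Apply a new power state: resolve the requested clocks against the DPM
 * tables, upload the SCLK/MCLK levels and the level enable mask, refresh
 * the SCLK threshold, re-upload the PPTable and update AVFS.
 */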
3595 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3598 int tmp_result, result = 0;
3599 struct vega10_hwmgr *data = hwmgr->backend;
3600 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3602 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3603 PP_ASSERT_WITH_CODE(!tmp_result,
3604 "Failed to find DPM states clocks in DPM table!",
3605 result = tmp_result);
3607 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3608 PP_ASSERT_WITH_CODE(!tmp_result,
3609 "Failed to populate and upload SCLK MCLK DPM levels!",
3610 result = tmp_result);
3612 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3613 PP_ASSERT_WITH_CODE(!tmp_result,
3614 "Failed to generate DPM level enabled mask!",
3615 result = tmp_result);
3617 tmp_result = vega10_update_sclk_threshold(hwmgr);
3618 PP_ASSERT_WITH_CODE(!tmp_result,
3619 "Failed to update SCLK threshold!",
3620 result = tmp_result);
3622 result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
3623 PP_ASSERT_WITH_CODE(!result,
3624 "Failed to upload PPtable!", return result);
3626 vega10_update_avfs(hwmgr);
3628 data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
3633 static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3635 struct pp_power_state *ps;
3636 struct vega10_power_state *vega10_ps;
3641 ps = hwmgr->request_ps;
3646 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3649 return vega10_ps->performance_levels[0].gfx_clock;
3651 return vega10_ps->performance_levels
3652 [vega10_ps->performance_level_count - 1].gfx_clock;
3655 static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3657 struct pp_power_state *ps;
3658 struct vega10_power_state *vega10_ps;
3663 ps = hwmgr->request_ps;
3668 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3671 return vega10_ps->performance_levels[0].mem_clock;
3673 return vega10_ps->performance_levels
3674 [vega10_ps->performance_level_count-1].mem_clock;
3677 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3685 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
3686 value = smum_get_argument(hwmgr);
3688 /* The SMC returns power in watts; keep consistent with legacy ASICs by treating the low 8 bits as 8 fractional bits */
3689 *query = value << 8;
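/*
 * Service AMDGPU_PP_SENSOR_* queries: current GFXCLK/UCLK, GPU load,
 * temperature, UVD/VCE power-gate state, package power, VDDGFX (from the
 * SVI0 plane0 VID register) and the enabled SMC feature mask.
 */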
3694 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3695 void *value, int *size)
3697 struct amdgpu_device *adev = hwmgr->adev;
3698 uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
3699 struct vega10_hwmgr *data = hwmgr->backend;
3700 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3705 case AMDGPU_PP_SENSOR_GFX_SCLK:
3706 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
3707 sclk_mhz = smum_get_argument(hwmgr);
3708 *((uint32_t *)value) = sclk_mhz * 100;
3710 case AMDGPU_PP_SENSOR_GFX_MCLK:
3711 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
3712 mclk_idx = smum_get_argument(hwmgr);
3713 if (mclk_idx < dpm_table->mem_table.count) {
3714 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
3720 case AMDGPU_PP_SENSOR_GPU_LOAD:
3721 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
3722 activity_percent = smum_get_argument(hwmgr);
3723 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3726 case AMDGPU_PP_SENSOR_GPU_TEMP:
3727 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
3730 case AMDGPU_PP_SENSOR_UVD_POWER:
3731 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3734 case AMDGPU_PP_SENSOR_VCE_POWER:
3735 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3738 case AMDGPU_PP_SENSOR_GPU_POWER:
3739 ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
3741 case AMDGPU_PP_SENSOR_VDDGFX:
3742 val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
3743 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
3744 SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
3745 *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
3747 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3748 ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
3760 static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
3763 smum_send_msg_to_smc_with_parameter(hwmgr,
3764 PPSMC_MSG_SetUclkFastSwitch,
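/*
 * Translate a display clock request into a DSPCLK selector plus a frequency
 * in MHz (packed into the upper 16 bits) and forward it to the SMC via
 * PPSMC_MSG_RequestDisplayClockByFreq.
 */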
3768 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
3769 struct pp_display_clock_request *clock_req)
3772 enum amd_pp_clock_type clk_type = clock_req->clock_type;
3773 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
3774 DSPCLK_e clk_select = 0;
3775 uint32_t clk_request = 0;
3778 case amd_pp_dcef_clock:
3779 clk_select = DSPCLK_DCEFCLK;
3781 case amd_pp_disp_clock:
3782 clk_select = DSPCLK_DISPCLK;
3784 case amd_pp_pixel_clock:
3785 clk_select = DSPCLK_PIXCLK;
3787 case amd_pp_phy_clock:
3788 clk_select = DSPCLK_PHYCLK;
3791 pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
3797 clk_request = (clk_freq << 16) | clk_select;
3798 smum_send_msg_to_smc_with_parameter(hwmgr,
3799 PPSMC_MSG_RequestDisplayClockByFreq,
3806 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
3807 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
3813 if (mclk_table == NULL || mclk_table->count == 0)
3816 count = (uint8_t)(mclk_table->count);
3818 for (i = 0; i < count; i++) {
3819 if (mclk_table->entries[i].clk >= frequency)
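/*
 * After a power-state adjustment, tell the SMC whether fast UCLK switching
 * is allowed for the current display configuration, then request the hard
 * minimum DCEFCLK, the deep-sleep DCEFCLK and a soft minimum UCLK that
 * satisfy the displays.
 */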
3826 static int vega10_notify_smc_display_config_after_ps_adjustment(
3827 struct pp_hwmgr *hwmgr)
3829 struct vega10_hwmgr *data = hwmgr->backend;
3830 struct vega10_single_dpm_table *dpm_table =
3831 &data->dpm_table.dcef_table;
3832 struct phm_ppt_v2_information *table_info =
3833 (struct phm_ppt_v2_information *)hwmgr->pptable;
3834 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
3836 struct PP_Clocks min_clocks = {0};
3838 struct pp_display_clock_request clock_req;
3840 if ((hwmgr->display_config->num_display > 1) &&
3841 !hwmgr->display_config->multi_monitor_in_sync &&
3842 !hwmgr->display_config->nb_pstate_switch_disable)
3843 vega10_notify_smc_display_change(hwmgr, false);
3845 vega10_notify_smc_display_change(hwmgr, true);
3847 min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
3848 min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
3849 min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3851 for (i = 0; i < dpm_table->count; i++) {
3852 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
3856 if (i < dpm_table->count) {
3857 clock_req.clock_type = amd_pp_dcef_clock;
3858 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
3859 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
3860 smum_send_msg_to_smc_with_parameter(
3861 hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
3862 min_clocks.dcefClockInSR / 100);
3864 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
3867 pr_debug("Cannot find requested DCEFCLK!");
3870 if (min_clocks.memoryClock != 0) {
3871 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
3872 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
3873 data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
3879 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
3881 struct vega10_hwmgr *data = hwmgr->backend;
3883 data->smc_state_table.gfx_boot_level =
3884 data->smc_state_table.gfx_max_level =
3885 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3886 data->smc_state_table.mem_boot_level =
3887 data->smc_state_table.mem_max_level =
3888 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3890 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3891 "Failed to upload boot level to highest!",
3894 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3895 "Failed to upload dpm max level to highest!",
3901 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3903 struct vega10_hwmgr *data = hwmgr->backend;
3905 data->smc_state_table.gfx_boot_level =
3906 data->smc_state_table.gfx_max_level =
3907 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3908 data->smc_state_table.mem_boot_level =
3909 data->smc_state_table.mem_max_level =
3910 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3912 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3913 "Failed to upload boot level to highest!",
3916 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3917 "Failed to upload dpm max level to highest!",
3924 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3926 struct vega10_hwmgr *data = hwmgr->backend;
3928 data->smc_state_table.gfx_boot_level =
3929 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3930 data->smc_state_table.gfx_max_level =
3931 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3932 data->smc_state_table.mem_boot_level =
3933 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3934 data->smc_state_table.mem_max_level =
3935 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3937 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3938 "Failed to upload DPM Bootup Levels!",
3941 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3942 "Failed to upload DPM Max Levels!",
3947 static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3948 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
3950 struct phm_ppt_v2_information *table_info =
3951 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3953 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
3954 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
3955 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
3956 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
3957 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
3958 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
3959 hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
3960 hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
3963 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3965 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
3967 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
3968 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3969 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
3970 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
3975 static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
3978 case AMD_FAN_CTRL_NONE:
3979 vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
3981 case AMD_FAN_CTRL_MANUAL:
3982 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
3983 vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
3985 case AMD_FAN_CTRL_AUTO:
3986 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
3987 vega10_fan_ctrl_start_smc_fan_control(hwmgr);
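/*
 * Restrict a clock domain to the levels selected in @mask: the boot level
 * is clamped to the lowest set bit and the max level to the highest (e.g. a
 * mask of 0x6 allows levels 1..2), then both are re-uploaded to the SMC.
 */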
3994 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
3995 enum pp_clock_type type, uint32_t mask)
3997 struct vega10_hwmgr *data = hwmgr->backend;
4001 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
4002 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
4004 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4005 "Failed to upload boot level to lowest!",
4008 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4009 "Failed to upload dpm max level to highest!",
4014 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
4015 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
4017 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4018 "Failed to upload boot level to lowest!",
4021 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4022 "Failed to upload dpm max level to highest!",
4035 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4036 enum amd_dpm_forced_level level)
4039 uint32_t sclk_mask = 0;
4040 uint32_t mclk_mask = 0;
4041 uint32_t soc_mask = 0;
4043 if (hwmgr->pstate_sclk == 0)
4044 vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4047 case AMD_DPM_FORCED_LEVEL_HIGH:
4048 ret = vega10_force_dpm_highest(hwmgr);
4050 case AMD_DPM_FORCED_LEVEL_LOW:
4051 ret = vega10_force_dpm_lowest(hwmgr);
4053 case AMD_DPM_FORCED_LEVEL_AUTO:
4054 ret = vega10_unforce_dpm_levels(hwmgr);
4056 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4057 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4058 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4059 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4060 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4063 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4064 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4066 case AMD_DPM_FORCED_LEVEL_MANUAL:
4067 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
4073 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4074 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4075 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4076 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4082 static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4084 struct vega10_hwmgr *data = hwmgr->backend;
4086 if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
4087 return AMD_FAN_CTRL_MANUAL;
4089 return AMD_FAN_CTRL_AUTO;
4092 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4093 struct amd_pp_simple_clock_info *info)
4095 struct phm_ppt_v2_information *table_info =
4096 (struct phm_ppt_v2_information *)hwmgr->pptable;
4097 struct phm_clock_and_voltage_limits *max_limits =
4098 &table_info->max_clock_voltage_on_ac;
4100 info->engine_max_clock = max_limits->sclk;
4101 info->memory_max_clock = max_limits->mclk;
4106 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4107 struct pp_clock_levels_with_latency *clocks)
4109 struct phm_ppt_v2_information *table_info =
4110 (struct phm_ppt_v2_information *)hwmgr->pptable;
4111 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4112 table_info->vdd_dep_on_sclk;
4115 clocks->num_levels = 0;
4116 for (i = 0; i < dep_table->count; i++) {
4117 if (dep_table->entries[i].clk) {
4118 clocks->data[clocks->num_levels].clocks_in_khz =
4119 dep_table->entries[i].clk * 10;
4120 clocks->num_levels++;
4126 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4127 struct pp_clock_levels_with_latency *clocks)
4129 struct phm_ppt_v2_information *table_info =
4130 (struct phm_ppt_v2_information *)hwmgr->pptable;
4131 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4132 table_info->vdd_dep_on_mclk;
4133 struct vega10_hwmgr *data = hwmgr->backend;
4137 for (i = 0; i < dep_table->count; i++) {
4138 if (dep_table->entries[i].clk) {
4140 clocks->data[j].clocks_in_khz =
4141 dep_table->entries[i].clk * 10;
4142 data->mclk_latency_table.entries[j].frequency =
4143 dep_table->entries[i].clk;
4144 clocks->data[j].latency_in_us =
4145 data->mclk_latency_table.entries[j].latency = 25;
4149 clocks->num_levels = data->mclk_latency_table.count = j;
4152 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4153 struct pp_clock_levels_with_latency *clocks)
4155 struct phm_ppt_v2_information *table_info =
4156 (struct phm_ppt_v2_information *)hwmgr->pptable;
4157 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4158 table_info->vdd_dep_on_dcefclk;
4161 for (i = 0; i < dep_table->count; i++) {
4162 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4163 clocks->data[i].latency_in_us = 0;
4164 clocks->num_levels++;
4168 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4169 struct pp_clock_levels_with_latency *clocks)
4171 struct phm_ppt_v2_information *table_info =
4172 (struct phm_ppt_v2_information *)hwmgr->pptable;
4173 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4174 table_info->vdd_dep_on_socclk;
4177 for (i = 0; i < dep_table->count; i++) {
4178 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4179 clocks->data[i].latency_in_us = 0;
4180 clocks->num_levels++;
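/*
 * Fill a pp_clock_levels_with_latency table for the requested clock domain
 * using the per-domain helpers above.
 */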
4184 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4185 enum amd_pp_clock_type type,
4186 struct pp_clock_levels_with_latency *clocks)
4189 case amd_pp_sys_clock:
4190 vega10_get_sclks(hwmgr, clocks);
4192 case amd_pp_mem_clock:
4193 vega10_get_memclocks(hwmgr, clocks);
4195 case amd_pp_dcef_clock:
4196 vega10_get_dcefclocks(hwmgr, clocks);
4198 case amd_pp_soc_clock:
4199 vega10_get_socclocks(hwmgr, clocks);
4208 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4209 enum amd_pp_clock_type type,
4210 struct pp_clock_levels_with_voltage *clocks)
4212 struct phm_ppt_v2_information *table_info =
4213 (struct phm_ppt_v2_information *)hwmgr->pptable;
4214 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
4218 case amd_pp_mem_clock:
4219 dep_table = table_info->vdd_dep_on_mclk;
4221 case amd_pp_dcef_clock:
4222 dep_table = table_info->vdd_dep_on_dcefclk;
4224 case amd_pp_disp_clock:
4225 dep_table = table_info->vdd_dep_on_dispclk;
4227 case amd_pp_pixel_clock:
4228 dep_table = table_info->vdd_dep_on_pixclk;
4230 case amd_pp_phy_clock:
4231 dep_table = table_info->vdd_dep_on_phyclk;
4237 for (i = 0; i < dep_table->count; i++) {
4238 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
4239 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4240 entries[dep_table->entries[i].vddInd].us_vdd);
4241 clocks->num_levels++;
4244 if (i < dep_table->count)
4250 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4253 struct vega10_hwmgr *data = hwmgr->backend;
4254 struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
4255 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4258 if (!data->registry_data.disable_water_mark) {
4259 smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
4260 data->water_marks_bitmap = WaterMarksExist;
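/*
 * Format the GFXCLK, MCLK and PCIe DPM tables (plus the OD_SCLK, OD_MCLK
 * and OD_RANGE tables when overdrive is enabled) into the sysfs buffer,
 * marking the currently selected level with '*'.
 */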
4266 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4267 enum pp_clock_type type, char *buf)
4269 struct vega10_hwmgr *data = hwmgr->backend;
4270 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4271 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4272 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4273 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
4275 int i, now, size = 0;
4279 if (data->registry_data.sclk_dpm_key_disabled)
4282 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
4283 now = smum_get_argument(hwmgr);
4285 for (i = 0; i < sclk_table->count; i++)
4286 size += sprintf(buf + size, "%d: %uMhz %s\n",
4287 i, sclk_table->dpm_levels[i].value / 100,
4288 (i == now) ? "*" : "");
4291 if (data->registry_data.mclk_dpm_key_disabled)
4294 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
4295 now = smum_get_argument(hwmgr);
4297 for (i = 0; i < mclk_table->count; i++)
4298 size += sprintf(buf + size, "%d: %uMhz %s\n",
4299 i, mclk_table->dpm_levels[i].value / 100,
4300 (i == now) ? "*" : "");
4303 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
4304 now = smum_get_argument(hwmgr);
4306 for (i = 0; i < pcie_table->count; i++)
4307 size += sprintf(buf + size, "%d: %s %s\n", i,
4308 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
4309 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
4310 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
4311 (i == now) ? "*" : "");
4314 if (hwmgr->od_enabled) {
4315 size = sprintf(buf, "%s:\n", "OD_SCLK");
4316 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4317 for (i = 0; i < podn_vdd_dep->count; i++)
4318 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4319 i, podn_vdd_dep->entries[i].clk / 100,
4320 podn_vdd_dep->entries[i].vddc);
4324 if (hwmgr->od_enabled) {
4325 size = sprintf(buf, "%s:\n", "OD_MCLK");
4326 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4327 for (i = 0; i < podn_vdd_dep->count; i++)
4328 size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
4329 i, podn_vdd_dep->entries[i].clk/100,
4330 podn_vdd_dep->entries[i].vddc);
4334 if (hwmgr->od_enabled) {
4335 size = sprintf(buf, "%s:\n", "OD_RANGE");
4336 size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4337 data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
4338 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4339 size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4340 data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
4341 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4342 size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4343 data->odn_dpm_table.min_vddc,
4344 data->odn_dpm_table.max_vddc);
4353 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4355 struct vega10_hwmgr *data = hwmgr->backend;
4356 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4359 if ((data->water_marks_bitmap & WaterMarksExist) &&
4360 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4361 result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
4362 PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
4363 data->water_marks_bitmap |= WaterMarksLoaded;
4366 if (data->water_marks_bitmap & WaterMarksLoaded) {
4367 smum_send_msg_to_smc_with_parameter(hwmgr,
4368 PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
4374 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4376 struct vega10_hwmgr *data = hwmgr->backend;
4378 if (data->smu_features[GNLD_DPM_UVD].supported) {
4379 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
4381 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4382 "Attempt to Enable/Disable DPM UVD Failed!",
4384 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4389 static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4391 struct vega10_hwmgr *data = hwmgr->backend;
4393 data->vce_power_gated = bgate;
4394 vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4397 static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4399 struct vega10_hwmgr *data = hwmgr->backend;
4401 data->uvd_power_gated = bgate;
4402 vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4405 static inline bool vega10_are_power_levels_equal(
4406 const struct vega10_performance_level *pl1,
4407 const struct vega10_performance_level *pl2)
4409 return ((pl1->soc_clock == pl2->soc_clock) &&
4410 (pl1->gfx_clock == pl2->gfx_clock) &&
4411 (pl1->mem_clock == pl2->mem_clock));
4414 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4415 const struct pp_hw_power_state *pstate1,
4416 const struct pp_hw_power_state *pstate2, bool *equal)
4418 const struct vega10_power_state *psa;
4419 const struct vega10_power_state *psb;
4422 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4425 psa = cast_const_phw_vega10_power_state(pstate1);
4426 psb = cast_const_phw_vega10_power_state(pstate2);
4427 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4428 if (psa->performance_level_count != psb->performance_level_count) {
4433 for (i = 0; i < psa->performance_level_count; i++) {
4434 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4435 /* If we have found even one performance level pair that is different the states are different. */
4441 /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
4442 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4443 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4444 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4450 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4452 struct vega10_hwmgr *data = hwmgr->backend;
4453 bool is_update_required = false;
4455 if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4456 is_update_required = true;
4458 if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
4459 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
4460 is_update_required = true;
4463 return is_update_required;
4466 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4468 int tmp_result, result = 0;
4470 if (PP_CAP(PHM_PlatformCaps_ThermalController))
4471 vega10_disable_thermal_protection(hwmgr);
4473 tmp_result = vega10_disable_power_containment(hwmgr);
4474 PP_ASSERT_WITH_CODE((tmp_result == 0),
4475 "Failed to disable power containment!", result = tmp_result);
4477 tmp_result = vega10_disable_didt_config(hwmgr);
4478 PP_ASSERT_WITH_CODE((tmp_result == 0),
4479 "Failed to disable didt config!", result = tmp_result);
4481 tmp_result = vega10_avfs_enable(hwmgr, false);
4482 PP_ASSERT_WITH_CODE((tmp_result == 0),
4483 "Failed to disable AVFS!", result = tmp_result);
4485 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4486 PP_ASSERT_WITH_CODE((tmp_result == 0),
4487 "Failed to stop DPM!", result = tmp_result);
4489 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4490 PP_ASSERT_WITH_CODE((tmp_result == 0),
4491 "Failed to disable deep sleep!", result = tmp_result);
4493 tmp_result = vega10_disable_ulv(hwmgr);
4494 PP_ASSERT_WITH_CODE((tmp_result == 0),
4495 "Failed to disable ulv!", result = tmp_result);
4497 tmp_result = vega10_acg_disable(hwmgr);
4498 PP_ASSERT_WITH_CODE((tmp_result == 0),
4499 "Failed to disable acg!", result = tmp_result);
4501 vega10_enable_disable_PCC_limit_feature(hwmgr, false);
4505 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4507 struct vega10_hwmgr *data = hwmgr->backend;
4510 result = vega10_disable_dpm_tasks(hwmgr);
4511 PP_ASSERT_WITH_CODE((0 == result),
4512 "[disable_dpm_tasks] Failed to disable DPM!",
4514 data->water_marks_bitmap &= ~(WaterMarksLoaded);
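/*
 * Report the current sclk overdrive as a percentage,
 * (current_top - golden_top) * 100 / golden_top, comparing the top level of
 * the active gfx DPM table against the golden (default) table.
 */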
4519 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
4521 struct vega10_hwmgr *data = hwmgr->backend;
4522 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4523 struct vega10_single_dpm_table *golden_sclk_table =
4524 &(data->golden_dpm_table.gfx_table);
4527 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
4528 golden_sclk_table->dpm_levels
4529 [golden_sclk_table->count - 1].value) *
4531 golden_sclk_table->dpm_levels
4532 [golden_sclk_table->count - 1].value;
4537 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4539 struct vega10_hwmgr *data = hwmgr->backend;
4540 struct vega10_single_dpm_table *golden_sclk_table =
4541 &(data->golden_dpm_table.gfx_table);
4542 struct pp_power_state *ps;
4543 struct vega10_power_state *vega10_ps;
4545 ps = hwmgr->request_ps;
4550 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4552 vega10_ps->performance_levels
4553 [vega10_ps->performance_level_count - 1].gfx_clock =
4554 golden_sclk_table->dpm_levels
4555 [golden_sclk_table->count - 1].value *
4557 golden_sclk_table->dpm_levels
4558 [golden_sclk_table->count - 1].value;
4560 if (vega10_ps->performance_levels
4561 [vega10_ps->performance_level_count - 1].gfx_clock >
4562 hwmgr->platform_descriptor.overdriveLimit.engineClock) {
4563 vega10_ps->performance_levels
4564 [vega10_ps->performance_level_count - 1].gfx_clock =
4565 hwmgr->platform_descriptor.overdriveLimit.engineClock;
4566 pr_warn("max sclk supported by vbios is %d\n",
4567 hwmgr->platform_descriptor.overdriveLimit.engineClock);
4572 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
4574 struct vega10_hwmgr *data = hwmgr->backend;
4575 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4576 struct vega10_single_dpm_table *golden_mclk_table =
4577 &(data->golden_dpm_table.mem_table);
4580 value = (mclk_table->dpm_levels
4581 [mclk_table->count - 1].value -
4582 golden_mclk_table->dpm_levels
4583 [golden_mclk_table->count - 1].value) *
4585 golden_mclk_table->dpm_levels
4586 [golden_mclk_table->count - 1].value;
4591 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4593 struct vega10_hwmgr *data = hwmgr->backend;
4594 struct vega10_single_dpm_table *golden_mclk_table =
4595 &(data->golden_dpm_table.mem_table);
4596 struct pp_power_state *ps;
4597 struct vega10_power_state *vega10_ps;
4599 ps = hwmgr->request_ps;
4604 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
4606 vega10_ps->performance_levels
4607 [vega10_ps->performance_level_count - 1].mem_clock =
4608 golden_mclk_table->dpm_levels
4609 [golden_mclk_table->count - 1].value *
4611 golden_mclk_table->dpm_levels
4612 [golden_mclk_table->count - 1].value;
4614 if (vega10_ps->performance_levels
4615 [vega10_ps->performance_level_count - 1].mem_clock >
4616 hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
4617 vega10_ps->performance_levels
4618 [vega10_ps->performance_level_count - 1].mem_clock =
4619 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
4620 pr_warn("max mclk supported by vbios is %d\n",
4621 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
4627 static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
4628 uint32_t virtual_addr_low,
4629 uint32_t virtual_addr_hi,
4630 uint32_t mc_addr_low,
4631 uint32_t mc_addr_hi,
4634 smum_send_msg_to_smc_with_parameter(hwmgr,
4635 PPSMC_MSG_SetSystemVirtualDramAddrHigh,
4637 smum_send_msg_to_smc_with_parameter(hwmgr,
4638 PPSMC_MSG_SetSystemVirtualDramAddrLow,
4640 smum_send_msg_to_smc_with_parameter(hwmgr,
4641 PPSMC_MSG_DramLogSetDramAddrHigh,
4644 smum_send_msg_to_smc_with_parameter(hwmgr,
4645 PPSMC_MSG_DramLogSetDramAddrLow,
4648 smum_send_msg_to_smc_with_parameter(hwmgr,
4649 PPSMC_MSG_DramLogSetDramSize,
4654 static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
4655 struct PP_TemperatureRange *thermal_data)
4657 struct phm_ppt_v2_information *table_info =
4658 (struct phm_ppt_v2_information *)hwmgr->pptable;
4660 memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
4662 thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
4663 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
4668 static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
4670 struct vega10_hwmgr *data = hwmgr->backend;
4671 uint32_t i, size = 0;
4672 static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
4678 static const char *profile_name[6] = {"3D_FULL_SCREEN",
4684 static const char *title[6] = {"NUM",
4689 "MIN_ACTIVE_LEVEL"};
4694 size += sprintf(buf + size, "%s %16s %s %s %s %s\n", title[0],
4695 title[1], title[2], title[3], title[4], title[5]);
4697 for (i = 0; i < PP_SMC_POWER_PROFILE_CUSTOM; i++)
4698 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
4699 i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4700 profile_mode_setting[i][0], profile_mode_setting[i][1],
4701 profile_mode_setting[i][2], profile_mode_setting[i][3]);
4702 size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n", i,
4703 profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
4704 data->custom_profile_mode[0], data->custom_profile_mode[1],
4705 data->custom_profile_mode[2], data->custom_profile_mode[3]);
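/*
 * Select the active workload mask on the SMC and, for the CUSTOM profile,
 * forward the user-supplied busy set point, FPS, use_rlc_busy and minimum
 * active level parameters.
 */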
4709 static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
4711 struct vega10_hwmgr *data = hwmgr->backend;
4712 uint8_t busy_set_point;
4714 uint8_t use_rlc_busy;
4715 uint8_t min_active_level;
4717 hwmgr->power_profile_mode = input[size];
4719 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
4720 1<<hwmgr->power_profile_mode);
4722 if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
4723 if (size == 0 || size > 4)
4726 data->custom_profile_mode[0] = busy_set_point = input[0];
4727 data->custom_profile_mode[1] = FPS = input[1];
4728 data->custom_profile_mode[2] = use_rlc_busy = input[2];
4729 data->custom_profile_mode[3] = min_active_level = input[3];
4730 smum_send_msg_to_smc_with_parameter(hwmgr,
4731 PPSMC_MSG_SetCustomGfxDpmParameters,
4732 busy_set_point | FPS << 8 |
4733 use_rlc_busy << 16 | min_active_level << 24);
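/*
 * Validate a user-supplied overdrive clock/voltage pair against the ODN
 * voltage range and the golden-table / vbios overdrive clock limits.
 */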
4740 static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
4741 enum PP_OD_DPM_TABLE_COMMAND type,
4745 struct vega10_hwmgr *data = hwmgr->backend;
4746 struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4747 struct vega10_single_dpm_table *golden_table;
4749 if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) {
4750 pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc);
4754 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4755 golden_table = &(data->golden_dpm_table.gfx_table);
4756 if (golden_table->dpm_levels[0].value > clk ||
4757 hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
4758 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
4759 golden_table->dpm_levels[0].value/100,
4760 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4763 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4764 golden_table = &(data->golden_dpm_table.mem_table);
4765 if (golden_table->dpm_levels[0].value > clk ||
4766 hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
4767 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
4768 golden_table->dpm_levels[0].value/100,
4769 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
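/*
 * After an sclk or mclk overdrive edit, propagate the new voltages into the
 * ODN VDDC lookup table and raise the SOC clock levels where needed so they
 * remain consistent with the edited memory clocks.
 */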
4779 static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
4780 enum PP_OD_DPM_TABLE_COMMAND type)
4782 struct vega10_hwmgr *data = hwmgr->backend;
4783 struct phm_ppt_v2_information *table_info = hwmgr->pptable;
4784 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
4785 struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
4787 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
4788 &data->odn_dpm_table.vdd_dep_on_socclk;
4789 struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table;
4791 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep;
4794 if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
4795 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
4796 for (i = 0; i < podn_vdd_dep->count - 1; i++)
4797 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4798 if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
4799 od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
4800 } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
4801 podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
4802 for (i = 0; i < dpm_table->count; i++) {
4803 for (j = 0; j < od_vddc_lookup_table->count; j++) {
4804 if (od_vddc_lookup_table->entries[j].us_vdd >
4805 podn_vdd_dep->entries[i].vddc)
4808 if (j == od_vddc_lookup_table->count) {
4809 od_vddc_lookup_table->entries[j-1].us_vdd =
4810 podn_vdd_dep->entries[i].vddc;
4811 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4813 podn_vdd_dep->entries[i].vddInd = j;
4815 dpm_table = &data->dpm_table.soc_table;
4816 for (i = 0; i < dep_table->count; i++) {
4817 if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
4818 dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
4819 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4820 podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4821 dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
4824 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
4825 podn_vdd_dep->entries[dep_table->count-1].clk) {
4826 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4827 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
4828 dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
4830 if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
4831 podn_vdd_dep->entries[dep_table->count-1].vddInd) {
4832 data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
4833 podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
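/*
 * Entry point for overdrive table edits (typically reached through the
 * pp_od_clk_voltage sysfs interface): select the sclk or mclk ODN table,
 * restore defaults or commit, then validate and apply each
 * (level, clock-in-MHz, voltage-in-mV) triple and update the SOC table.
 *
 * A minimal usage sketch, assuming the standard pp_od_clk_voltage syntax:
 *   echo "s 7 1600 1050" > pp_od_clk_voltage   # sclk level 7 -> 1600 MHz @ 1050 mV
 *   echo "c" > pp_od_clk_voltage               # commit the edited table
 */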
4838 static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
4839 enum PP_OD_DPM_TABLE_COMMAND type,
4840 long *input, uint32_t size)
4842 struct vega10_hwmgr *data = hwmgr->backend;
4843 struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table;
4844 struct vega10_single_dpm_table *dpm_table;
4848 uint32_t input_level;
4851 PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
4854 if (!hwmgr->od_enabled) {
4855 pr_info("OverDrive feature not enabled\n");
4859 if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
4860 dpm_table = &data->dpm_table.gfx_table;
4861 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk;
4862 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4863 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
4864 dpm_table = &data->dpm_table.mem_table;
4865 podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk;
4866 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4867 } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
4868 memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
4869 vega10_odn_initial_default_setting(hwmgr);
4871 } else if (PP_OD_COMMIT_DPM_TABLE == type) {
4872 vega10_check_dpm_table_updated(hwmgr);
4878 for (i = 0; i < size; i += 3) {
4879 if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) {
4880 pr_info("invalid clock voltage input\n");
4883 input_level = input[i];
4884 input_clk = input[i+1] * 100;
4885 input_vol = input[i+2];
4887 if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
4888 dpm_table->dpm_levels[input_level].value = input_clk;
4889 podn_vdd_dep_table->entries[input_level].clk = input_clk;
4890 podn_vdd_dep_table->entries[input_level].vddc = input_vol;
4895 vega10_odn_update_soc_table(hwmgr, type);
4899 static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
4900 PHM_PerformanceLevelDesignation designation, uint32_t index,
4901 PHM_PerformanceLevel *level)
4903 const struct vega10_power_state *ps;
4904 struct vega10_hwmgr *data;
4907 if (level == NULL || hwmgr == NULL || state == NULL)
4910 data = hwmgr->backend;
4911 ps = cast_const_phw_vega10_power_state(state);
4913 i = index > ps->performance_level_count - 1 ?
4914 ps->performance_level_count - 1 : index;
4916 level->coreClock = ps->performance_levels[i].gfx_clock;
4917 level->memory_clock = ps->performance_levels[i].mem_clock;
4922 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
4923 .backend_init = vega10_hwmgr_backend_init,
4924 .backend_fini = vega10_hwmgr_backend_fini,
4925 .asic_setup = vega10_setup_asic_task,
4926 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
4927 .dynamic_state_management_disable = vega10_disable_dpm_tasks,
4928 .get_num_of_pp_table_entries =
4929 vega10_get_number_of_powerplay_table_entries,
4930 .get_power_state_size = vega10_get_power_state_size,
4931 .get_pp_table_entry = vega10_get_pp_table_entry,
4932 .patch_boot_state = vega10_patch_boot_state,
4933 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
4934 .power_state_set = vega10_set_power_state_tasks,
4935 .get_sclk = vega10_dpm_get_sclk,
4936 .get_mclk = vega10_dpm_get_mclk,
4937 .notify_smc_display_config_after_ps_adjustment =
4938 vega10_notify_smc_display_config_after_ps_adjustment,
4939 .force_dpm_level = vega10_dpm_force_dpm_level,
4940 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
4941 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
4942 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
4943 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
4944 .reset_fan_speed_to_default =
4945 vega10_fan_ctrl_reset_fan_speed_to_default,
4946 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
4947 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
4948 .uninitialize_thermal_controller =
4949 vega10_thermal_ctrl_uninitialize_thermal_controller,
4950 .set_fan_control_mode = vega10_set_fan_control_mode,
4951 .get_fan_control_mode = vega10_get_fan_control_mode,
4952 .read_sensor = vega10_read_sensor,
4953 .get_dal_power_level = vega10_get_dal_power_level,
4954 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
4955 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
4956 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
4957 .display_clock_voltage_request = vega10_display_clock_voltage_request,
4958 .force_clock_level = vega10_force_clock_level,
4959 .print_clock_levels = vega10_print_clock_levels,
4960 .display_config_changed = vega10_display_configuration_changed_task,
4961 .powergate_uvd = vega10_power_gate_uvd,
4962 .powergate_vce = vega10_power_gate_vce,
4963 .check_states_equal = vega10_check_states_equal,
4964 .check_smc_update_required_for_display_configuration =
4965 vega10_check_smc_update_required_for_display_configuration,
4966 .power_off_asic = vega10_power_off_asic,
4967 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
4968 .get_sclk_od = vega10_get_sclk_od,
4969 .set_sclk_od = vega10_set_sclk_od,
4970 .get_mclk_od = vega10_get_mclk_od,
4971 .set_mclk_od = vega10_set_mclk_od,
4972 .avfs_control = vega10_avfs_enable,
4973 .notify_cac_buffer_info = vega10_notify_cac_buffer_info,
4974 .get_thermal_temperature_range = vega10_get_thermal_temperature_range,
4975 .register_irq_handlers = smu9_register_irq_handlers,
4976 .start_thermal_controller = vega10_start_thermal_controller,
4977 .get_power_profile_mode = vega10_get_power_profile_mode,
4978 .set_power_profile_mode = vega10_set_power_profile_mode,
4979 .set_power_limit = vega10_set_power_limit,
4980 .odn_edit_dpm_table = vega10_odn_edit_dpm_table,
4981 .get_performance_level = vega10_get_performance_level,
4984 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
4986 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
4987 hwmgr->pptable_func = &vega10_pptable_funcs;